Diffstat (limited to 'weed/s3api')
| -rw-r--r-- | weed/s3api/auth_credentials.go | 53 |
| -rw-r--r-- | weed/s3api/auth_credentials_subscribe.go | 2 |
| -rw-r--r-- | weed/s3api/chunked_reader_v4.go | 3 |
| -rw-r--r-- | weed/s3api/filer_util_tags.go | 4 |
| -rw-r--r-- | weed/s3api/s3_constants/header.go (renamed from weed/s3api/http/header.go) | 2 |
| -rw-r--r-- | weed/s3api/s3_constants/s3_actions.go | 2 |
| -rw-r--r-- | weed/s3api/s3api_bucket_handlers.go | 23 |
| -rw-r--r-- | weed/s3api/s3api_object_copy_handlers.go | 53 |
| -rw-r--r-- | weed/s3api/s3api_object_copy_handlers_test.go | 148 |
| -rw-r--r-- | weed/s3api/s3api_object_handlers.go | 36 |
| -rw-r--r-- | weed/s3api/s3api_object_handlers_postpolicy.go | 2 |
| -rw-r--r-- | weed/s3api/s3api_object_multipart_handlers.go | 17 |
| -rw-r--r-- | weed/s3api/s3api_object_tagging_handlers.go | 8 |
| -rw-r--r-- | weed/s3api/s3api_objects_list_handlers.go | 176 |
| -rw-r--r-- | weed/s3api/s3err/audit_fluent.go | 8 |
| -rw-r--r-- | weed/s3api/s3err/s3api_errors.go | 6 |
| -rw-r--r-- | weed/s3api/stats.go | 6 |
17 files changed, 312 insertions, 237 deletions
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index 53a55617f..fb23d9ce9 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -12,7 +12,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )
@@ -26,8 +25,9 @@ type Iam interface {
 type IdentityAccessManagement struct {
 	m sync.RWMutex
 
-	identities []*Identity
-	domain     string
+	identities    []*Identity
+	isAuthEnabled bool
+	domain        string
 }
 
 type Identity struct {
@@ -137,14 +137,15 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api
 	iam.m.Lock()
 	// atomically switch
 	iam.identities = identities
+	if !iam.isAuthEnabled { // one-directional, no toggling
+		iam.isAuthEnabled = len(identities) > 0
+	}
 	iam.m.Unlock()
 	return nil
 }
 
 func (iam *IdentityAccessManagement) isEnabled() bool {
-	iam.m.RLock()
-	defer iam.m.RUnlock()
-	return len(iam.identities) > 0
+	return iam.isAuthEnabled
 }
 
 func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
@@ -175,20 +176,20 @@ func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, foun
 }
 
 func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
-
-	if !iam.isEnabled() {
-		return f
-	}
-
 	return func(w http.ResponseWriter, r *http.Request) {
+		if !iam.isEnabled() {
+			f(w, r)
+			return
+		}
+
 		identity, errCode := iam.authRequest(r, action)
 		if errCode == s3err.ErrNone {
 			if identity != nil && identity.Name != "" {
-				r.Header.Set(xhttp.AmzIdentityId, identity.Name)
+				r.Header.Set(s3_constants.AmzIdentityId, identity.Name)
 				if identity.isAdmin() {
-					r.Header.Set(xhttp.AmzIsAdmin, "true")
-				} else if _, ok := r.Header[xhttp.AmzIsAdmin]; ok {
-					r.Header.Del(xhttp.AmzIsAdmin)
+					r.Header.Set(s3_constants.AmzIsAdmin, "true")
+				} else if _, ok := r.Header[s3_constants.AmzIsAdmin]; ok {
+					r.Header.Del(s3_constants.AmzIsAdmin)
 				}
 			}
 			f(w, r)
@@ -209,7 +210,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 		return identity, s3err.ErrNone
 	case authTypeUnknown:
 		glog.V(3).Infof("unknown auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Unknown")
+		r.Header.Set(s3_constants.AmzAuthType, "Unknown")
 		return identity, s3err.ErrAccessDenied
 	case authTypePresignedV2, authTypeSignedV2:
 		glog.V(3).Infof("v2 auth type")
@@ -221,17 +222,17 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 		authType = "SigV4"
 	case authTypePostPolicy:
 		glog.V(3).Infof("post policy auth type")
-		r.Header.Set(xhttp.AmzAuthType, "PostPolicy")
+		r.Header.Set(s3_constants.AmzAuthType, "PostPolicy")
 		return identity, s3err.ErrNone
 	case authTypeJWT:
 		glog.V(3).Infof("jwt auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Jwt")
+		r.Header.Set(s3_constants.AmzAuthType, "Jwt")
 		return identity, s3err.ErrNotImplemented
 	case authTypeAnonymous:
 		authType = "Anonymous"
 		identity, found = iam.lookupAnonymous()
 		if !found {
-			r.Header.Set(xhttp.AmzAuthType, authType)
+			r.Header.Set(s3_constants.AmzAuthType, authType)
 			return identity, s3err.ErrAccessDenied
 		}
 	default:
@@ -239,7 +240,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 	}
 	if len(authType) > 0 {
-		r.Header.Set(xhttp.AmzAuthType, authType)
+		r.Header.Set(s3_constants.AmzAuthType, authType)
 	}
 	if s3Err != s3err.ErrNone {
 		return identity, s3Err
@@ -247,7 +248,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 
 	glog.V(3).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action)
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	if !identity.canDo(action, bucket, object) {
 		return identity, s3err.ErrAccessDenied
 	}
@@ -267,7 +268,7 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 		return identity, s3err.ErrNone
 	case authTypeUnknown:
 		glog.V(3).Infof("unknown auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Unknown")
+		r.Header.Set(s3_constants.AmzAuthType, "Unknown")
 		return identity, s3err.ErrAccessDenied
 	case authTypePresignedV2, authTypeSignedV2:
 		glog.V(3).Infof("v2 auth type")
@@ -279,17 +280,17 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 		authType = "SigV4"
 	case authTypePostPolicy:
 		glog.V(3).Infof("post policy auth type")
-		r.Header.Set(xhttp.AmzAuthType, "PostPolicy")
+		r.Header.Set(s3_constants.AmzAuthType, "PostPolicy")
 		return identity, s3err.ErrNone
 	case authTypeJWT:
 		glog.V(3).Infof("jwt auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Jwt")
+		r.Header.Set(s3_constants.AmzAuthType, "Jwt")
 		return identity, s3err.ErrNotImplemented
 	case authTypeAnonymous:
 		authType = "Anonymous"
 		identity, found = iam.lookupAnonymous()
 		if !found {
-			r.Header.Set(xhttp.AmzAuthType, authType)
+			r.Header.Set(s3_constants.AmzAuthType, authType)
 			return identity, s3err.ErrAccessDenied
 		}
 	default:
@@ -297,7 +298,7 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 	}
 	if len(authType) > 0 {
-		r.Header.Set(xhttp.AmzAuthType, authType)
+		r.Header.Set(s3_constants.AmzAuthType, authType)
 	}
 
 	glog.V(3).Infof("auth error: %v", s3Err)
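Note on the new `isAuthEnabled` flag: `isEnabled()` runs on every request, and the old version took an RLock just to check `len(iam.identities) > 0`. The flag only ever flips from false to true (under the write lock), so the hot path now reads it without locking. A minimal sketch of the idea, with illustrative names that are not SeaweedFS code; `go test -race` would still flag the unsynchronized read, which the change accepts because a stale `false` only delays enforcement by a request:

```go
package main

import (
	"fmt"
	"sync"
)

type iamStub struct {
	m             sync.RWMutex
	identities    []string
	isAuthEnabled bool // flips false -> true once, never back
}

func (iam *iamStub) load(identities []string) {
	iam.m.Lock()
	iam.identities = identities
	if !iam.isAuthEnabled { // one-directional, no toggling
		iam.isAuthEnabled = len(identities) > 0
	}
	iam.m.Unlock()
}

// isEnabled skips the per-request RLock the old length check needed;
// the flag is monotonic, so readers can tolerate a briefly stale false.
func (iam *iamStub) isEnabled() bool {
	return iam.isAuthEnabled
}

func main() {
	iam := &iamStub{}
	fmt.Println(iam.isEnabled()) // false
	iam.load([]string{"admin"})
	fmt.Println(iam.isEnabled()) // true
}
```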
diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go
index 2cea739c6..91fd5d830 100644
--- a/weed/s3api/auth_credentials_subscribe.go
+++ b/weed/s3api/auth_credentials_subscribe.go
@@ -33,7 +33,7 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
 	}
 
 	util.RetryForever("followIamChanges", func() error {
-		return pb.WithFilerClientFollowMetadata(s3a, clientName, s3a.randomClientId, prefix, &lastTsNs, 0, processEventFn, true)
+		return pb.WithFilerClientFollowMetadata(s3a, clientName, s3a.randomClientId, prefix, &lastTsNs, 0, 0, processEventFn, pb.FatalOnError)
 	}, func(err error) bool {
 		glog.V(0).Infof("iam follow metadata changes: %v", err)
 		return true
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index e683faf22..2678f312f 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -24,7 +24,6 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"errors"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"hash"
@@ -92,7 +91,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
 		return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID
 	}
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	if !identity.canDo(s3_constants.ACTION_WRITE, bucket, object) {
 		errCode = s3err.ErrAccessDenied
 		return
diff --git a/weed/s3api/filer_util_tags.go b/weed/s3api/filer_util_tags.go
index e45230165..18d4d69c5 100644
--- a/weed/s3api/filer_util_tags.go
+++ b/weed/s3api/filer_util_tags.go
@@ -1,14 +1,14 @@
 package s3api
 
 import (
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 )
 
 const (
-	S3TAG_PREFIX = xhttp.AmzObjectTagging + "-"
+	S3TAG_PREFIX = s3_constants.AmzObjectTagging + "-"
 )
 
 func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) {
diff --git a/weed/s3api/http/header.go b/weed/s3api/s3_constants/header.go
index 30fc8eefa..cd725d435 100644
--- a/weed/s3api/http/header.go
+++ b/weed/s3api/s3_constants/header.go
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package http
+package s3_constants
 
 import (
 	"github.com/gorilla/mux"
diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go
index 4e484ac98..0fbf134e3 100644
--- a/weed/s3api/s3_constants/s3_actions.go
+++ b/weed/s3api/s3_constants/s3_actions.go
@@ -6,4 +6,6 @@ const (
 	ACTION_ADMIN   = "Admin"
 	ACTION_TAGGING = "Tagging"
 	ACTION_LIST    = "List"
+
+	SeaweedStorageDestinationHeader = "x-seaweedfs-destination"
 )
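The new `SeaweedStorageDestinationHeader` constant carries the object's final path to the filer while the bytes themselves may be uploaded under a different URL (copy targets, multipart parts). A hedged sketch of how a caller attaches it, mirroring what `putToFiler` does later in this diff; `uploadUrl`, `body`, and `destination` are assumed inputs:

```go
// newFilerPut builds a filer upload request and, when the final object
// location is known, adds the destination hint header.
func newFilerPut(uploadUrl, destination string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPut, uploadUrl, body)
	if err != nil {
		return nil, err
	}
	if destination != "" {
		// s3_constants.SeaweedStorageDestinationHeader == "x-seaweedfs-destination"
		req.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
	}
	return req, nil
}
```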
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 6f72e045f..f70e46b92 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -13,7 +13,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -52,7 +51,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	identityId := r.Header.Get(xhttp.AmzIdentityId)
+	identityId := r.Header.Get(s3_constants.AmzIdentityId)
 
 	var buckets []*s3.Bucket
 	for _, entry := range entries {
@@ -80,7 +79,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
 
 func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutBucketHandler %s", bucket)
 
 	// avoid duplicated buckets
@@ -121,11 +120,11 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 	}
 
 	fn := func(entry *filer_pb.Entry) {
-		if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
+		if identityId := r.Header.Get(s3_constants.AmzIdentityId); identityId != "" {
 			if entry.Extended == nil {
 				entry.Extended = make(map[string][]byte)
 			}
-			entry.Extended[xhttp.AmzIdentityId] = []byte(identityId)
+			entry.Extended[s3_constants.AmzIdentityId] = []byte(identityId)
 		}
 	}
 
@@ -141,7 +140,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 
 func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteBucketHandler %s", bucket)
 
 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
@@ -194,7 +193,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
 
 func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("HeadBucketHandler %s", bucket)
 
 	if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound {
@@ -218,7 +217,7 @@ func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorC
 }
 
 func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
-	isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != ""
+	isAdmin := r.Header.Get(s3_constants.AmzIsAdmin) != ""
 	if isAdmin {
 		return true
 	}
@@ -226,8 +225,8 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 		return true
 	}
 
-	identityId := r.Header.Get(xhttp.AmzIdentityId)
-	if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok {
+	identityId := r.Header.Get(s3_constants.AmzIdentityId)
+	if id, ok := entry.Extended[s3_constants.AmzIdentityId]; ok {
 		if identityId != string(id) {
 			return false
 		}
@@ -239,7 +238,7 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
 func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) {
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
 
 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
@@ -279,7 +278,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
 func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetBucketLifecycleConfigurationHandler %s", bucket)
 
 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index c44ca7ddf..9157748f6 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -3,8 +3,7 @@ package s3api
 import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"modernc.org/strutil"
 	"net/http"
@@ -23,7 +22,7 @@ const (
 
 func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 
-	dstBucket, dstObject := xhttp.GetBucketAndObject(r)
+	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)
 
 	// Copy source path.
 	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -94,7 +93,8 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 		return
 	}
 	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
-	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
+	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
+	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination)
 
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
@@ -129,7 +129,7 @@ type CopyPartResult struct {
 func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
-	dstBucket, _ := xhttp.GetBucketAndObject(r)
+	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)
 
 	// Copy source path.
 	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -177,7 +177,8 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	defer dataReader.Close()
 
 	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
-	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
+	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
+	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination)
 
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
@@ -196,24 +197,24 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 }
 
 func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
-	return reqHeader.Get(headers.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(headers.AmzObjectTaggingDirective) == DirectiveReplace
+	return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace
 }
 
 func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
-	if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) == 0 {
-		if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
-			reqHeader[xhttp.AmzStorageClass] = sc
+	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 {
+		if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
+			reqHeader[s3_constants.AmzStorageClass] = sc
 		}
 	}
 
 	if !replaceMeta {
 		for header, _ := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
 				delete(reqHeader, header)
 			}
 		}
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
 				reqHeader[k] = v
 			}
 		}
@@ -221,14 +222,14 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin
 
 	if !replaceTagging {
 		for header, _ := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzObjectTagging) {
+			if strings.HasPrefix(header, s3_constants.AmzObjectTagging) {
 				delete(reqHeader, header)
 			}
 		}
 
 		found := false
 		for k, _ := range existing {
-			if strings.HasPrefix(k, xhttp.AmzObjectTaggingPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) {
 				found = true
 				break
 			}
@@ -245,7 +246,7 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin
 				tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
 			}
 			tagStr := strutil.JoinFields(tagArr, "&")
-			reqHeader.Set(xhttp.AmzObjectTagging, tagStr)
+			reqHeader.Set(s3_constants.AmzObjectTagging, tagStr)
 		}
 	}
 	return
@@ -254,16 +255,16 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin
 func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte) {
 	metadata = make(map[string][]byte)
 
-	if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
-		metadata[xhttp.AmzStorageClass] = sc
+	if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
+		metadata[s3_constants.AmzStorageClass] = sc
 	}
-	if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) > 0 {
-		metadata[xhttp.AmzStorageClass] = []byte(sc)
+	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 {
+		metadata[s3_constants.AmzStorageClass] = []byte(sc)
 	}
 
 	if replaceMeta {
 		for header, values := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
 				for _, value := range values {
 					metadata[header] = []byte(value)
 				}
@@ -271,30 +272,30 @@ func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, rep
 		}
 	} else {
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
 				metadata[k] = v
 			}
 		}
 	}
 
 	if replaceTagging {
-		if tags := reqHeader.Get(xhttp.AmzObjectTagging); tags != "" {
+		if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" {
 			for _, v := range strings.Split(tags, "&") {
 				tag := strings.Split(v, "=")
 				if len(tag) == 2 {
-					metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
 				} else if len(tag) == 1 {
-					metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil
+					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil
 				}
 			}
 		}
 	} else {
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzObjectTagging) {
+			if strings.HasPrefix(k, s3_constants.AmzObjectTagging) {
 				metadata[k] = v
 			}
 		}
-		delete(metadata, xhttp.AmzTagCount)
+		delete(metadata, s3_constants.AmzTagCount)
 	}
 
 	return
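Both copy handlers now compute the final object path the same way before handing it to `putToFiler`, which forwards it via the destination header introduced above. A worked example of the string layout, assuming the usual `/buckets` root and noting that the object key here always carries a leading slash:

```go
// With BucketsPath "/buckets", dstBucket "photos", dstObject "/2022/a.jpg":
destination := fmt.Sprintf("%s/%s%s", "/buckets", "photos", "/2022/a.jpg")
fmt.Println(destination) // /buckets/photos/2022/a.jpg
```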
diff --git a/weed/s3api/s3api_object_copy_handlers_test.go b/weed/s3api/s3api_object_copy_handlers_test.go
index d2c8e488b..610b29a6b 100644
--- a/weed/s3api/s3api_object_copy_handlers_test.go
+++ b/weed/s3api/s3api_object_copy_handlers_test.go
@@ -2,7 +2,7 @@ package s3api
 
 import (
 	"fmt"
-	headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"net/http"
 	"reflect"
 	"sort"
@@ -55,10 +55,10 @@ var processMetadataTestCases = []struct {
 	{
 		202,
 		H{
-			"User-Agent":                 "firefox",
-			"X-Amz-Meta-My-Meta":         "request",
-			"X-Amz-Tagging":              "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent":                        "firefox",
+			"X-Amz-Meta-My-Meta":                "request",
+			"X-Amz-Tagging":                     "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:   DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -71,20 +71,20 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent":                 "firefox",
-			"X-Amz-Meta-My-Meta":         "request",
-			"X-Amz-Tagging":              "A=B&a=b&type=existing",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent":                        "firefox",
+			"X-Amz-Meta-My-Meta":                "request",
+			"X-Amz-Tagging":                     "A=B&a=b&type=existing",
+			s3_constants.AmzUserMetaDirective:   DirectiveReplace,
 		},
 	},
 	{
 		203,
 		H{
-			"User-Agent":                      "firefox",
-			"X-Amz-Meta-My-Meta":              "request",
-			"X-Amz-Tagging":                   "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -97,21 +97,21 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent":                      "firefox",
-			"X-Amz-Meta-My-Meta":              "existing",
-			"X-Amz-Tagging":                   "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "existing",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 	},
 	{
 		204,
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -125,40 +125,40 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 	},
 	{
 		205,
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{},
 		H{},
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 	},
 	{
 		206,
 		H{
-			"User-Agent":                       "firefox",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -172,19 +172,19 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent":                       "firefox",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 	},
 	{
 		207,
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -198,10 +198,10 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 	},
 }
@@ -235,10 +235,10 @@ var processMetadataBytesTestCases = []struct {
 	{
 		102,
 		H{
-			"User-Agent":                 "firefox",
-			"X-Amz-Meta-My-Meta":         "request",
-			"X-Amz-Tagging":              "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent":                        "firefox",
+			"X-Amz-Meta-My-Meta":                "request",
+			"X-Amz-Tagging":                     "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:   DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -257,10 +257,10 @@ var processMetadataBytesTestCases = []struct {
 	{
 		103,
 		H{
-			"User-Agent":                      "firefox",
-			"X-Amz-Meta-My-Meta":              "request",
-			"X-Amz-Tagging":                   "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -279,11 +279,11 @@ var processMetadataBytesTestCases = []struct {
 	{
 		104,
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -302,9 +302,9 @@ var processMetadataBytesTestCases = []struct {
 	{
 		105,
 		H{
-			"User-Agent":                       "firefox",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -318,11 +318,11 @@ var processMetadataBytesTestCases = []struct {
 	{
 		107,
 		H{
-			"User-Agent":                       "firefox",
-			"X-Amz-Meta-My-Meta":               "request",
-			"X-Amz-Tagging":                    "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective:       DirectiveReplace,
-			headers.AmzObjectTaggingDirective:  DirectiveReplace,
+			"User-Agent":                            "firefox",
+			"X-Amz-Meta-My-Meta":                    "request",
+			"X-Amz-Tagging":                         "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective:       DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective:  DirectiveReplace,
 		},
 		H{},
 		H{
@@ -385,10 +385,10 @@ func TestProcessMetadataBytes(t *testing.T) {
 
 func fmtTagging(maps ...map[string]string) {
 	for _, m := range maps {
-		if tagging := m[headers.AmzObjectTagging]; len(tagging) > 0 {
+		if tagging := m[s3_constants.AmzObjectTagging]; len(tagging) > 0 {
 			split := strings.Split(tagging, "&")
 			sort.Strings(split)
-			m[headers.AmzObjectTagging] = strings.Join(split, "&")
+			m[s3_constants.AmzObjectTagging] = strings.Join(split, "&")
 		}
 	}
 }
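The test tables above exercise the S3 copy directives: with COPY (the default) metadata and tags carry over from the source object, with REPLACE they are taken from the request. A hedged client-side example using the AWS Go SDK already vendored in this repo; bucket and key names are illustrative:

```go
// The directives are plain request headers under the hood
// (X-Amz-Metadata-Directive / X-Amz-Tagging-Directive).
input := &s3.CopyObjectInput{
	Bucket:            aws.String("dst-bucket"),
	Key:               aws.String("dst/key"),
	CopySource:        aws.String("src-bucket/src/key"),
	MetadataDirective: aws.String("REPLACE"),
	TaggingDirective:  aws.String("REPLACE"),
	Metadata:          map[string]*string{"my-meta": aws.String("request")},
	Tagging:           aws.String("A=B&a=b"),
}
_ = input // pass to an s3.S3 client's CopyObject call
```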
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 3d26d395e..4ad3454ba 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/util/mem"
 	"golang.org/x/exp/slices"
@@ -18,7 +19,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/pquerna/cachecontrol/cacheobject"
 
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -45,7 +45,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 
 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)
 
 	_, err := validateContentMd5(r.Header)
@@ -92,18 +92,24 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	}
 	defer dataReader.Close()
 
+	objectContentType := r.Header.Get("Content-Type")
 	if strings.HasSuffix(object, "/") {
-		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
+		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), func(entry *filer_pb.Entry) {
+			if objectContentType == "" {
+				objectContentType = "httpd/unix-directory"
+			}
+			entry.Attributes.Mime = objectContentType
+		}); err != nil {
 			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 			return
 		}
 	} else {
 		uploadUrl := s3a.toFilerUrl(bucket, object)
-		if r.Header.Get("Content-Type") == "" {
+		if objectContentType == "" {
 			dataReader = mimeDetect(r, dataReader)
 		}
 
-		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
+		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "")
 
 		if errCode != s3err.ErrNone {
 			s3err.WriteErrorResponse(w, r, errCode)
@@ -132,7 +138,7 @@ func (s3a *S3ApiServer) toFilerUrl(bucket, object string) string {
 
 func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)
 
 	if strings.HasSuffix(r.URL.Path, "/") {
@@ -147,7 +153,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
 
 func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object)
 
 	destUrl := s3a.toFilerUrl(bucket, object)
@@ -157,7 +163,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
 
 func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)
 
 	destUrl := s3a.toFilerUrl(bucket, object)
@@ -206,7 +212,7 @@ type DeleteObjectsResponse struct {
 // DeleteMultipleObjectsHandler - Delete multiple objects
 func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)
 
 	deleteXMLBytes, err := io.ReadAll(r.Body)
@@ -322,7 +328,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
 
 	for k, v := range r.URL.Query() {
-		if _, ok := xhttp.PassThroughHeaders[strings.ToLower(k)]; ok {
+		if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok {
 			proxyReq.Header[k] = v
 		}
 	}
@@ -347,6 +353,11 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		return
 	}
 
+	if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRange)
+		return
+	}
+
 	if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
 		if r.Method != "DELETE" {
 			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
@@ -377,7 +388,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (s
 	return statusCode
 }
 
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) {
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string) (etag string, code s3err.ErrorCode) {
 
 	hash := md5.New()
 	var body = io.TeeReader(dataReader, hash)
@@ -390,6 +401,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
 	}
 
 	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+	if destination != "" {
+		proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
+	}
 
 	for header, values := range r.Header {
 		for _, value := range values {
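`proxyToFiler` now translates the filer's 416 response into S3's `InvalidRange` error (added to `s3err` later in this diff) instead of passing the raw filer reply through. A hedged sketch of the client-visible behavior, assuming a gateway at `localhost:8333` (the default `weed s3` port):

```go
// expectInvalidRange asks for a byte range past the end of the object and
// checks that the gateway answers with an S3-style 416.
func expectInvalidRange() error {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8333/bucket/key", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", "bytes=999999999-") // unsatisfiable range
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusRequestedRangeNotSatisfiable {
		return fmt.Errorf("expected 416, got %d", resp.StatusCode)
	}
	// body is now <Error><Code>InvalidRange</Code>...</Error>
	return nil
}
```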
diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go
index b0b71b1de..5704fcf38 100644
--- a/weed/s3api/s3api_object_handlers_postpolicy.go
+++ b/weed/s3api/s3api_object_handlers_postpolicy.go
@@ -115,7 +115,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 
 	uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))
 
-	etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody)
+	etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "")
 
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index d74867778..e650c9156 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -5,7 +5,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	weed_server "github.com/chrislusf/seaweedfs/weed/server"
 	"io"
@@ -27,7 +27,7 @@ const (
 
 // NewMultipartUploadHandler - New multipart upload.
 func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 
 	createMultipartUploadInput := &s3.CreateMultipartUploadInput{
 		Bucket: aws.String(bucket),
@@ -61,7 +61,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 
 	parts := &CompleteMultipartUpload{}
 	if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil {
@@ -96,7 +96,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
 // AbortMultipartUploadHandler - Aborts multipart upload.
 func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 
 	// Get upload id.
 	uploadID, _, _, _ := getObjectResources(r.URL.Query())
@@ -125,7 +125,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
 // ListMultipartUploadsHandler - Lists multipart uploads.
 func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 
 	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
 	if maxUploads < 0 {
@@ -164,7 +164,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
 // ListObjectPartsHandler - Lists object parts in a multipart upload.
 func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 
 	uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
 	if partNumberMarker < 0 {
@@ -203,7 +203,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
 // PutObjectPartHandler - Put an object part in a multipart upload.
 func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 
 	uploadID := r.URL.Query().Get("uploadId")
 	err := s3a.checkUploadId(object, uploadID)
@@ -250,8 +250,9 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 	if partID == 1 && r.Header.Get("Content-Type") == "" {
 		dataReader = mimeDetect(r, dataReader)
 	}
+	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
 
-	etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
+	etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination)
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
 		return
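For `PutObjectPart`, the data still streams to the transient multipart area while the destination hint names the final key. A worked example of the two paths, with an illustrative upload layout that may not match the repo's exact multipart directory structure:

```go
// The part's upload URL points at the staging area (layout illustrative):
uploadUrl := "http://filer:8888/buckets/photos/.uploads/<uploadId>/0001.part"
// The destination names the object the parts will eventually become:
destination := fmt.Sprintf("%s/%s%s", "/buckets", "photos", "/2022/a.jpg")
// putToFiler(r, uploadUrl, dataReader, destination) then forwards the
// destination via the x-seaweedfs-destination header.
```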
diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go
index 5c66fb961..9fde0309c 100644
--- a/weed/s3api/s3api_object_tagging_handlers.go
+++ b/weed/s3api/s3api_object_tagging_handlers.go
@@ -3,7 +3,7 @@ package s3api
 import (
 	"encoding/xml"
 	"fmt"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"net/http"
 
@@ -17,7 +17,7 @@ import (
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
 func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetObjectTaggingHandler %s %s", bucket, object)
 
 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
@@ -43,7 +43,7 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
 func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutObjectTaggingHandler %s %s", bucket, object)
 
 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
@@ -99,7 +99,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
 func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteObjectTaggingHandler %s %s", bucket, object)
 
 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index a3b858dcb..6b934bccd 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -5,6 +5,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"net/http"
 	"net/url"
@@ -15,7 +16,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )
@@ -39,7 +39,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
 
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)
 
 	originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
@@ -95,7 +95,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
 
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)
 
 	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
@@ -133,10 +133,10 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 		reqDir = reqDir[1:]
 	}
 	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
+	bucketPrefixLen := len(bucketPrefix)
 	reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
 	if strings.HasSuffix(reqDir, "/") {
-		// remove trailing "/"
-		reqDir = reqDir[:len(reqDir)-1]
+		reqDir = strings.TrimSuffix(reqDir, "/")
 	}
 
 	var contents []ListEntry
@@ -148,31 +148,34 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	// check filer
 	err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
 
-		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
+		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) {
 			if entry.IsDirectory {
 				if delimiter == "/" {
 					commonPrefixes = append(commonPrefixes, PrefixEntry{
-						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
+						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:],
 					})
 				}
-			} else {
-				storageClass := "STANDARD"
-				if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
-					storageClass = string(v)
+				if !(entry.IsDirectoryKeyObject() && strings.HasSuffix(entry.Name, "/")) {
+					return
 				}
-				contents = append(contents, ListEntry{
-					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
-					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-					ETag:         "\"" + filer.ETag(entry) + "\"",
-					Size:         int64(filer.FileSize(entry)),
-					Owner: CanonicalUser{
-						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
-						DisplayName: entry.Attributes.UserName,
-					},
-					StorageClass: StorageClass(storageClass),
-				})
 			}
+			storageClass := "STANDARD"
+			if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
+				storageClass = string(v)
+			}
+			contents = append(contents, ListEntry{
+				Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[bucketPrefixLen:],
+				LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+				ETag:         "\"" + filer.ETag(entry) + "\"",
+				Size:         int64(filer.FileSize(entry)),
+				Owner: CanonicalUser{
+					ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+					DisplayName: entry.Attributes.UserName,
+				},
+				StorageClass: StorageClass(storageClass),
+			})
 		})
+		glog.V(4).Infof("end doListFilerEntries isTruncated:%v nextMarker:%v reqDir: %v prefix: %v", isTruncated, nextMarker, reqDir, prefix)
 		if doErr != nil {
 			return doErr
 		}
@@ -181,6 +184,39 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 			nextMarker = ""
 		}
 
+		if len(contents) == 0 && len(commonPrefixes) == 0 && maxKeys > 0 {
+			if strings.HasSuffix(originalPrefix, "/") && prefix == "" {
+				reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/"))
+				reqDir = strings.TrimSuffix(reqDir, "/")
+			}
+			_, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) {
+				if entry.IsDirectoryKeyObject() && entry.Name == prefix {
+					storageClass := "STANDARD"
+					if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
+						storageClass = string(v)
+					}
+					contents = append(contents, ListEntry{
+						Key:          fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:],
+						LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+						ETag:         "\"" + fmt.Sprintf("%x", entry.Attributes.Md5) + "\"",
+						Size:         int64(filer.FileSize(entry)),
+						Owner: CanonicalUser{
+							ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+							DisplayName: entry.Attributes.UserName,
+						},
+						StorageClass: StorageClass(storageClass),
+					})
+				}
+			})
+			if doErr != nil {
+				return doErr
+			}
+		}
+
+		if len(nextMarker) > 0 {
+			nextMarker = nextMarker[bucketPrefixLen:]
+		}
+
 		response = ListBucketResult{
 			Name:   bucket,
 			Prefix: originalPrefix,
@@ -199,7 +235,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	return
 }
 
-func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
+func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, subEntries bool, bucketPrefixLen int, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
 	// invariants
 	//   prefix and marker should be under dir, marker may contain "/"
 	//   maxKeys should be updated for each recursion
@@ -212,20 +248,27 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 	}
 	if strings.Contains(marker, "/") {
+		if strings.HasSuffix(marker, "/") {
+			marker = strings.TrimSuffix(marker, "/")
+		}
 		sepIndex := strings.Index(marker, "/")
-		subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
-		// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
-		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
-		if subErr != nil {
-			err = subErr
-			return
+		if sepIndex != -1 {
+			subPrefix, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
+			subDir := fmt.Sprintf("%s/%s", dir[0:bucketPrefixLen-1], subPrefix)
+			if strings.HasPrefix(subDir, dir) {
+				subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, subDir, "", maxKeys, subMarker, delimiter, false, false, bucketPrefixLen, eachEntryFn)
+				if subErr != nil {
+					err = subErr
+					return
+				}
+				counter += subCounter
+				isTruncated = isTruncated || subIsTruncated
+				maxKeys -= subCounter
+				nextMarker = subNextMarker
+				// finished processing this sub directory
+				marker = subPrefix
+			}
 		}
-		counter += subCounter
-		isTruncated = isTruncated || subIsTruncated
-		maxKeys -= subCounter
-		nextMarker = subDir + "/" + subNextMarker
-		// finished processing this sub directory
-		marker = subDir
 	}
 	if maxKeys <= 0 {
 		return
@@ -237,7 +280,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 		Prefix:             prefix,
 		Limit:              uint32(maxKeys + 1),
 		StartFromFileName:  marker,
-		InclusiveStartFrom: false,
+		InclusiveStartFrom: inclusiveStartFrom,
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -263,39 +306,46 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 			return
 		}
 		entry := resp.Entry
-		nextMarker = entry.Name
+		nextMarker = dir + "/" + entry.Name
 		if entry.IsDirectory {
 			// println("ListEntries", dir, "dir:", entry.Name)
-			if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
-				if delimiter != "/" {
+			if entry.Name == ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
+				continue
+			}
+			if delimiter == "" {
+				eachEntryFn(dir, entry)
+				// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
+				subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, true, bucketPrefixLen, eachEntryFn)
+				if subErr != nil {
+					err = fmt.Errorf("doListFilerEntries2: %v", subErr)
+					return
+				}
+				// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
+				if subCounter == 0 && entry.IsDirectoryKeyObject() {
+					entry.Name += "/"
 					eachEntryFn(dir, entry)
-					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
-					subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
-					if subErr != nil {
-						err = fmt.Errorf("doListFilerEntries2: %v", subErr)
-						return
-					}
-					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
-					counter += subCounter
-					nextMarker = entry.Name + "/" + subNextMarker
-					if subIsTruncated {
-						isTruncated = true
-						return
-					}
-				} else {
-					var isEmpty bool
-					if !s3a.option.AllowEmptyFolder {
-						if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
-							glog.Errorf("check empty folder %s: %v", dir, err)
-						}
-					}
-					if !isEmpty {
-						eachEntryFn(dir, entry)
-						counter++
+					counter++
+				}
+				counter += subCounter
+				nextMarker = subNextMarker
+				if subIsTruncated {
+					isTruncated = true
+					return
+				}
+			} else if delimiter == "/" {
+				var isEmpty bool
+				if !s3a.option.AllowEmptyFolder && !entry.IsDirectoryKeyObject() {
+					if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+						glog.Errorf("check empty folder %s: %v", dir, err)
 					}
 				}
+				if !isEmpty {
+					nextMarker += "/"
+					eachEntryFn(dir, entry)
+					counter++
+				}
 			}
-		} else {
+		} else if !(delimiter == "/" && subEntries) {
 			// println("ListEntries", dir, "file:", entry.Name)
 			eachEntryFn(dir, entry)
 			counter++
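A key change in the listing code: `doListFilerEntries` now tracks `nextMarker` as a full filer path (`dir + "/" + entry.Name`) instead of a bare entry name, and `listFilerEntries` trims the bucket prefix once at the boundary before the marker is returned to S3 clients. A worked example of that bookkeeping with assumed paths:

```go
// Markers are full filer paths internally, S3 keys externally.
bucketPrefix := fmt.Sprintf("%s/%s/", "/buckets", "photos") // "/buckets/photos/"
bucketPrefixLen := len(bucketPrefix)

nextMarker := "/buckets/photos/2022/a.jpg" // dir + "/" + entry.Name
if len(nextMarker) > 0 {
	nextMarker = nextMarker[bucketPrefixLen:]
}
fmt.Println(nextMarker) // "2022/a.jpg"
```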
diff --git a/weed/s3api/s3err/audit_fluent.go b/weed/s3api/s3err/audit_fluent.go
index fcc5f9a0f..2deb56896 100644
--- a/weed/s3api/s3err/audit_fluent.go
+++ b/weed/s3api/s3err/audit_fluent.go
@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/fluent/fluent-logger-golang/fluent"
 	"net/http"
 	"os"
@@ -134,7 +134,7 @@ func GetAccessHttpLog(r *http.Request, statusCode int, s3errCode ErrorCode) Acce
 }
 
 func GetAccessLog(r *http.Request, HTTPStatusCode int, s3errCode ErrorCode) *AccessLog {
-	bucket, key := xhttp.GetBucketAndObject(r)
+	bucket, key := s3_constants.GetBucketAndObject(r)
 	var errorCode string
 	if s3errCode != ErrNone {
 		errorCode = GetAPIError(s3errCode).Code
@@ -151,8 +151,8 @@ func GetAccessLog(r *http.Request, HTTPStatusCode int, s3errCode ErrorCode) *Acc
 		HostHeader:       hostHeader,
 		RequestID:        r.Header.Get("X-Request-ID"),
 		RemoteIP:         remoteIP,
-		Requester:        r.Header.Get(xhttp.AmzIdentityId),
-		SignatureVersion: r.Header.Get(xhttp.AmzAuthType),
+		Requester:        r.Header.Get(s3_constants.AmzIdentityId),
+		SignatureVersion: r.Header.Get(s3_constants.AmzAuthType),
 		UserAgent:        r.Header.Get("user-agent"),
 		HostId:           hostname,
 		Bucket:           bucket,
diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go
index 52803f398..2e93f49cb 100644
--- a/weed/s3api/s3err/s3api_errors.go
+++ b/weed/s3api/s3err/s3api_errors.go
@@ -64,6 +64,7 @@ const (
 	ErrInvalidMaxDeleteObjects
 	ErrInvalidPartNumberMarker
 	ErrInvalidPart
+	ErrInvalidRange
 	ErrInternalError
 	ErrInvalidCopyDest
 	ErrInvalidCopySource
@@ -370,6 +371,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
 		Description:    "Invalid Request",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidRange: {
+		Code:           "InvalidRange",
+		Description:    "The requested range is not satisfiable",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
 	ErrAuthNotSetup: {
 		Code:           "InvalidRequest",
 		Description:    "Signed request requires setting up SeaweedFS S3 authentication",
diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go
index 973d8c0eb..003807a25 100644
--- a/weed/s3api/stats.go
+++ b/weed/s3api/stats.go
@@ -1,6 +1,7 @@
 package s3api
 
 import (
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"net/http"
 	"strconv"
@@ -27,11 +28,12 @@ func (r *StatusRecorder) Flush() {
 
 func track(f http.HandlerFunc, action string) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
+		bucket, _ := s3_constants.GetBucketAndObject(r)
 		w.Header().Set("Server", "SeaweedFS S3")
 		recorder := NewStatusResponseWriter(w)
 		start := time.Now()
 		f(recorder, r)
-		stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
-		stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc()
+		stats_collect.S3RequestHistogram.WithLabelValues(action, bucket).Observe(time.Since(start).Seconds())
+		stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status), bucket).Inc()
 	}
 }
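The stats change adds a per-bucket label to both request metrics, which means the vector definitions in `weed/stats` must declare the extra label, and series cardinality now grows with the number of buckets. A hedged sketch of what the matching definition looks like with the Prometheus Go client; the names mirror, but do not quote, the repo's actual declarations:

```go
import "github.com/prometheus/client_golang/prometheus"

var s3RequestCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_total",
		Help:      "Counter of s3 requests.",
	},
	[]string{"type", "code", "bucket"}, // one series per action/status/bucket
)

func init() {
	prometheus.MustRegister(s3RequestCounter)
}
```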
