Diffstat (limited to 'weed/s3api')
-rw-r--r--  weed/s3api/auth_credentials.go                  275
-rw-r--r--  weed/s3api/auth_credentials_subscribe.go         70
-rw-r--r--  weed/s3api/auth_credentials_test.go              69
-rw-r--r--  weed/s3api/auth_signature_v2.go                 427
-rw-r--r--  weed/s3api/auth_signature_v4.go                 770
-rw-r--r--  weed/s3api/auto_signature_v4_test.go            421
-rw-r--r--  weed/s3api/chunked_reader_v4.go                 159
-rw-r--r--  weed/s3api/filer_multipart.go                   161
-rw-r--r--  weed/s3api/filer_multipart_test.go               23
-rw-r--r--  weed/s3api/filer_util.go                        171
-rw-r--r--  weed/s3api/filer_util_tags.go                   105
-rw-r--r--  weed/s3api/http/header.go                        36
-rw-r--r--  weed/s3api/policy/post-policy.go                321
-rw-r--r--  weed/s3api/policy/post-policy_test.go           378
-rw-r--r--  weed/s3api/policy/postpolicyform.go             276
-rw-r--r--  weed/s3api/policy/postpolicyform_test.go        106
-rw-r--r--  weed/s3api/s3_constants/s3_actions.go             9
-rw-r--r--  weed/s3api/s3api_auth.go                          6
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go             151
-rw-r--r--  weed/s3api/s3api_errors.go                      131
-rw-r--r--  weed/s3api/s3api_handlers.go                     40
-rw-r--r--  weed/s3api/s3api_object_copy_handlers.go        174
-rw-r--r--  weed/s3api/s3api_object_handlers.go             303
-rw-r--r--  weed/s3api/s3api_object_handlers_postpolicy.go  241
-rw-r--r--  weed/s3api/s3api_object_multipart_handlers.go   117
-rw-r--r--  weed/s3api/s3api_object_tagging_handlers.go     117
-rw-r--r--  weed/s3api/s3api_objects_list_handlers.go       319
-rw-r--r--  weed/s3api/s3api_server.go                       78
-rw-r--r--  weed/s3api/s3api_test.go                         32
-rw-r--r--  weed/s3api/s3err/s3-error.go                     61
-rw-r--r--  weed/s3api/s3err/s3api_errors.go                359
-rw-r--r--  weed/s3api/stats.go                              38
-rw-r--r--  weed/s3api/tags.go                               38
-rw-r--r--  weed/s3api/tags_test.go                          50
34 files changed, 5429 insertions, 603 deletions
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
new file mode 100644
index 000000000..b8af6381a
--- /dev/null
+++ b/weed/s3api/auth_credentials.go
@@ -0,0 +1,275 @@
+package s3api
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+)
+
+type Action string
+
+type Iam interface {
+ Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc
+}
+
+type IdentityAccessManagement struct {
+ identities []*Identity
+ domain string
+}
+
+type Identity struct {
+ Name string
+ Credentials []*Credential
+ Actions []Action
+}
+
+type Credential struct {
+ AccessKey string
+ SecretKey string
+}
+
+func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManagement {
+ iam := &IdentityAccessManagement{
+ domain: option.DomainName,
+ }
+ if option.Config != "" {
+ if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil {
+ glog.Fatalf("fail to load config file %s: %v", option.Config, err)
+ }
+ } else {
+ if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil {
+ glog.Warningf("fail to load config: %v", err)
+ }
+ }
+ return iam
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error {
+ content, err := filer.ReadContent(option.Filer, filer.IamConfigDirecotry, filer.IamIdentityFile)
+ if err != nil {
+ return fmt.Errorf("read S3 config: %v", err)
+ }
+ return iam.loadS3ApiConfigurationFromBytes(content)
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error {
+ content, readErr := ioutil.ReadFile(fileName)
+ if readErr != nil {
+ glog.Warningf("fail to read %s : %v", fileName, readErr)
+ return fmt.Errorf("fail to read %s : %v", fileName, readErr)
+ }
+ return iam.loadS3ApiConfigurationFromBytes(content)
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromBytes(content []byte) error {
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+ if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil {
+ glog.Warningf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal error: %v", err)
+ }
+ if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3ApiConfiguration) error {
+ var identities []*Identity
+ for _, ident := range config.Identities {
+ t := &Identity{
+ Name: ident.Name,
+ Credentials: nil,
+ Actions: nil,
+ }
+ for _, action := range ident.Actions {
+ t.Actions = append(t.Actions, Action(action))
+ }
+ for _, cred := range ident.Credentials {
+ t.Credentials = append(t.Credentials, &Credential{
+ AccessKey: cred.AccessKey,
+ SecretKey: cred.SecretKey,
+ })
+ }
+ identities = append(identities, t)
+ }
+
+ // atomically switch
+ iam.identities = identities
+ return nil
+}
+
+func (iam *IdentityAccessManagement) isEnabled() bool {
+
+ return len(iam.identities) > 0
+}
+
+func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
+
+ for _, ident := range iam.identities {
+ for _, cred := range ident.Credentials {
+ if cred.AccessKey == accessKey {
+ return ident, cred, true
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) {
+
+ for _, ident := range iam.identities {
+ if ident.Name == "anonymous" {
+ return ident, true
+ }
+ }
+ return nil, false
+}
+
+func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
+
+ if !iam.isEnabled() {
+ return f
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ identity, errCode := iam.authRequest(r, action)
+ if errCode == s3err.ErrNone {
+ if identity != nil && identity.Name != "" {
+ r.Header.Set(xhttp.AmzIdentityId, identity.Name)
+ if identity.isAdmin() {
+ r.Header.Set(xhttp.AmzIsAdmin, "true")
+ }
+ }
+ f(w, r)
+ return
+ }
+ writeErrorResponse(w, errCode, r.URL)
+ }
+}
+
+// check whether the request has valid access keys
+func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ var found bool
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return identity, s3err.ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return identity, s3err.ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return identity, s3err.ErrNone
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return identity, s3err.ErrNotImplemented
+ case authTypeAnonymous:
+ identity, found = iam.lookupAnonymous()
+ if !found {
+ return identity, s3err.ErrAccessDenied
+ }
+ default:
+ return identity, s3err.ErrNotImplemented
+ }
+
+ if s3Err != s3err.ErrNone {
+ return identity, s3Err
+ }
+
+ glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
+
+ bucket, _ := getBucketAndObject(r)
+
+ if !identity.canDo(action, bucket) {
+ return identity, s3err.ErrAccessDenied
+ }
+
+ return identity, s3err.ErrNone
+
+}
+
+func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ var found bool
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return identity, s3err.ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return identity, s3err.ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return identity, s3err.ErrNone
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return identity, s3err.ErrNotImplemented
+ case authTypeAnonymous:
+ identity, found = iam.lookupAnonymous()
+ if !found {
+ return identity, s3err.ErrAccessDenied
+ }
+ default:
+ return identity, s3err.ErrNotImplemented
+ }
+
+ glog.V(3).Infof("auth error: %v", s3Err)
+ if s3Err != s3err.ErrNone {
+ return identity, s3Err
+ }
+ return identity, s3err.ErrNone
+}
+
+func (identity *Identity) canDo(action Action, bucket string) bool {
+ if identity.isAdmin() {
+ return true
+ }
+ for _, a := range identity.Actions {
+ if a == action {
+ return true
+ }
+ }
+ if bucket == "" {
+ return false
+ }
+ limitedByBucket := string(action) + ":" + bucket
+ adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket
+ for _, a := range identity.Actions {
+ if string(a) == limitedByBucket {
+ return true
+ }
+ if string(a) == adminLimitedByBucket {
+ return true
+ }
+ }
+ return false
+}
+
+func (identity *Identity) isAdmin() bool {
+ for _, a := range identity.Actions {
+ if a == "Admin" {
+ return true
+ }
+ }
+ return false
+}
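For orientation, here is a minimal sketch (not part of this patch) of the permission semantics implemented by `canDo` and `isAdmin` above: unscoped actions like "Read" apply everywhere, "Action:bucket" grants are bucket-scoped, and "Admin" (global or per-bucket) implies everything. It would live inside the `s3api` package since the types are unexported; the identity, actions, and bucket names are made up for illustration.

```go
package s3api

import "testing"

// Sketch of canDo semantics; the identity and buckets are hypothetical.
func TestCanDoSketch(t *testing.T) {
	ident := &Identity{
		Name:    "example_user",
		Actions: []Action{"Read", "Write:bucket1", "Admin:bucket2"},
	}
	if !ident.canDo("Read", "any-bucket") {
		t.Error("an unscoped Read grant should apply to every bucket")
	}
	if !ident.canDo("Write", "bucket1") {
		t.Error(`"Write:bucket1" should allow writes to bucket1`)
	}
	if ident.canDo("Write", "bucket3") {
		t.Error("writes to bucket3 were never granted")
	}
	if !ident.canDo("Write", "bucket2") {
		t.Error(`"Admin:bucket2" should imply any action on bucket2`)
	}
}
```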
diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go
new file mode 100644
index 000000000..ea4b69550
--- /dev/null
+++ b/weed/s3api/auth_credentials_subscribe.go
@@ -0,0 +1,70 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "io"
+ "time"
+)
+
+func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error {
+
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+
+ message := resp.EventNotification
+ if message.NewEntry == nil {
+ return nil
+ }
+
+ dir := resp.Directory
+
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile {
+ if err := s3a.iam.loadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
+ return err
+ }
+ glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile)
+ }
+
+ return nil
+ }
+
+ for {
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: clientName,
+ PathPrefix: prefix,
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ glog.Fatalf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.Errorf("subscribing filer meta change: %v", err)
+ }
+ time.Sleep(time.Second)
+ }
+}
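subscribeMetaEvents keeps the IAM credentials hot-reloadable: whenever the filer reports a new version of the identity file, the in-memory identities are swapped without a restart. A sketch of how a server might start this watcher in the background is below; the helper name, client name, and start timestamp are assumptions, not taken from this patch.

```go
package s3api

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
)

// startIamWatcher is hypothetical wiring (the real server startup is outside
// this diff): watch the IAM identity file so credential changes take effect
// while the server is running.
func startIamWatcher(s3a *S3ApiServer) {
	go func() {
		startTsNs := time.Now().UnixNano()
		// subscribeMetaEvents loops forever, reconnecting after transient
		// errors, so a returned error here is unexpected.
		if err := s3a.subscribeMetaEvents("s3api-iam-watcher", filer.IamConfigDirecotry, startTsNs); err != nil {
			glog.Errorf("iam watcher stopped: %v", err)
		}
	}()
}
```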
diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go
new file mode 100644
index 000000000..0383ddbcd
--- /dev/null
+++ b/weed/s3api/auth_credentials_test.go
@@ -0,0 +1,69 @@
+package s3api
+
+import (
+ . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "testing"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func TestIdentityListFileFormat(t *testing.T) {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ identity1 := &iam_pb.Identity{
+ Name: "some_name",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_ADMIN,
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+ identity2 := &iam_pb.Identity{
+ Name: "some_read_only_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ },
+ }
+ identity3 := &iam_pb.Identity{
+ Name: "some_normal_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key2",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3)
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ text, _ := m.MarshalToString(s3ApiConfiguration)
+
+ println(text)
+
+}
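The marshaled output of this test doubles as documentation for the identity file format. As a sketch (not in the patch), the same JSON shape can be fed straight back into loadS3ApiConfigurationFromBytes; the identity, keys, and actions here are illustrative, and the field names assume jsonpb's default camelCase convention.

```go
package s3api

import "testing"

// Round-trip sketch: parse a hand-written identity.json-style payload.
func TestLoadConfigurationSketch(t *testing.T) {
	conf := []byte(`{
	  "identities": [
	    {
	      "name": "some_name",
	      "credentials": [
	        {"accessKey": "some_access_key1", "secretKey": "some_secret_key2"}
	      ],
	      "actions": ["Admin", "Read", "Write"]
	    }
	  ]
	}`)
	iam := &IdentityAccessManagement{}
	if err := iam.loadS3ApiConfigurationFromBytes(conf); err != nil {
		t.Fatalf("load: %v", err)
	}
	if !iam.isEnabled() {
		t.Fatal("expected at least one identity to be loaded")
	}
}
```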
diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go
new file mode 100644
index 000000000..5694a96ac
--- /dev/null
+++ b/weed/s3api/auth_signature_v2.go
@@ -0,0 +1,427 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Whitelisted resources that are considered in the query string for signature-V2 calculation.
+// The list must be kept alphabetically sorted.
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// Verify if request has valid AWS Signature Version '2'.
+func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) {
+ if isRequestSignatureV2(r) {
+ return iam.doesSignV2Match(r)
+ }
+ return iam.doesPresignV2SignatureMatch(r)
+}
+
+func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode {
+ accessKey := formValues.Get("AWSAccessKeyId")
+ _, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return s3err.ErrInvalidAccessKeyID
+ }
+ policy := formValues.Get("Policy")
+ signature := formValues.Get("Signature")
+ if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) {
+ return s3err.ErrSignatureDoesNotMatch
+ }
+ return s3err.ErrNone
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// doesSignV2Match - Verify authorization header with calculated header in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html
+// returns true if matches, false otherwise. If error is not nil then it is always false.
+
+func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) {
+ if v2Auth == "" {
+ return "", s3err.ErrAuthHeaderEmpty
+ }
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v2Auth, signV2Algorithm) {
+ return "", s3err.ErrSignatureVersionNotSupported
+ }
+
+ // Below is the V2 signed Auth header format; we split on `space` (after the `AWS` string).
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
+ authFields := strings.Split(v2Auth, " ")
+ if len(authFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+
+ // Then we split on ":" to separate the `AWSAccessKeyId` and `Signature` strings.
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
+ if len(keySignFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+
+ return keySignFields[0], s3err.ErrNone
+}
+
+func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) {
+ v2Auth := r.Header.Get("Authorization")
+
+ accessKey, apiError := validateV2AuthHeader(v2Auth)
+ if apiError != s3err.ErrNone {
+ return nil, apiError
+ }
+
+ // Access credentials.
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ unescapedQueries, err := unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, s3err.ErrInvalidRequest
+ }
+
+ prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey)
+ if !strings.HasPrefix(v2Auth, prefix) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ v2Auth = v2Auth[len(prefix):]
+ expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header)
+ if !compareSignatureV2(v2Auth, expectedAuth) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ return ident, s3err.ErrNone
+}
+
+// doesPresignV2SignatureMatch - Verify query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+// returns ErrNone if matches. S3 errors otherwise.
+func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ var (
+ filteredQueries []string
+ gotSignature string
+ expires string
+ accessKey string
+ err error
+ )
+
+ var unescapedQueries []string
+ unescapedQueries, err = unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ // Extract the necessary values from presigned query, construct a list of new filtered queries.
+ for _, query := range unescapedQueries {
+ keyval := strings.SplitN(query, "=", 2)
+ if len(keyval) != 2 {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+ switch keyval[0] {
+ case "AWSAccessKeyId":
+ accessKey = keyval[1]
+ case "Signature":
+ gotSignature = keyval[1]
+ case "Expires":
+ expires = keyval[1]
+ default:
+ filteredQueries = append(filteredQueries, query)
+ }
+ }
+
+ // Invalid values returns error.
+ if accessKey == "" || gotSignature == "" || expires == "" {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Make sure the request has not expired.
+ expiresInt, err := strconv.ParseInt(expires, 10, 64)
+ if err != nil {
+ return nil, s3err.ErrMalformedExpires
+ }
+
+ // Check if the presigned URL has expired.
+ if expiresInt < time.Now().UTC().Unix() {
+ return nil, s3err.ErrExpiredPresignRequest
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, s3err.ErrInvalidRequest
+ }
+
+ expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+ if !compareSignatureV2(gotSignature, expectedSignature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+
+ return ident, s3err.ErrNone
+}
+
+// Unescape the encodedQuery string into a list of query params; returns an error
+// if any of the values fails to unescape.
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) {
+ for _, query := range strings.Split(encodedQuery, "&") {
+ var unescapedQuery string
+ unescapedQuery, err = url.QueryUnescape(query)
+ if err != nil {
+ return nil, err
+ }
+ unescapedQueries = append(unescapedQueries, unescapedQuery)
+ }
+ return unescapedQueries, nil
+}
+
+// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
+func getResource(path string, host string, domain string) (string, error) {
+ if domain == "" {
+ return path, nil
+ }
+ // If virtual-host-style is enabled construct the "resource" properly.
+ if strings.Contains(host, ":") {
+ // In bucket.mydomain.com:9000, strip out :9000
+ var err error
+ if host, _, err = net.SplitHostPort(host); err != nil {
+ return "", err
+ }
+ }
+ if !strings.HasSuffix(host, "."+domain) {
+ return path, nil
+ }
+ bucket := strings.TrimSuffix(host, "."+domain)
+ return "/" + pathJoin(bucket, path), nil
+}
+
+// pathJoin - like path.Join() but retains trailing "/" of the last element
+func pathJoin(elem ...string) string {
+ trailingSlash := ""
+ if len(elem) > 0 {
+ if strings.HasSuffix(elem[len(elem)-1], "/") {
+ trailingSlash = "/"
+ }
+ }
+ return path.Join(elem...) + trailingSlash
+}
+
+// Return the signature v2 of a given request.
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "")
+ signature := calculateSignatureV2(stringToSign, cred.SecretKey)
+ return signature
+}
+
+// Return string to sign under two different conditions.
+// - if expires string is set then string to sign includes date instead of the Date header.
+// - if expires string is empty then string to sign includes date header instead.
+func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string {
+ canonicalHeaders := canonicalizedAmzHeadersV2(headers)
+ if len(canonicalHeaders) > 0 {
+ canonicalHeaders += "\n"
+ }
+
+ date := expires // Date is set to expires date for presign operations.
+ if date == "" {
+ // If expires date is empty then request header Date is used.
+ date = headers.Get("Date")
+ }
+
+ // From the Amazon docs:
+ //
+ // StringToSign = HTTP-Verb + "\n" +
+ // Content-Md5 + "\n" +
+ // Content-Type + "\n" +
+ // Date/Expires + "\n" +
+ // CanonicalizedProtocolHeaders +
+ // CanonicalizedResource;
+ stringToSign := strings.Join([]string{
+ method,
+ headers.Get("Content-MD5"),
+ headers.Get("Content-Type"),
+ date,
+ canonicalHeaders,
+ }, "\n")
+
+ return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery)
+}
+
+// Return canonical resource string.
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string {
+ queries := strings.Split(encodedQuery, "&")
+ keyval := make(map[string]string)
+ for _, query := range queries {
+ key := query
+ val := ""
+ index := strings.Index(query, "=")
+ if index != -1 {
+ key = query[:index]
+ val = query[index+1:]
+ }
+ keyval[key] = val
+ }
+
+ var canonicalQueries []string
+ for _, key := range resourceList {
+ val, ok := keyval[key]
+ if !ok {
+ continue
+ }
+ if val == "" {
+ canonicalQueries = append(canonicalQueries, key)
+ continue
+ }
+ canonicalQueries = append(canonicalQueries, key+"="+val)
+ }
+
+ // The queries will be already sorted as resourceList is sorted, if canonicalQueries
+ // is empty strings.Join returns empty.
+ canonicalQuery := strings.Join(canonicalQueries, "&")
+ if canonicalQuery != "" {
+ return encodedResource + "?" + canonicalQuery
+ }
+ return encodedResource
+}
+
+// Return canonical headers.
+func canonicalizedAmzHeadersV2(headers http.Header) string {
+ var keys []string
+ keyval := make(map[string]string)
+ for key := range headers {
+ lkey := strings.ToLower(key)
+ if !strings.HasPrefix(lkey, "x-amz-") {
+ continue
+ }
+ keys = append(keys, lkey)
+ keyval[lkey] = strings.Join(headers[key], ",")
+ }
+ sort.Strings(keys)
+ var canonicalHeaders []string
+ for _, key := range keys {
+ canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key])
+ }
+ return strings.Join(canonicalHeaders, "\n")
+}
+
+func calculateSignatureV2(stringToSign string, secret string) string {
+ hm := hmac.New(sha1.New, []byte(secret))
+ hm.Write([]byte(stringToSign))
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
+
+// compareSignatureV2 returns true if and only if both signatures
+// are equal. The signatures are expected to be base64 encoded strings
+// according to the AWS S3 signature V2 spec.
+func compareSignatureV2(sig1, sig2 string) bool {
+ // Decode signature string to binary byte-sequence representation is required
+ // as Base64 encoding of a value is not unique:
+ // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice.
+ signature1, err := base64.StdEncoding.DecodeString(sig1)
+ if err != nil {
+ return false
+ }
+ signature2, err := base64.StdEncoding.DecodeString(sig2)
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(signature1, signature2) == 1
+}
+
+// Return signature-v2 for the presigned request.
+func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires)
+ return calculateSignatureV2(stringToSign, cred.SecretKey)
+}
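To make the V2 flow concrete, here is a standalone sketch (assuming a made-up secret key, date, and resource) of the string-to-sign layout that getStringToSignV2 produces and the HMAC-SHA1 step performed by calculateSignatureV2.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Mirrors getStringToSignV2: verb, MD5, type, date, canonical
	// x-amz-* headers (with trailing newline), then the resource.
	stringToSign := "GET\n" + // HTTP-Verb
		"\n" + // Content-MD5 (empty)
		"\n" + // Content-Type (empty)
		"Mon, 02 Jan 2006 15:04:05 GMT\n" + // Date
		"x-amz-meta-example:demo\n" + // CanonicalizedAmzHeaders
		"/bucket1/object.txt" // CanonicalizedResource

	// Mirrors calculateSignatureV2: HMAC-SHA1 of the string, base64-encoded.
	hm := hmac.New(sha1.New, []byte("some_secret_key"))
	hm.Write([]byte(stringToSign))
	sig := base64.StdEncoding.EncodeToString(hm.Sum(nil))
	fmt.Println("Authorization: AWS some_access_key1:" + sig)
}
```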
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
new file mode 100644
index 000000000..0df26e6fc
--- /dev/null
+++ b/weed/s3api/auth_signature_v4.go
@@ -0,0 +1,770 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/hex"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) {
+ sha256sum := getContentSha256Cksum(r)
+ switch {
+ case isRequestSignatureV4(r):
+ return iam.doesSignatureMatch(sha256sum, r)
+ case isRequestPresignedSignatureV4(r):
+ return iam.doesPresignedSignatureMatch(sha256sum, r)
+ }
+ return nil, s3err.ErrAccessDenied
+}
+
+// Streaming AWS Signature Version '4' constants.
+const (
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD"
+
+ // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
+ // client did not calculate sha256 of the payload.
+ unsignedPayload = "UNSIGNED-PAYLOAD"
+)
+
+// Returns SHA256 for calculating canonical-request.
+func getContentSha256Cksum(r *http.Request) string {
+ var (
+ defaultSha256Cksum string
+ v []string
+ ok bool
+ )
+
+ // For a presigned request we look at the query param for sha256.
+ if isRequestPresignedSignatureV4(r) {
+ // X-Amz-Content-Sha256, if not set in presigned requests, checksum
+ // will default to 'UNSIGNED-PAYLOAD'.
+ defaultSha256Cksum = unsignedPayload
+ v, ok = r.URL.Query()["X-Amz-Content-Sha256"]
+ if !ok {
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+ } else {
+ // X-Amz-Content-Sha256, if not set in signed requests, checksum
+ // will default to sha256([]byte("")).
+ defaultSha256Cksum = emptySHA256
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+
+ // We found 'X-Amz-Content-Sha256' return the captured value.
+ if ok {
+ return v[0]
+ }
+
+ // We couldn't find 'X-Amz-Content-Sha256'.
+ return defaultSha256Cksum
+}
+
+// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, err := parseSignV4(v4Auth)
+ if err != s3err.ErrNone {
+ return nil, err
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Extract the date; if not present, return an error.
+ var date string
+ if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" {
+ if date = r.Header.Get("Date"); date == "" {
+ return nil, s3err.ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ t, e := time.Parse(iso8601Format, date)
+ if e != nil {
+ return nil, s3err.ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get hashed Payload
+ if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
+ buf, _ := ioutil.ReadAll(r.Body)
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+ b, _ := ioutil.ReadAll(bytes.NewBuffer(buf))
+ if len(b) != 0 {
+ bodyHash := sha256.Sum256(b)
+ hashedPayload = hex.EncodeToString(bodyHash[:])
+ }
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey,
+ signV4Values.Credential.scope.date,
+ signV4Values.Credential.scope.region,
+ signV4Values.Credential.scope.service)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if signature match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+
+ // Return error none.
+ return identity, s3err.ErrNone
+}
+
+// credentialHeader data type represents structured form of Credential
+// string from authorization header.
+type credentialHeader struct {
+ accessKey string
+ scope struct {
+ date time.Time
+ region string
+ service string
+ request string
+ }
+}
+
+// signValues data type represents structured form of AWS Signature V4 header.
+type signValues struct {
+ Credential credentialHeader
+ SignedHeaders []string
+ Signature string
+}
+
+// Return scope string.
+func (c credentialHeader) getScope() string {
+ return strings.Join([]string{
+ c.scope.date.Format(yyyymmdd),
+ c.scope.region,
+ c.scope.service,
+ c.scope.request,
+ }, "/")
+}
+
+// Authorization: algorithm Credential=accessKeyID/credScope, \
+// SignedHeaders=signedHeaders, Signature=signature
+//
+func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) {
+ // Replace all spaced strings, some clients can send spaced
+ // parameters and some won't. So we pro-actively remove any spaces
+ // to make parsing easier.
+ v4Auth = strings.Replace(v4Auth, " ", "", -1)
+ if v4Auth == "" {
+ return sv, s3err.ErrAuthHeaderEmpty
+ }
+
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+ return sv, s3err.ErrSignatureVersionNotSupported
+ }
+
+ // Strip off the Algorithm prefix.
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+ authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+ if len(authFields) != 3 {
+ return sv, s3err.ErrMissingFields
+ }
+
+ // Initialize signature version '4' structured header.
+ signV4Values := signValues{}
+
+ var err s3err.ErrorCode
+ // Save credential values.
+ signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Save signed headers.
+ signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Save signature.
+ signV4Values.Signature, err = parseSignature(authFields[2])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Return the structure here.
+ return signV4Values, s3err.ErrNone
+}
+
+// parse credentialHeader string into its structured form.
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) {
+ creds := strings.Split(strings.TrimSpace(credElement), "=")
+ if len(creds) != 2 {
+ return ch, s3err.ErrMissingFields
+ }
+ if creds[0] != "Credential" {
+ return ch, s3err.ErrMissingCredTag
+ }
+ credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
+ if len(credElements) != 5 {
+ return ch, s3err.ErrCredMalformed
+ }
+ // Save access key id.
+ cred := credentialHeader{
+ accessKey: credElements[0],
+ }
+ var e error
+ cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
+ if e != nil {
+ return ch, s3err.ErrMalformedCredentialDate
+ }
+
+ cred.scope.region = credElements[2]
+ cred.scope.service = credElements[3] // "s3"
+ cred.scope.request = credElements[4] // "aws4_request"
+ return cred, s3err.ErrNone
+}
+
+// Parse slice of signed headers from signed headers tag.
+func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) {
+ signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
+ if len(signedHdrFields) != 2 {
+ return nil, s3err.ErrMissingFields
+ }
+ if signedHdrFields[0] != "SignedHeaders" {
+ return nil, s3err.ErrMissingSignHeadersTag
+ }
+ if signedHdrFields[1] == "" {
+ return nil, s3err.ErrMissingFields
+ }
+ signedHeaders := strings.Split(signedHdrFields[1], ";")
+ return signedHeaders, s3err.ErrNone
+}
+
+// Parse signature from signature tag.
+func parseSignature(signElement string) (string, s3err.ErrorCode) {
+ signFields := strings.Split(strings.TrimSpace(signElement), "=")
+ if len(signFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+ if signFields[0] != "Signature" {
+ return "", s3err.ErrMissingSignTag
+ }
+ if signFields[1] == "" {
+ return "", s3err.ErrMissingFields
+ }
+ signature := signFields[1]
+ return signature, s3err.ErrNone
+}
+
+// doesPolicySignatureMatch - Verify query headers with post policy
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+// returns ErrNone if the signature matches.
+func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode {
+
+ // Parse credential tag.
+ credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential"))
+ if err != s3err.ErrNone {
+ return s3err.ErrMissingFields
+ }
+
+ _, cred, found := iam.lookupByAccessKey(credHeader.accessKey)
+ if !found {
+ return s3err.ErrInvalidAccessKeyID
+ }
+
+ // Get signing key.
+ signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service)
+
+ // Get signature.
+ newSignature := getSignature(signingKey, formValues.Get("Policy"))
+
+ // Verify signature.
+ if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) {
+ return s3err.ErrSignatureDoesNotMatch
+ }
+
+ // Success.
+ return s3err.ErrNone
+}
+
+// check query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // Copy request
+ req := *r
+
+ // Parse request query string.
+ pSignValues, err := parsePreSignV4(req.URL.Query())
+ if err != s3err.ErrNone {
+ return nil, err
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+ // Construct new query.
+ query := make(url.Values)
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ query.Set("X-Amz-Content-Sha256", hashedPayload)
+ }
+
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+
+ now := time.Now().UTC()
+
+ // If the host which signed the request is slightly ahead in time (by less than
+ // the 15-minute skew allowed here) the request should still be allowed.
+ if pSignValues.Date.After(now.Add(15 * time.Minute)) {
+ return nil, s3err.ErrRequestNotReadyYet
+ }
+
+ if now.Sub(pSignValues.Date) > pSignValues.Expires {
+ return nil, s3err.ErrExpiredPresignRequest
+ }
+
+ // Save the date and expires.
+ t := pSignValues.Date
+ expireSeconds := int(pSignValues.Expires / time.Second)
+
+ // Construct the query.
+ query.Set("X-Amz-Date", t.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
+ query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders))
+ query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region))
+
+ // Save other headers available in the request parameters.
+ for k, v := range req.URL.Query() {
+
+ // Handle the metadata in presigned put query string
+ if strings.Contains(strings.ToLower(k), "x-amz-meta-") {
+ query.Set(k, v[0])
+ }
+
+ if strings.HasPrefix(strings.ToLower(k), "x-amz") {
+ continue
+ }
+ query[k] = v
+ }
+
+ // Get the encoded query.
+ encodedQuery := query.Encode()
+
+ // Verify if date query is same.
+ if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if expires query is same.
+ if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if signed headers query is same.
+ if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if credential query is same.
+ if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if sha256 payload query is same.
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") {
+ return nil, s3err.ErrContentSHA256Mismatch
+ }
+ }
+
+ // Finally, verify that the signatures match.
+
+ // Get canonical request.
+ presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
+
+ // Get hmac presigned signing key.
+ presignedSigningKey := getSigningKey(cred.SecretKey,
+ pSignValues.Credential.scope.date,
+ pSignValues.Credential.scope.region,
+ pSignValues.Credential.scope.service)
+
+ // Get new signature.
+ newSignature := getSignature(presignedSigningKey, presignedStringToSign)
+
+ // Verify signature.
+ if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ return identity, s3err.ErrNone
+}
+
+func contains(list []string, elem string) bool {
+ for _, t := range list {
+ if t == elem {
+ return true
+ }
+ }
+ return false
+}
+
+// preSignValues data type represents structured form of AWS Signature V4 query string.
+type preSignValues struct {
+ signValues
+ Date time.Time
+ Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+// querystring = X-Amz-Algorithm=algorithm
+// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+// querystring += &X-Amz-Date=date
+// querystring += &X-Amz-Expires=timeout interval
+// querystring += &X-Amz-SignedHeaders=signed_headers
+// querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode {
+ v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
+ for _, v4PresignQueryParam := range v4PresignQueryParams {
+ if _, ok := query[v4PresignQueryParam]; !ok {
+ return s3err.ErrInvalidQueryParams
+ }
+ }
+ return s3err.ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) {
+ var err s3err.ErrorCode
+ // verify whether the required query params exist.
+ err = doesV4PresignParamsExist(query)
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ // Verify if the query algorithm is supported or not.
+ if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+ return psv, s3err.ErrInvalidQuerySignatureAlgo
+ }
+
+ // Initialize signature version '4' structured header.
+ preSignV4Values := preSignValues{}
+
+ // Save credential.
+ preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ var e error
+ // Save date in native time.Time.
+ preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+ if e != nil {
+ return psv, s3err.ErrMalformedPresignedDate
+ }
+
+ // Save expires in native time.Duration.
+ preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+ if e != nil {
+ return psv, s3err.ErrMalformedExpires
+ }
+
+ if preSignV4Values.Expires < 0 {
+ return psv, s3err.ErrNegativeExpires
+ }
+
+ // Check if Expiry time is less than 7 days (value in seconds).
+ if preSignV4Values.Expires.Seconds() > 604800 {
+ return psv, s3err.ErrMaximumExpires
+ }
+
+ // Save signed headers.
+ preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ // Save signature.
+ preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ // Return structured form of signature query string.
+ return preSignV4Values, s3err.ErrNone
+}
+
+// extractSignedHeaders extract signed headers from Authorization header
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) {
+ reqHeaders := r.Header
+ // find whether "host" is part of list of signed headers.
+ // if not return ErrUnsignedHeaders. "host" is mandatory.
+ if !contains(signedHeaders, "host") {
+ return nil, s3err.ErrUnsignedHeaders
+ }
+ extractedSignedHeaders := make(http.Header)
+ for _, header := range signedHeaders {
+ // `host` will not be found in the headers, but can be found in r.Host.
+ // The list of signed headers must always contain "host".
+ val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+ if ok {
+ for _, enc := range val {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ continue
+ }
+ switch header {
+ case "expect":
+ // Golang http server strips off 'Expect' header, if the
+ // client sent this as part of signed headers we need to
+ // handle otherwise we would see a signature mismatch.
+ // `aws-cli` sets this as part of signed headers.
+ //
+ // According to
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+ // Expect header is always of form:
+ //
+ // Expect = "Expect" ":" 1#expectation
+ // expectation = "100-continue" | expectation-extension
+ //
+ // So it is safe to assume that '100-continue' is what would
+ // be sent; for the time being, keep this workaround.
+ // Adding a *TODO* to remove this later when Golang server
+ // doesn't filter out the 'Expect' header.
+ extractedSignedHeaders.Set(header, "100-continue")
+ case "host":
+ // Go http server removes "host" from Request.Header
+ extractedSignedHeaders.Set(header, r.Host)
+ case "transfer-encoding":
+ for _, enc := range r.TransferEncoding {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ case "content-length":
+ // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
+ // But some clients deviate from this rule. Hence we consider Content-Length for signature
+ // calculation to be compatible with such clients.
+ extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
+ default:
+ return nil, s3err.ErrUnsignedHeaders
+ }
+ }
+ return extractedSignedHeaders, s3err.ErrNone
+}
+
+// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names.
+func getSignedHeaders(signedHeaders http.Header) string {
+ var headers []string
+ for k := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getScope generates the scope string from a specific date, an AWS region, and a service.
+func getScope(t time.Time, region string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// getCanonicalRequest generate a canonical request of style
+//
+// canonicalRequest =
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+//
+func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
+ rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+ encodedPath := encodePath(urlPath)
+ canonicalRequest := strings.Join([]string{
+ method,
+ encodedPath,
+ rawQuery,
+ getCanonicalHeaders(extractedSignedHeaders),
+ getSignedHeaders(extractedSignedHeaders),
+ payload,
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSign generates the string to sign from the canonical request, timestamp, and scope.
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
+ stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+ return stringToSign
+}
+
+// sumHMAC calculates the HMAC-SHA256 of data keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getSigningKey derives the HMAC key used to calculate the final signature.
+func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
+ regionBytes := sumHMAC(date, []byte(region))
+ serviceBytes := sumHMAC(regionBytes, []byte(service))
+ signingKey := sumHMAC(serviceBytes, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getCanonicalHeaders generates the canonical list of request headers and their values.
+func getCanonicalHeaders(signedHeaders http.Header) string {
+ var headers []string
+ vals := make(http.Header)
+ for k, vv := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
+
+// if the object name matches this reserved pattern, there is no need to encode it
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// encodePath encodes a string from its UTF-8 byte representation into percent-encoded hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not fully support UTF-8:
+// non-English characters cannot be parsed correctly due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func encodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
+
+// compareSignatureV4 returns true if and only if both signatures
+// are equal. The signatures are expected to be HEX encoded strings
+// according to the AWS S3 signature V4 spec.
+func compareSignatureV4(sig1, sig2 string) bool {
+ // The constant-time compare on []byte(str) works because the hex encoding
+ // is unique for a given sequence of bytes. See also compareSignatureV2.
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1
+}
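The V4 scheme chains four HMAC-SHA256 derivations to build the signing key before signing the string-to-sign. Below is a self-contained sketch of what getSigningKey and getSignature compute; the secret, date, region, and canonical request are made up for illustration.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors sumHMAC above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "some_secret_key" // hypothetical

	// Derivation chain, as in getSigningKey: date -> region -> service -> request.
	dateKey := hmacSHA256([]byte("AWS4"+secret), []byte("20060102"))
	regionKey := hmacSHA256(dateKey, []byte("us-east-1"))
	serviceKey := hmacSHA256(regionKey, []byte("s3"))
	signingKey := hmacSHA256(serviceKey, []byte("aws4_request"))

	// getStringToSign produces: algorithm \n timestamp \n scope \n
	// hex(sha256(canonicalRequest)); the canonical request is a placeholder here.
	crHash := sha256.Sum256([]byte("<canonical request>"))
	stringToSign := "AWS4-HMAC-SHA256\n" +
		"20060102T150405Z\n" +
		"20060102/us-east-1/s3/aws4_request\n" +
		hex.EncodeToString(crHash[:])

	// getSignature: hex-encoded HMAC of the string to sign.
	fmt.Println(hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))))
}
```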
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
new file mode 100644
index 000000000..b47cd5f2d
--- /dev/null
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -0,0 +1,421 @@
+package s3api
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+ "unicode/utf8"
+)
+
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presigned signature version v4 detection.
+func TestIsRequestPresignedSignatureV4(t *testing.T) {
+ testCases := []struct {
+ inputQueryKey string
+ inputQueryValue string
+ expectedResult bool
+ }{
+ // Test case - 1.
+ // Test case without the "X-Amz-Credential" query key set.
+ {"", "", false},
+ // Test case - 2.
+ {"X-Amz-Credential", "", true},
+ // Test case - 3.
+ {"X-Amz-Content-Sha256", "", false},
+ }
+
+ for i, testCase := range testCases {
+ // creating an input HTTP request.
+ // Only the query parameters are relevant for this particular test.
+ inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ t.Fatalf("Error initializing input HTTP request: %v", err)
+ }
+ q := inputReq.URL.Query()
+ q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
+ inputReq.URL.RawQuery = q.Encode()
+
+ actualResult := isRequestPresignedSignatureV4(inputReq)
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests the request-authentication function and the S3 errors it replies with.
+func TestIsReqAuthenticated(t *testing.T) {
+ option := S3ApiServerOption{}
+ iam := NewIdentityAccessManagement(&option)
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ // List of test cases for validating http request authentication.
+ testCases := []struct {
+ req *http.Request
+ s3Error s3err.ErrorCode
+ }{
+ // When request is unsigned, access denied is returned.
+ {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
+ // When request is properly signed, error is none.
+ {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
+ }
+
+ // Validates all testcases.
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
+ ioutil.ReadAll(testCase.req.Body)
+ t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
+ }
+ }
+}
+
+func TestCheckAdminRequestAuthType(t *testing.T) {
+ option := S3ApiServerOption{}
+ iam := NewIdentityAccessManagement(&option)
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ testCases := []struct {
+ Request *http.Request
+ ErrCode s3err.ErrorCode
+ }{
+ {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
+ {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ }
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
+ t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
+ }
+ }
+}
+
+// Provides a fully populated http request instance, fails otherwise.
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req, err := newTestRequest(method, urlStr, contentLength, body)
+ if err != nil {
+ t.Fatalf("Unable to initialize new http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is signed with AWS Signature V4, fails if not able to do so.
+func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is presigned with AWS Signature V4, fails if not able to do so.
+func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// Returns new HTTP request object.
+func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
+ if method == "" {
+ method = "POST"
+ }
+
+ // Save for subsequent use
+ var hashedPayload string
+ var md5Base64 string
+ switch {
+ case body == nil:
+ hashedPayload = getSHA256Hash([]byte{})
+ default:
+ payloadBytes, err := ioutil.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ hashedPayload = getSHA256Hash(payloadBytes)
+ md5Base64 = getMD5HashBase64(payloadBytes)
+ }
+ // Seek back to beginning.
+ if body != nil {
+ body.Seek(0, 0)
+ } else {
+ body = bytes.NewReader([]byte(""))
+ }
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+ if md5Base64 != "" {
+ req.Header.Set("Content-Md5", md5Base64)
+ }
+ req.Header.Set("x-amz-content-sha256", hashedPayload)
+
+ // Add Content-Length
+ req.ContentLength = contentLength
+
+ return req, nil
+}
+
+// getSHA256Hash returns SHA-256 hash in hex encoding of given data.
+func getSHA256Hash(data []byte) string {
+ return hex.EncodeToString(getSHA256Sum(data))
+}
+
+// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
+func getMD5HashBase64(data []byte) string {
+ return base64.StdEncoding.EncodeToString(getMD5Sum(data))
+}
+
+// getSHA256Sum returns SHA-256 sum of given data.
+func getSHA256Sum(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Sum returns MD5 sum of given data.
+func getMD5Sum(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Hash returns MD5 hash in hex encoding of given data.
+func getMD5Hash(data []byte) string {
+ return hex.EncodeToString(getMD5Sum(data))
+}
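+
+// exampleDigestHeaders is a minimal sketch (illustrative only, not used by the
+// tests) of how the digest helpers above fit together when preparing request
+// headers; the payload is arbitrary example data.
+func exampleDigestHeaders() (sha256Hex, md5Base64 string) {
+ payload := []byte("hello world")
+ // x-amz-content-sha256 carries the hex-encoded SHA-256 of the payload.
+ sha256Hex = getSHA256Hash(payload)
+ // Content-Md5 carries the base64-encoded MD5 of the payload.
+ md5Base64 = getMD5HashBase64(payload)
+ return
+}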
+
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// Sign given request using Signature V4.
+func signRequestV4(req *http.Request, accessKey, secretKey string) error {
+ // Get hashed payload.
+ hashedPayload := req.Header.Get("x-amz-content-sha256")
+ if hashedPayload == "" {
+ return fmt.Errorf("Invalid hashed payload")
+ }
+
+ currTime := time.Now()
+
+ // Set x-amz-date.
+ req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
+
+ // Get header map.
+ headerMap := make(map[string][]string)
+ for k, vv := range req.Header {
+ // If request header key is not in ignored headers, then add it.
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
+ headerMap[strings.ToLower(k)] = vv
+ }
+ }
+
+ // Get header keys.
+ headers := []string{"host"}
+ for k := range headerMap {
+ headers = append(headers, k)
+ }
+ sort.Strings(headers)
+
+ region := "us-east-1"
+
+ // Get canonical headers.
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range headerMap[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ canonicalHeaders := buf.String()
+
+ // Get signed headers.
+ signedHeaders := strings.Join(headers, ";")
+
+ // Get canonical query string.
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+
+ // Get canonical URI.
+ canonicalURI := EncodePath(req.URL.Path)
+
+ // Get canonical request.
+ // canonicalRequest =
+ // <HTTPMethod>\n
+ // <CanonicalURI>\n
+ // <CanonicalQueryString>\n
+ // <CanonicalHeaders>\n
+ // <SignedHeaders>\n
+ // <HashedPayload>
+ //
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ canonicalURI,
+ req.URL.RawQuery,
+ canonicalHeaders,
+ signedHeaders,
+ hashedPayload,
+ }, "\n")
+
+ // Get scope.
+ scope := strings.Join([]string{
+ currTime.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+
+ stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
+ regionHMAC := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionHMAC, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+
+ signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+
+ // final Authorization header
+ parts := []string{
+ "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return nil
+}
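+
+// exampleSignRequest is a minimal sketch (illustrative only) of signing a
+// request with signRequestV4, using the hypothetical local endpoint and the
+// test credentials from the cases above.
+func exampleSignRequest() (*http.Request, error) {
+ req, err := newTestRequest("GET", "http://127.0.0.1:9000", 0, nil)
+ if err != nil {
+ return nil, err
+ }
+ // The Authorization header is set in place on success.
+ if err = signRequestV4(req, "access_key_1", "secret_key_1"); err != nil {
+ return nil, err
+ }
+ return req, nil
+}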
+
+// preSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
+ // Presigned URLs cannot be generated for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return errors.New("Presign cannot be generated without access and secret keys")
+ }
+
+ region := "us-east-1"
+ date := time.Now().UTC()
+ scope := getScope(date, region)
+ credential := fmt.Sprintf("%s/%s", accessKeyID, scope)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", date.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", "host")
+ query.Set("X-Amz-Credential", credential)
+ query.Set("X-Amz-Content-Sha256", unsignedPayload)
+
+ // "host" is the only header required to be signed for Presigned URLs.
+ extractedSignedHeaders := make(http.Header)
+ extractedSignedHeaders.Set("host", req.Host)
+
+ queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
+ stringToSign := getStringToSign(canonicalRequest, date, scope)
+ signingKey := getSigningKey(secretAccessKey, date, region, "s3")
+ signature := getSignature(signingKey, stringToSign)
+
+ req.URL.RawQuery = query.Encode()
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature)
+
+ // The request URL now carries the complete presigned query string.
+ return nil
+}
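+
+// examplePresignRequest is a minimal sketch (illustrative only) of presigning:
+// the signature travels in the query string instead of an Authorization
+// header. The endpoint and credentials are the same hypothetical test values.
+func examplePresignRequest() (string, error) {
+ req, err := newTestRequest("GET", "http://127.0.0.1:9000/bucket/object", 0, nil)
+ if err != nil {
+ return "", err
+ }
+ // Valid for ten minutes, mirroring mustNewPresignedRequest above.
+ if err = preSignV4(req, "access_key_1", "secret_key_1", int64(10*time.Minute.Seconds())); err != nil {
+ return "", err
+ }
+ return req.URL.String(), nil
+}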
+
+// EncodePath encodes a string from its UTF-8 byte representation into percent-encoded (hex) escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support all of UTF-8:
+// non-English characters cannot be parsed correctly due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ length := utf8.RuneLen(s)
+ if length < 0 {
+ // if utf8 cannot convert, return the same string as-is
+ return pathName
+ }
+ u := make([]byte, length)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hexStr := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hexStr)
+ }
+ }
+ }
+ return encodedPathname
+}
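+
+// Illustrative examples of EncodePath (values follow from the rules above):
+//
+// EncodePath("photos/2021/a.txt") // unchanged, matches the reserved pattern
+// EncodePath("a b") // "a%20b"
+// EncodePath("日本") // "%E6%97%A5%E6%9C%AC"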
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index 061fd4a92..b163ec2f6 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -21,17 +21,116 @@ package s3api
import (
"bufio"
"bytes"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
- "github.com/dustin/go-humanize"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "hash"
"io"
"net/http"
-)
+ "time"
-// Streaming AWS Signature Version '4' constants.
-const (
- streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ "github.com/dustin/go-humanize"
)
+// getChunkSignature - get chunk signature.
+func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string {
+
+ // Calculate string to sign.
+ stringToSign := signV4ChunkedAlgorithm + "\n" +
+ date.Format(iso8601Format) + "\n" +
+ getScope(date, region) + "\n" +
+ seedSignature + "\n" +
+ emptySHA256 + "\n" +
+ hashedChunk
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretKey, date, region, "s3")
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ return newSignature
+}
+
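+// The wire format for each streamed chunk is
+//
+// <hex-size>;chunk-signature=<signature>\r\n<chunk-data>\r\n
+//
+// and the signatures chain: each chunk's stringToSign embeds the previous
+// chunk's signature (the seed signature for the first chunk), so a tampered
+// chunk invalidates every chunk after it.
+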
+// calculateSeedSignature - Calculate seed signature in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+// returns the signature, or an error code if the signature mismatches or any
+// other error occurs while parsing and validating.
+func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode s3err.ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, errCode := parseSignV4(v4Auth)
+ if errCode != s3err.ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+
+ // Payload streaming.
+ payload := streamingContentSHA256
+
+ // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+ if payload != req.Header.Get("X-Amz-Content-Sha256") {
+ return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != s3err.ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+ // Verify if the access key id matches.
+ _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Verify if region is valid.
+ region = signV4Values.Credential.scope.region
+
+ // Extract date; if not present, return an error.
+ var dateStr string
+ if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
+ if dateStr = r.Header.Get("Date"); dateStr == "" {
+ return nil, "", "", time.Time{}, s3err.ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ var err error
+ date, err = time.Parse(iso8601Format, dateStr)
+ if err != nil {
+ return nil, "", "", time.Time{}, s3err.ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3")
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if signature match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch
+ }
+
+ // Return calculated signature.
+ return cred, newSignature, region, date, s3err.ErrNone
+}
+
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
// lineTooLong is generated as chunk header is bigger than 4KiB.
@@ -43,22 +142,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser {
- return &s3ChunkedReader{
- reader: bufio.NewReader(req.Body),
- state: readChunkHeader,
+func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) {
+ ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
}
+ return &s3ChunkedReader{
+ cred: ident,
+ reader: bufio.NewReader(req.Body),
+ seedSignature: seedSignature,
+ seedDate: seedDate,
+ region: region,
+ chunkSHA256Writer: sha256.New(),
+ state: readChunkHeader,
+ }, s3err.ErrNone
}
// Represents the overall state that is required for decoding an
// AWS Signature V4 chunked reader.
type s3ChunkedReader struct {
- reader *bufio.Reader
- state chunkState
- lastChunk bool
- chunkSignature string
- n uint64 // Unread bytes in chunk
- err error
+ cred *Credential
+ reader *bufio.Reader
+ seedSignature string
+ seedDate time.Time
+ region string
+ state chunkState
+ lastChunk bool
+ chunkSignature string
+ chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
+ n uint64 // Unread bytes in chunk
+ err error
}
// Read chunk reads the chunk token signature portion.
@@ -157,6 +270,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
return 0, cr.err
}
+ // Calculate sha256.
+ cr.chunkSHA256Writer.Write(rbuf[:n0])
+
// Update the bytes read into request buffer so far.
n += n0
buf = buf[n0:]
@@ -169,6 +285,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
continue
}
case verifyChunk:
+ // Calculate the hashed chunk.
+ hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
+ // Calculate the chunk signature.
+ newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
+ if !compareSignatureV4(cr.chunkSignature, newSignature) {
+ // The chunk signature doesn't match, so return a signature mismatch error.
+ cr.err = errors.New("chunk signature does not match")
+ return 0, cr.err
+ }
+ // The newly calculated signature becomes the seed for the next chunk;
+ // this implements the signature chaining.
+ cr.seedSignature = newSignature
+ cr.chunkSHA256Writer.Reset()
if cr.lastChunk {
cr.state = eofChunk
} else {
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index d3bde66ee..f882592c1 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -1,9 +1,9 @@
package s3api
import (
- "context"
"encoding/xml"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"path/filepath"
"strconv"
"strings"
@@ -11,10 +11,11 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/google/uuid"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/google/uuid"
)
type InitiateMultipartUploadResult struct {
@@ -22,18 +23,21 @@ type InitiateMultipartUploadResult struct {
s3.CreateMultipartUploadOutput
}
-func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
+
+ glog.V(2).Infof("createMultipartUpload input %v", input)
+
uploadId, _ := uuid.NewRandom()
uploadIdString := uploadId.String()
- if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
+ if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
entry.Extended["key"] = []byte(*input.Key)
}); err != nil {
glog.Errorf("NewMultipartUpload error: %v", err)
- return nil, ErrInternalError
+ return nil, s3err.ErrInternalError
}
output = &InitiateMultipartUploadResult{
@@ -52,14 +56,16 @@ type CompleteMultipartUploadResult struct {
s3.CompleteMultipartUploadOutput
}
-func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
+
+ glog.V(2).Infof("completeMultipartUpload input %v", input)
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
- entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0)
- if err != nil {
- glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
- return nil, ErrNoSuchUpload
+ entries, _, err := s3a.list(uploadDirectory, "", "", false, 0)
+ if err != nil || len(entries) == 0 {
+ glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
+ return nil, s3err.ErrNoSuchUpload
}
var finalParts []*filer_pb.FileChunk
@@ -69,11 +75,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
for _, chunk := range entry.Chunks {
p := &filer_pb.FileChunk{
- FileId: chunk.GetFileIdString(),
- Offset: offset,
- Size: chunk.Size,
- Mtime: chunk.Mtime,
- ETag: chunk.ETag,
+ FileId: chunk.GetFileIdString(),
+ Offset: offset,
+ Size: chunk.Size,
+ Mtime: chunk.Mtime,
+ CipherKey: chunk.CipherKey,
+ ETag: chunk.ETag,
}
finalParts = append(finalParts, p)
offset += int64(chunk.Size)
@@ -96,78 +103,103 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
dirName = dirName[:len(dirName)-1]
}
- err = s3a.mkFile(ctx, dirName, entryName, finalParts)
+ err = s3a.mkFile(dirName, entryName, finalParts)
if err != nil {
glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
- return nil, ErrInternalError
+ return nil, s3err.ErrInternalError
}
output = &CompleteMultipartUploadResult{
CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
Bucket: input.Bucket,
- ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""),
+ ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""),
Key: objectKey(input.Key),
},
}
- if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+ if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
}
return
}
-func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
+func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {
+
+ glog.V(2).Infof("abortMultipartUpload input %v", input)
- exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
+ exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
if err != nil {
glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
- return nil, ErrNoSuchUpload
+ return nil, s3err.ErrNoSuchUpload
}
if exists {
- err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+ err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
}
if err != nil {
glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
- return nil, ErrInternalError
+ return nil, s3err.ErrInternalError
}
- return &s3.AbortMultipartUploadOutput{}, ErrNone
+ return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone
}
type ListMultipartUploadsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"`
- s3.ListMultipartUploadsOutput
+
+ // fields copied from s3.ListMultipartUploadsOutput; the embedded struct's Uploads does not serialize as <Upload></Upload>
+ Bucket *string `type:"string"`
+ Delimiter *string `type:"string"`
+ EncodingType *string `type:"string" enum:"EncodingType"`
+ IsTruncated *bool `type:"boolean"`
+ KeyMarker *string `type:"string"`
+ MaxUploads *int64 `type:"integer"`
+ NextKeyMarker *string `type:"string"`
+ NextUploadIdMarker *string `type:"string"`
+ Prefix *string `type:"string"`
+ UploadIdMarker *string `type:"string"`
+ Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}
-func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+
+ glog.V(2).Infof("listMultipartUploads input %v", input)
output = &ListMultipartUploadsResult{
- ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{
- Bucket: input.Bucket,
- Delimiter: input.Delimiter,
- EncodingType: input.EncodingType,
- KeyMarker: input.KeyMarker,
- MaxUploads: input.MaxUploads,
- Prefix: input.Prefix,
- },
+ Bucket: input.Bucket,
+ Delimiter: input.Delimiter,
+ EncodingType: input.EncodingType,
+ KeyMarker: input.KeyMarker,
+ MaxUploads: input.MaxUploads,
+ Prefix: input.Prefix,
}
- entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+ entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, uint32(*input.MaxUploads))
if err != nil {
glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
return
}
+ output.IsTruncated = aws.Bool(!isLast)
for _, entry := range entries {
if entry.Extended != nil {
- key := entry.Extended["key"]
- output.Uploads = append(output.Uploads, &s3.MultipartUpload{
- Key: objectKey(aws.String(string(key))),
+ key := string(entry.Extended["key"])
+ if *input.KeyMarker != "" && *input.KeyMarker != key {
+ continue
+ }
+ if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) {
+ continue
+ }
+ output.Upload = append(output.Upload, &s3.MultipartUpload{
+ Key: objectKey(aws.String(key)),
UploadId: aws.String(entry.Name),
})
+ if !isLast {
+ output.NextUploadIdMarker = aws.String(entry.Name)
+ }
}
}
@@ -176,27 +208,41 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List
type ListPartsResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"`
- s3.ListPartsOutput
+
+ // fields copied from s3.ListPartsOutput; the embedded struct's Parts does not serialize as <Part></Part>
+ Bucket *string `type:"string"`
+ IsTruncated *bool `type:"boolean"`
+ Key *string `min:"1" type:"string"`
+ MaxParts *int64 `type:"integer"`
+ NextPartNumberMarker *int64 `type:"integer"`
+ PartNumberMarker *int64 `type:"integer"`
+ Part []*s3.Part `locationName:"Part" type:"list" flattened:"true"`
+ StorageClass *string `type:"string" enum:"StorageClass"`
+ UploadId *string `type:"string"`
}
-func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) {
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+
+ glog.V(2).Infof("listObjectParts input %v", input)
+
output = &ListPartsResult{
- ListPartsOutput: s3.ListPartsOutput{
- Bucket: input.Bucket,
- Key: objectKey(input.Key),
- UploadId: input.UploadId,
- MaxParts: input.MaxParts, // the maximum number of parts to return.
- PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive
- },
+ Bucket: input.Bucket,
+ Key: objectKey(input.Key),
+ UploadId: input.UploadId,
+ MaxParts: input.MaxParts, // the maximum number of parts to return.
+ PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive
+ StorageClass: aws.String("STANDARD"),
}
- entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
- "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
+ entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
if err != nil {
glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
- return nil, ErrNoSuchUpload
+ return nil, s3err.ErrNoSuchUpload
}
+ output.IsTruncated = aws.Bool(!isLast)
+
for _, entry := range entries {
if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
partNumberString := entry.Name[:len(entry.Name)-len(".part")]
@@ -205,12 +251,15 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts
glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
continue
}
- output.Parts = append(output.Parts, &s3.Part{
+ output.Part = append(output.Part, &s3.Part{
PartNumber: aws.Int64(int64(partNumber)),
- LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
- Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
- ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
+ LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
+ Size: aws.Int64(int64(filer.FileSize(entry))),
+ ETag: aws.String("\"" + filer.ETag(entry) + "\""),
})
+ if !isLast {
+ output.NextPartNumberMarker = aws.Int64(int64(partNumber))
+ }
}
}
diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go
index 835665dd6..f2568b6bc 100644
--- a/weed/s3api/filer_multipart_test.go
+++ b/weed/s3api/filer_multipart_test.go
@@ -4,6 +4,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"testing"
+ "time"
)
func TestInitiateMultipartUploadResult(t *testing.T) {
@@ -24,3 +25,25 @@ func TestInitiateMultipartUploadResult(t *testing.T) {
}
}
+
+func TestListPartsResult(t *testing.T) {
+
+ expected := `<?xml version="1.0" encoding="UTF-8"?>
+<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Part><ETag>&#34;12345678&#34;</ETag><LastModified>1970-01-01T00:00:00Z</LastModified><PartNumber>1</PartNumber><Size>123</Size></Part></ListPartsResult>`
+ response := &ListPartsResult{
+ Part: []*s3.Part{
+ {
+ PartNumber: aws.Int64(int64(1)),
+ LastModified: aws.Time(time.Unix(0, 0).UTC()),
+ Size: aws.Int64(int64(123)),
+ ETag: aws.String("\"12345678\""),
+ },
+ },
+ }
+
+ encoded := string(encodeResponse(response))
+ if encoded != expected {
+ t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
+ }
+
+}
diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go
index ed9612d35..1803332a3 100644
--- a/weed/s3api/filer_util.go
+++ b/weed/s3api/filer_util.go
@@ -3,164 +3,91 @@ package s3api
import (
"context"
"fmt"
- "io"
- "os"
- "strings"
- "time"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strings"
)
-func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- entry := &filer_pb.Entry{
- Name: dirName,
- IsDirectory: true,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0777 | os.ModeDir),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- }
+func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
- if fn != nil {
- fn(entry)
- }
+ return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn)
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
+}
- glog.V(1).Infof("mkdir: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("mkdir %v: %v", request, err)
- return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
- }
+func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
- return nil
- })
-}
+ return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks)
-func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- entry := &filer_pb.Entry{
- Name: fileName,
- IsDirectory: false,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0770),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- Chunks: chunks,
- }
+}
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
+func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) {
- glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("create file %v:%v", request, err)
- return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
+ err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error {
+ entries = append(entries, entry)
+ if isLastEntry {
+ isLast = true
}
-
return nil
- })
-}
+ }, startFrom, inclusive, limit)
-func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
-
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ if len(entries) == 0 {
+ isLast = true
+ }
- request := &filer_pb.ListEntriesRequest{
- Directory: parentDirectoryPath,
- Prefix: prefix,
- StartFromFileName: startFrom,
- InclusiveStartFrom: inclusive,
- Limit: uint32(limit),
- }
+ return
- glog.V(4).Infof("read directory: %v", request)
- stream, err := client.ListEntries(ctx, request)
- if err != nil {
- glog.V(0).Infof("read directory %v: %v", request, err)
- return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err)
- }
+}
- for {
- resp, recvErr := stream.Recv()
- if recvErr != nil {
- if recvErr == io.EOF {
- break
- } else {
- return recvErr
- }
- }
+func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error {
- entries = append(entries, resp.Entry)
+ return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err != nil {
+ return err
}
return nil
})
- return
-
}
-func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
-
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.DeleteEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
- IsDeleteData: isDeleteData,
- IsRecursive: isRecursive,
- }
+func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error {
+ request := &filer_pb.DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ }
- glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
- if _, err := client.DeleteEntry(ctx, request); err != nil {
- glog.V(0).Infof("delete entry %v: %v", request, err)
- return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
+ glog.V(0).Infof("delete entry %v: %v", request, err)
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ } else {
+ if resp.Error != "" {
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error)
}
-
- return nil
- })
-
+ }
+ return nil
}
-func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory)
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
- }
+}
- glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("exists entry %v: %v", request, err)
- return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
- }
+func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) {
- exists = resp.Entry.IsDirectory == isDirectory
+ return filer_pb.Touch(s3a, parentDirectoryPath, entryName, entry)
- return nil
- })
+}
- return
+func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) {
+ fullPath := util.NewFullPath(parentDirectoryPath, entryName)
+ return filer_pb.GetEntry(s3a, fullPath)
}
func objectKey(key *string) *string {
diff --git a/weed/s3api/filer_util_tags.go b/weed/s3api/filer_util_tags.go
new file mode 100644
index 000000000..75d3b37d0
--- /dev/null
+++ b/weed/s3api/filer_util_tags.go
@@ -0,0 +1,105 @@
+package s3api
+
+import (
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+)
+
+const (
+ S3TAG_PREFIX = xhttp.AmzObjectTagging + "-"
+)
+
+func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) {
+
+ err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ })
+ if err != nil {
+ return err
+ }
+ tags = make(map[string]string)
+ for k, v := range resp.Entry.Extended {
+ if strings.HasPrefix(k, S3TAG_PREFIX) {
+ tags[k[len(S3TAG_PREFIX):]] = string(v)
+ }
+ }
+ return nil
+ })
+ return
+}
+
+func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, tags map[string]string) (err error) {
+
+ return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ })
+ if err != nil {
+ return err
+ }
+
+ for k := range resp.Entry.Extended {
+ if strings.HasPrefix(k, S3TAG_PREFIX) {
+ delete(resp.Entry.Extended, k)
+ }
+ }
+
+ if resp.Entry.Extended == nil {
+ resp.Entry.Extended = make(map[string][]byte)
+ }
+ for k, v := range tags {
+ resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v)
+ }
+
+ return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: resp.Entry,
+ IsFromOtherCluster: false,
+ Signatures: nil,
+ })
+
+ })
+
+}
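+
+// A minimal sketch of the tag round trip (the directory and file names are
+// illustrative): setTags stores {"color": "red"} on the entry as
+// Extended["X-Amz-Tagging-color"] = "red", and getTags maps it back:
+//
+// _ = s3a.setTags("/buckets/b", "o.txt", map[string]string{"color": "red"})
+// tags, _ := s3a.getTags("/buckets/b", "o.txt") // map[color:red]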
+
+func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (err error) {
+
+ return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ })
+ if err != nil {
+ return err
+ }
+
+ hasDeletion := false
+ for k := range resp.Entry.Extended {
+ if strings.HasPrefix(k, S3TAG_PREFIX) {
+ delete(resp.Entry.Extended, k)
+ hasDeletion = true
+ }
+ }
+
+ if !hasDeletion {
+ return nil
+ }
+
+ return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: resp.Entry,
+ IsFromOtherCluster: false,
+ Signatures: nil,
+ })
+
+ })
+
+}
diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go
new file mode 100644
index 000000000..6614b0af0
--- /dev/null
+++ b/weed/s3api/http/header.go
@@ -0,0 +1,36 @@
+/*
+ * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package http
+
+// Standard S3 HTTP request constants
+const (
+ // S3 storage class
+ AmzStorageClass = "x-amz-storage-class"
+
+ // S3 user-defined metadata
+ AmzUserMetaPrefix = "X-Amz-Meta-"
+
+ // S3 object tagging
+ AmzObjectTagging = "X-Amz-Tagging"
+ AmzTagCount = "x-amz-tagging-count"
+)
+
+// Non-Standard S3 HTTP request constants
+const (
+ AmzIdentityId = "s3-identity-id"
+ AmzIsAdmin = "s3-is-admin" // only set on the http request header, as a context flag
+)
diff --git a/weed/s3api/policy/post-policy.go b/weed/s3api/policy/post-policy.go
new file mode 100644
index 000000000..5ef8d397d
--- /dev/null
+++ b/weed/s3api/policy/post-policy.go
@@ -0,0 +1,321 @@
+package policy
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "encoding/base64"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "net/http"
+ "strings"
+ "time"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+// policyCondition {
+// matchType: "$eq",
+// key: "$Content-Type",
+// value: "image/png",
+// }
+//
+type policyCondition struct {
+ matchType string
+ condition string
+ value string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+ // Expiration date and time of the POST policy.
+ expiration time.Time
+ // Collection of different policy conditions.
+ conditions []policyCondition
+ // ContentLengthRange minimum and maximum allowable size for the
+ // uploaded content.
+ contentLengthRange struct {
+ min int64
+ max int64
+ }
+
+ // Post form data.
+ formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+ p := &PostPolicy{}
+ p.conditions = make([]policyCondition, 0)
+ p.formData = make(map[string]string)
+ return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+ if t.IsZero() {
+ return errInvalidArgument("No expiry time set.")
+ }
+ p.expiration = t
+ return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Object name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$key",
+ value: key,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = key
+ return nil
+}
+
+// SetKeyStartsWith - Sets an object name prefix that a policy-based upload
+// can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+ if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
+ return errInvalidArgument("Object prefix is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$key",
+ value: keyStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = keyStartsWith
+ return nil
+}
+
+// SetBucket - Sets the bucket to which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+ return errInvalidArgument("Bucket name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$bucket",
+ value: bucketName,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["bucket"] = bucketName
+ return nil
+}
+
+// SetCondition - Sets condition for credentials, date and algorithm
+func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
+ if strings.TrimSpace(value) == "" || value == "" {
+ return errInvalidArgument("No value specified for condition")
+ }
+
+ policyCond := policyCondition{
+ matchType: matchType,
+ condition: "$" + condition,
+ value: value,
+ }
+ if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[condition] = value
+ return nil
+ }
+ return errInvalidArgument("Invalid condition in policy")
+}
+
+// SetContentType - Sets the content-type of the object for this
+// policy-based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+ if strings.TrimSpace(contentType) == "" || contentType == "" {
+ return errInvalidArgument("No content type specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Type",
+ value: contentType,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Type"] = contentType
+ return nil
+}
+
+// SetContentLengthRange - Set new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+ if min > max {
+ return errInvalidArgument("Minimum limit is larger than maximum limit.")
+ }
+ if min < 0 {
+ return errInvalidArgument("Minimum limit cannot be negative.")
+ }
+ if max < 0 {
+ return errInvalidArgument("Maximum limit cannot be negative.")
+ }
+ p.contentLengthRange.min = min
+ p.contentLengthRange.max = max
+ return nil
+}
+
+// SetSuccessActionRedirect - Sets the success redirect URL of the object for
+// this policy-based upload.
+func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
+ if strings.TrimSpace(redirect) == "" || redirect == "" {
+ return errInvalidArgument("Redirect is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_redirect",
+ value: redirect,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_redirect"] = redirect
+ return nil
+}
+
+// SetSuccessStatusAction - Sets the success status code of the object for
+// this policy-based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+ if strings.TrimSpace(status) == "" || status == "" {
+ return errInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
+// SetUserMetadata - Set user metadata as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ if strings.TrimSpace(value) == "" || value == "" {
+ return errInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// SetUserData - Set user data as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserData(key string, value string) error {
+ if key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ if value == "" {
+ return errInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+ if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+ return errInvalidArgument("Policy fields are empty.")
+ }
+ p.conditions = append(p.conditions, policyCond)
+ return nil
+}
+
+// String returns the policy as a JSON-formatted string.
+func (p PostPolicy) String() string {
+ return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides Marshaled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+ expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+ var conditionsStr string
+ conditions := []string{}
+ for _, po := range p.conditions {
+ conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+ }
+ if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+ conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+ p.contentLengthRange.min, p.contentLengthRange.max))
+ }
+ if len(conditions) > 0 {
+ conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+ }
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr = retStr + conditionsStr
+ retStr = retStr + "}"
+ return []byte(retStr)
+}
+
+// base64 - Produces the base64 encoding of the PostPolicy's marshalled JSON.
+func (p PostPolicy) base64() string {
+ return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
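+
+// examplePostPolicy is a minimal sketch (illustrative values only) of
+// assembling a policy with the setters above; the base64 form is what a
+// client places in the POST form's "policy" field.
+func examplePostPolicy() (string, error) {
+ p := NewPostPolicy()
+ if err := p.SetExpires(time.Now().UTC().Add(10 * time.Minute)); err != nil {
+ return "", err
+ }
+ if err := p.SetBucket("mybucket"); err != nil {
+ return "", err
+ }
+ if err := p.SetKey("photos/upload.txt"); err != nil {
+ return "", err
+ }
+ return p.base64(), nil
+}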
+
+// errInvalidArgument - Invalid argument response.
+func errInvalidArgument(message string) error {
+ return s3err.RESTErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+}
diff --git a/weed/s3api/policy/post-policy_test.go b/weed/s3api/policy/post-policy_test.go
new file mode 100644
index 000000000..ce241b723
--- /dev/null
+++ b/weed/s3api/policy/post-policy_test.go
@@ -0,0 +1,378 @@
+package policy
+
+/*
+ * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ iso8601DateFormat = "20060102T150405Z"
+ iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
+)
+
+func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey string, expiration time.Time) []byte {
+ t := time.Now().UTC()
+ // Add the expiration date.
+ expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
+ // Add the bucket condition, only accept buckets equal to the one passed.
+ bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
+ // Add the key condition, only accept keys equal to the one passed.
+ keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
+ // Add content length condition, only accept content sizes of a given length.
+ contentLengthCondStr := `["content-length-range", 1024, 1048576]`
+ // Add the algorithm condition, only accept AWS SignV4 Sha256.
+ algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
+ // Add the date condition, only accept the current date.
+ dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat))
+ // Add the credential string, only accept the credential passed.
+ credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential)
+ // Add the meta-uuid string, set to 1234
+ uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234")
+
+ // Combine all conditions into one string.
+ conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr,
+ keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr = retStr + conditionStr
+ retStr = retStr + "}"
+
+ return []byte(retStr)
+}
+
+// newPostPolicyBytesV4 - creates a bare bones postpolicy string with key and bucket matches.
+func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration time.Time) []byte {
+ t := time.Now().UTC()
+ // Add the expiration date.
+ expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
+ // Add the bucket condition, only accept buckets equal to the one passed.
+ bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
+ // Add the key condition, only accept keys equal to the one passed.
+ keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
+ // Add the algorithm condition, only accept AWS SignV4 Sha256.
+ algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
+ // Add the date condition, only accept the current date.
+ dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat))
+ // Add the credential string, only accept the credential passed.
+ credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential)
+ // Add the meta-uuid string, set to 1234
+ uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234")
+
+ // Combine all conditions into one string.
+ conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr = retStr + conditionStr
+ retStr = retStr + "}"
+
+ return []byte(retStr)
+}
+
+// newPostPolicyBytesV2 - creates a bare bones postpolicy string with key and bucket matches.
+func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []byte {
+ // Add the expiration date.
+ expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
+ // Add the bucket condition, only accept buckets equal to the one passed.
+ bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
+ // Add the key condition, only accept keys equal to the one passed.
+ keyConditionStr := fmt.Sprintf(`["starts-with", "$key", "%s/upload.txt"]`, objectKey)
+
+ // Combine all conditions into one string.
+ conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr = retStr + conditionStr
+ retStr = retStr + "}"
+
+ return []byte(retStr)
+}
+
+// postPresignSignatureV4 - presigned signature for PostPolicy requests.
+func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, t, location)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// copied from auth_signature_v4.go to break import loop
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// copied from auth_signature_v4.go to break import loop
+// getSigningKey hmac seed to calculate final signature.
+func getSigningKey(secretKey string, t time.Time, region string) []byte {
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102")))
+ regionBytes := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionBytes, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// copied from auth_signature_v4.go to break import loop
+// getSignature final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// copied from auth_signature_v4.go to break import loop
+func calculateSignatureV2(stringToSign string, secret string) string {
+ hm := hmac.New(sha1.New, []byte(secret))
+ hm.Write([]byte(stringToSign))
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
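+
+// exampleSignPolicy is a minimal sketch (throwaway values only) of the full
+// derivation chain: policy JSON -> base64 -> signing key -> hex signature.
+func exampleSignPolicy() string {
+ t := time.Now().UTC()
+ policy := newPostPolicyBytesV4("credential/string", "mybucket", "photos", t.Add(5*time.Minute))
+ encodedPolicy := base64.StdEncoding.EncodeToString(policy)
+ return postPresignSignatureV4(encodedPolicy, t, "secret_key_1", "us-east-1")
+}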
+
+func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secretKey string) (*http.Request, error) {
+ // Expire the request five minutes from now.
+ expirationTime := time.Now().UTC().Add(time.Minute * 5)
+ // Create a new post policy.
+ policy := newPostPolicyBytesV2(bucketName, objectName, expirationTime)
+ // Only the base64 encoding of the policy is needed.
+ encodedPolicy := base64.StdEncoding.EncodeToString(policy)
+
+ // Sign the policy with a V2 signature.
+ signature := calculateSignatureV2(encodedPolicy, secretKey)
+
+ formData := map[string]string{
+ "AWSAccessKeyId": accessKey,
+ "bucket": bucketName,
+ "key": objectName + "/${filename}",
+ "policy": encodedPolicy,
+ "signature": signature,
+ }
+
+ // Create the multipart form.
+ var buf bytes.Buffer
+ w := multipart.NewWriter(&buf)
+
+ // Set the normal formData
+ for k, v := range formData {
+ w.WriteField(k, v)
+ }
+ // Set the File formData
+ writer, err := w.CreateFormFile("file", "upload.txt")
+ if err != nil {
+ return nil, err
+ }
+ writer.Write([]byte("hello world"))
+ // Close before creating the new request.
+ w.Close()
+
+ // Use the assembled multipart form as the request body.
+ reader := bytes.NewReader(buf.Bytes())
+
+ req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set form content-type.
+ req.Header.Set("Content-Type", w.FormDataContentType())
+ return req, nil
+}
+
+func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName string, contentLengthRange bool) []byte {
+ // Expire the request five minutes from now.
+ expirationTime := t.Add(time.Minute * 5)
+
+ credStr := getCredentialString(accessKey, region, t)
+ // Create a new post policy.
+ policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime)
+ if contentLengthRange {
+ policy = newPostPolicyBytesV4WithContentRange(credStr, bucketName, objectName, expirationTime)
+ }
+ return policy
+}
+
+func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string,
+ t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
+ // Get the user credential.
+ credStr := getCredentialString(accessKey, region, t)
+
+ // Only the base64 encoding of the policy is needed.
+ encodedPolicy := base64.StdEncoding.EncodeToString(policy)
+
+ if corruptedB64 {
+ encodedPolicy = "%!~&" + encodedPolicy
+ }
+
+ // Presign with V4 signature based on the policy.
+ signature := postPresignSignatureV4(encodedPolicy, t, secretKey, region)
+
+ formData := map[string]string{
+ "bucket": bucketName,
+ "key": objectName + "/${filename}",
+ "x-amz-credential": credStr,
+ "policy": encodedPolicy,
+ "x-amz-signature": signature,
+ "x-amz-date": t.Format(iso8601DateFormat),
+ "x-amz-algorithm": "AWS4-HMAC-SHA256",
+ "x-amz-meta-uuid": "1234",
+ "Content-Encoding": "gzip",
+ }
+
+ // Add form data
+ for k, v := range addFormData {
+ formData[k] = v
+ }
+
+ // Create the multipart form.
+ var buf bytes.Buffer
+ w := multipart.NewWriter(&buf)
+
+ // Set the normal formData
+ for k, v := range formData {
+ w.WriteField(k, v)
+ }
+ // Set the file form field, but skip it (leaving the writer unclosed) when an incomplete multipart request is wanted
+ if !corruptedMultipart {
+ writer, err := w.CreateFormFile("file", "upload.txt")
+ if err != nil {
+ return nil, err
+ }
+ writer.Write(objData)
+ // Close before creating the new request.
+ w.Close()
+ }
+
+ // Use the assembled multipart form as the request body.
+ reader := bytes.NewReader(buf.Bytes())
+
+ req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set form content-type.
+ req.Header.Set("Content-Type", w.FormDataContentType())
+ return req, nil
+}
+
+func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
+ t := time.Now().UTC()
+ region := "us-east-1"
+ policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, true)
+ return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
+}
+
+func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
+ t := time.Now().UTC()
+ region := "us-east-1"
+ policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, false)
+ return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
+}
+
+// makeTestTargetURL constructs the URL for HTTP requests against a bucket or object.
+func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
+ urlStr := endPoint + "/"
+ if bucketName != "" {
+ urlStr = urlStr + bucketName + "/"
+ }
+ if objectName != "" {
+ urlStr = urlStr + EncodePath(objectName)
+ }
+ if len(queryValues) > 0 {
+ urlStr = urlStr + "?" + queryValues.Encode()
+ }
+ return urlStr
+}
+
+// if the object name matches this reserved pattern, there is no need to encode it
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes a path from its UTF-8 byte representation to percent-encoded (%XX) escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not fully support UTF-8:
+// non-English characters cannot be parsed reliably due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// essentially every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ runeLen := utf8.RuneLen(s)
+ if runeLen < 0 {
+ // if the rune is not valid UTF-8, return the string as is
+ return pathName
+ }
+ u := make([]byte, runeLen)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hexStr := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hexStr)
+ }
+ }
+ }
+ return encodedPathname
+}
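Two hedged examples of the behavior (inputs are placeholders): a name made up entirely of unreserved characters passes through untouched, anything else is percent-encoded byte by byte.

EncodePath("photos/2016/image.jpg") // "photos/2016/image.jpg" (matches the reserved pattern, unchanged)
EncodePath("a b/ü.txt")             // "a%20b/%C3%BC.txt" (space and multi-byte rune escaped)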
+
+// getCredentialString generates a credential string.
+func getCredentialString(accessKeyID, location string, t time.Time) string {
+ return accessKeyID + "/" + getScope(t, location)
+}
+
+// getScope generates the request scope string from a date, an AWS region, and the service name.
+func getScope(t time.Time, region string) string {
+ scope := strings.Join([]string{
+ t.Format("20060102"),
+ region,
+ string("s3"),
+ "aws4_request",
+ }, "/")
+ return scope
+}
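For a placeholder access key and a request dated 2016-07-27, these helpers produce:

t := time.Date(2016, 7, 27, 0, 0, 0, 0, time.UTC)
getScope(t, "us-east-1")                           // "20160727/us-east-1/s3/aws4_request"
getCredentialString("AKIDEXAMPLE", "us-east-1", t) // "AKIDEXAMPLE/20160727/us-east-1/s3/aws4_request"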
diff --git a/weed/s3api/policy/postpolicyform.go b/weed/s3api/policy/postpolicyform.go
new file mode 100644
index 000000000..3a6f3a882
--- /dev/null
+++ b/weed/s3api/policy/postpolicyform.go
@@ -0,0 +1,276 @@
+package policy
+
+/*
+ * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// startsWithConds - map which indicates whether a given condition supports the starts-with policy operator
+var startsWithConds = map[string]bool{
+ "$acl": true,
+ "$bucket": false,
+ "$cache-control": true,
+ "$content-type": true,
+ "$content-disposition": true,
+ "$content-encoding": true,
+ "$expires": true,
+ "$key": true,
+ "$success_action_redirect": true,
+ "$redirect": true,
+ "$success_action_status": false,
+ "$x-amz-algorithm": false,
+ "$x-amz-credential": false,
+ "$x-amz-date": false,
+}
+
+// Policy condition operators.
+const (
+ policyCondEqual = "eq"
+ policyCondStartsWith = "starts-with"
+ policyCondContentLength = "content-length-range"
+)
+
+// toString - Safely convert interface to string without causing panic.
+func toString(val interface{}) string {
+ switch v := val.(type) {
+ case string:
+ return v
+ default:
+ return ""
+ }
+}
+
+// toLowerString - safely convert interface to lower string
+func toLowerString(val interface{}) string {
+ return strings.ToLower(toString(val))
+}
+
+// toInteger - Safely convert interface to integer without causing panic.
+func toInteger(val interface{}) (int64, error) {
+ switch v := val.(type) {
+ case float64:
+ return int64(v), nil
+ case int64:
+ return v, nil
+ case int:
+ return int64(v), nil
+ case string:
+ i, err := strconv.Atoi(v)
+ return int64(i), err
+ default:
+ return 0, errors.New("Invalid number format")
+ }
+}
+
+// isString - Safely check if val is of type string without causing panic.
+func isString(val interface{}) bool {
+ _, ok := val.(string)
+ return ok
+}
+
+// contentLengthRange - policy content-length-range field.
+type contentLengthRange struct {
+ Min int64
+ Max int64
+ Valid bool // If content-length-range was part of policy
+}
+
+// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
+type PostPolicyForm struct {
+ Expiration time.Time // Expiration date and time of the POST policy.
+ Conditions struct { // Conditional policy structure.
+ Policies []struct {
+ Operator string
+ Key string
+ Value string
+ }
+ ContentLengthRange contentLengthRange
+ }
+}
+
+// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
+func ParsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
+ // Convert the policy JSON into interface values and
+ // perform strict type conversion using reflection.
+ var rawPolicy struct {
+ Expiration string `json:"expiration"`
+ Conditions []interface{} `json:"conditions"`
+ }
+
+ err := json.Unmarshal([]byte(policy), &rawPolicy)
+ if err != nil {
+ return ppf, err
+ }
+
+ parsedPolicy := PostPolicyForm{}
+
+ // Parse expiry time.
+ parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
+ if err != nil {
+ return ppf, err
+ }
+
+ // Parse conditions.
+ for _, val := range rawPolicy.Conditions {
+ switch condt := val.(type) {
+ case map[string]interface{}: // Handle key:value map types.
+ for k, v := range condt {
+ if !isString(v) { // Pre-check value type.
+ // All values must be of type string.
+ return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt)
+ }
+ // {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
+ // In this case we will just collapse this into "eq" for all use cases.
+ parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct {
+ Operator string
+ Key string
+ Value string
+ }{
+ policyCondEqual, "$" + strings.ToLower(k), toString(v),
+ })
+ }
+ case []interface{}: // Handle array types.
+ if len(condt) != 3 { // Return error if we have insufficient elements.
+ return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String())
+ }
+ switch toLowerString(condt[0]) {
+ case policyCondEqual, policyCondStartsWith:
+ for _, v := range condt { // Pre-check all values for type.
+ if !isString(v) {
+ // All values must be of type string.
+ return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt)
+ }
+ }
+ operator, matchType, value := toLowerString(condt[0]), toLowerString(condt[1]), toString(condt[2])
+ if !strings.HasPrefix(matchType, "$") {
+ return parsedPolicy, fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", operator, matchType, value)
+ }
+ parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct {
+ Operator string
+ Key string
+ Value string
+ }{
+ operator, matchType, value,
+ })
+ case policyCondContentLength:
+ min, err := toInteger(condt[1])
+ if err != nil {
+ return parsedPolicy, err
+ }
+
+ max, err := toInteger(condt[2])
+ if err != nil {
+ return parsedPolicy, err
+ }
+
+ parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{
+ Min: min,
+ Max: max,
+ Valid: true,
+ }
+ default:
+ // Condition should be valid.
+ return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form",
+ reflect.TypeOf(condt).String(), condt)
+ }
+ default:
+ return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form",
+ condt, reflect.TypeOf(condt).String())
+ }
+ }
+ return parsedPolicy, nil
+}
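As a rough illustration (all values are placeholders), a policy document of the shape below parses into one eq condition, one starts-with condition, and a content-length range:

doc := `{
  "expiration": "2021-01-01T00:00:00.000Z",
  "conditions": [
    {"bucket": "testbucket"},
    ["starts-with", "$key", "user/user1/"],
    ["content-length-range", 1048576, 10485760]
  ]
}`
ppf, err := ParsePostPolicyForm(doc)
if err != nil { /* malformed policy document */ }
// ppf.Conditions.Policies -> {eq $bucket testbucket}, {starts-with $key user/user1/}
// ppf.Conditions.ContentLengthRange -> {Min: 1048576, Max: 10485760, Valid: true}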
+
+// checkPolicyCond returns a boolean to indicate if a condition is satisfied according
+// to the passed operator
+func checkPolicyCond(op string, input1, input2 string) bool {
+ switch op {
+ case policyCondEqual:
+ return input1 == input2
+ case policyCondStartsWith:
+ return strings.HasPrefix(input1, input2)
+ }
+ return false
+}
+
+// CheckPostPolicy - apply policy conditions and validate input values.
+// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html)
+func CheckPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) error {
+ // Check that the policy document's expiration has not yet been reached
+ if !postPolicyForm.Expiration.After(time.Now().UTC()) {
+ return fmt.Errorf("Invalid according to Policy: Policy expired")
+ }
+ // map to store the metadata
+ metaMap := make(map[string]string)
+ for _, policy := range postPolicyForm.Conditions.Policies {
+ if strings.HasPrefix(policy.Key, "$x-amz-meta-") {
+ formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$"))
+ metaMap[formCanonicalName] = policy.Value
+ }
+ }
+ // Check if any extra metadata field is passed as input
+ for key := range formValues {
+ if strings.HasPrefix(key, "X-Amz-Meta-") {
+ if _, ok := metaMap[key]; !ok {
+ return fmt.Errorf("Invalid according to Policy: Extra input fields: %s", key)
+ }
+ }
+ }
+
+ // Flag to indicate if all policy conditions are satisfied
+ var condPassed bool
+
+ // Iterate over policy conditions and check them against received form fields
+ for _, policy := range postPolicyForm.Conditions.Policies {
+ // Form field names are in canonical format; convert condition names
+ // to canonical form too for simplicity, so `$key` becomes `Key`
+ formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$"))
+ // Operator for the current policy condition
+ op := policy.Operator
+ // If the current policy condition is known
+ if startsWithSupported, condFound := startsWithConds[policy.Key]; condFound {
+ // Check if the current condition supports starts-with operator
+ if op == policyCondStartsWith && !startsWithSupported {
+ return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
+ }
+ // Check if current policy condition is satisfied
+ condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
+ if !condPassed {
+ return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
+ }
+ } else {
+ // This covers all conditions X-Amz-Meta-* and X-Amz-*
+ if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
+ // Check if policy condition is satisfied
+ condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
+ if !condPassed {
+ return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
+ }
+ }
+ }
+ }
+
+ return nil
+}
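Continuing the parsing sketch above (reusing ppf), a minimal check of form values against the parsed policy; field names are canonicalized, so the `$key` condition is matched against the `Key` form value:

formValues := make(http.Header)
formValues.Set("Bucket", "testbucket")
formValues.Set("Key", "user/user1/photo.jpg")
if err := CheckPostPolicy(formValues, ppf); err != nil {
	// any failed condition yields an "Invalid according to Policy: ..." error
}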
diff --git a/weed/s3api/policy/postpolicyform_test.go b/weed/s3api/policy/postpolicyform_test.go
new file mode 100644
index 000000000..1a9d78b0e
--- /dev/null
+++ b/weed/s3api/policy/postpolicyform_test.go
@@ -0,0 +1,106 @@
+package policy
+
+/*
+ * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "testing"
+ "time"
+)
+
+// Test Post Policy parsing and checking conditions
+func TestPostPolicyForm(t *testing.T) {
+ pp := NewPostPolicy()
+ pp.SetBucket("testbucket")
+ pp.SetContentType("image/jpeg")
+ pp.SetUserMetadata("uuid", "14365123651274")
+ pp.SetKeyStartsWith("user/user1/filename")
+ pp.SetContentLengthRange(1048579, 10485760)
+ pp.SetSuccessStatusAction("201")
+
+ type testCase struct {
+ Bucket string
+ Key string
+ XAmzDate string
+ XAmzAlgorithm string
+ XAmzCredential string
+ XAmzMetaUUID string
+ ContentType string
+ SuccessActionStatus string
+ Policy string
+ Expired bool
+ expectedErr error
+ }
+
+ testCases := []testCase{
+ // Everything is fine with this test
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: nil},
+ // Expired policy document
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Expired: true, expectedErr: fmt.Errorf("Invalid according to Policy: Policy expired")},
+ // Different AMZ date
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "2017T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Key which doesn't start with user/user1/filename
+ {Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Incorrect bucket name.
+ {Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Incorrect key name
+ {Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Incorrect date
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Incorrect ContentType
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
+ // Incorrect Metadata
+ {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "151274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]")},
+ }
+ // Validate all the test cases.
+ for i, tt := range testCases {
+ formValues := make(http.Header)
+ formValues.Set("Bucket", tt.Bucket)
+ formValues.Set("Key", tt.Key)
+ formValues.Set("Content-Type", tt.ContentType)
+ formValues.Set("X-Amz-Date", tt.XAmzDate)
+ formValues.Set("X-Amz-Meta-Uuid", tt.XAmzMetaUUID)
+ formValues.Set("X-Amz-Algorithm", tt.XAmzAlgorithm)
+ formValues.Set("X-Amz-Credential", tt.XAmzCredential)
+ if tt.Expired {
+ // Expired already.
+ pp.SetExpires(time.Now().UTC().AddDate(0, 0, -10))
+ } else {
+ // Expires in 10 days.
+ pp.SetExpires(time.Now().UTC().AddDate(0, 0, 10))
+ }
+
+ formValues.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String())))
+ formValues.Set("Success_action_status", tt.SuccessActionStatus)
+ policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String())))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ postPolicyForm, err := ParsePostPolicyForm(string(policyBytes))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = CheckPostPolicy(formValues, postPolicyForm)
+ if err != nil && tt.expectedErr != nil && err.Error() != tt.expectedErr.Error() {
+ t.Fatalf("Test %d:, Expected %s, got %s", i+1, tt.expectedErr.Error(), err.Error())
+ }
+ }
+}
diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go
new file mode 100644
index 000000000..4e484ac98
--- /dev/null
+++ b/weed/s3api/s3_constants/s3_actions.go
@@ -0,0 +1,9 @@
+package s3_constants
+
+const (
+ ACTION_READ = "Read"
+ ACTION_WRITE = "Write"
+ ACTION_ADMIN = "Admin"
+ ACTION_TAGGING = "Tagging"
+ ACTION_LIST = "List"
+)
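These action names are what the IAM layer checks per request; a hedged sketch mirroring how the handlers use them (canDo is the identity method referenced by the bucket handlers):

if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, bucket) {
	// the identity is not allowed to list this bucket; skip or reject
}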
diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go
index b680fe1e1..bf5cf5fab 100644
--- a/weed/s3api/s3api_auth.go
+++ b/weed/s3api/s3api_auth.go
@@ -9,6 +9,8 @@ import (
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
signV2Algorithm = "AWS"
+ iso8601Format = "20060102T150405Z"
+ yyyymmdd = "20060102"
)
// Verify if request has JWT.
@@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool {
// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
- return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
- strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm))
+ return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
+ strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)
}
// Verify if request has AWS PreSign Version '4'.
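For context, the two signature schemes are distinguished purely by the Authorization header prefix; hedged examples with placeholder credentials:

// Signature V2: "AWS " + accessKey + ":" + base64(HMAC-SHA1 signature)
r.Header.Set("Authorization", "AWS AKIDEXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=")
// Signature V4: algorithm name followed by Credential, SignedHeaders and Signature fields
r.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20160727/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=<hex>")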
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 492d94616..48e8cb047 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -4,21 +4,19 @@ import (
"context"
"encoding/xml"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"math"
"net/http"
- "os"
"time"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gorilla/mux"
-)
-
-var (
- OS_UID = uint32(os.Getuid())
- OS_GID = uint32(os.Getgid())
)
type ListAllMyBucketsResult struct {
@@ -29,29 +27,44 @@ type ListAllMyBucketsResult struct {
func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ if s3a.iam.isEnabled() {
+ identity, s3Err = s3a.iam.authUser(r)
+ if s3Err != s3err.ErrNone {
+ writeErrorResponse(w, s3Err, r.URL)
+ return
+ }
+ }
+
var response ListAllMyBucketsResult
- entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32)
+ entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
if err != nil {
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
+ identityId := r.Header.Get(xhttp.AmzIdentityId)
+
var buckets []*s3.Bucket
for _, entry := range entries {
if entry.IsDirectory {
+ if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name) {
+ continue
+ }
buckets = append(buckets, &s3.Bucket{
Name: aws.String(entry.Name),
- CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)),
+ CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()),
})
}
}
response = ListAllMyBucketsResult{
Owner: &s3.Owner{
- ID: aws.String(""),
- DisplayName: aws.String(""),
+ ID: aws.String(identityId),
+ DisplayName: aws.String(identityId),
},
Buckets: buckets,
}
@@ -61,12 +74,51 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
+
+ // avoid duplicated buckets
+ errCode := s3err.ErrNone
+ if err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{
+ IncludeEcVolumes: true,
+ IncludeNormalVolumes: true,
+ }); err != nil {
+ glog.Errorf("list collection: %v", err)
+ return fmt.Errorf("list collections: %v", err)
+ } else {
+ for _, c := range resp.Collections {
+ if bucket == c.Name {
+ errCode = s3err.ErrBucketAlreadyExists
+ break
+ }
+ }
+ }
+ return nil
+ }); err != nil {
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ return
+ }
+ if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist {
+ errCode = s3err.ErrBucketAlreadyExists
+ }
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ fn := func(entry *filer_pb.Entry) {
+ if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
+ if entry.Extended == nil {
+ entry.Extended = make(map[string][]byte)
+ }
+ entry.Extended[xhttp.AmzIdentityId] = []byte(identityId)
+ }
+ }
// create the folder for bucket, but lazily create actual collection
- if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil {
- writeErrorResponse(w, ErrInternalError, r.URL)
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
+ glog.Errorf("PutBucketHandler mkdir: %v", err)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@@ -75,11 +127,14 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
- ctx := context.Background()
- err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+ writeErrorResponse(w, err, r.URL)
+ return
+ }
+
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// delete collection
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
@@ -87,17 +142,17 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
}
glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
- if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
+ if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil {
return fmt.Errorf("delete collection %s: %v", bucket, err)
}
return nil
})
- err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true)
+ err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)
if err != nil {
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@@ -106,30 +161,42 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- ctx := context.Background()
+ bucket, _ := getBucketAndObject(r)
- err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+ writeErrorResponse(w, err, r.URL)
+ return
+ }
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: s3a.option.BucketsPath,
- Name: bucket,
- }
+ writeSuccessResponseEmpty(w)
+}
- glog.V(1).Infof("lookup bucket: %v", request)
- if _, err := client.LookupDirectoryEntry(ctx, request); err != nil {
- return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
- }
+func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode {
+ entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket)
+ if entry == nil || err == filer_pb.ErrNotFound {
+ return s3err.ErrNoSuchBucket
+ }
- return nil
- })
+ if !s3a.hasAccess(r, entry) {
+ return s3err.ErrAccessDenied
+ }
+ return s3err.ErrNone
+}
- if err != nil {
- writeErrorResponse(w, ErrNoSuchBucket, r.URL)
- return
+func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
+ isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != ""
+ if isAdmin {
+ return true
+ }
+ if entry.Extended == nil {
+ return true
}
- writeSuccessResponseEmpty(w)
+ identityId := r.Header.Get(xhttp.AmzIdentityId)
+ if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok {
+ if identityId != string(id) {
+ return false
+ }
+ }
+ return true
}
diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go
deleted file mode 100644
index 7ba55ed28..000000000
--- a/weed/s3api/s3api_errors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package s3api
-
-import (
- "encoding/xml"
- "net/http"
-)
-
-// APIError structure
-type APIError struct {
- Code string
- Description string
- HTTPStatusCode int
-}
-
-// RESTErrorResponse - error response format
-type RESTErrorResponse struct {
- XMLName xml.Name `xml:"Error" json:"-"`
- Code string `xml:"Code" json:"Code"`
- Message string `xml:"Message" json:"Message"`
- Resource string `xml:"Resource" json:"Resource"`
- RequestID string `xml:"RequestId" json:"RequestId"`
-}
-
-// ErrorCode type of error status.
-type ErrorCode int
-
-// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
-const (
- ErrNone ErrorCode = iota
- ErrMethodNotAllowed
- ErrBucketNotEmpty
- ErrBucketAlreadyExists
- ErrBucketAlreadyOwnedByYou
- ErrNoSuchBucket
- ErrNoSuchUpload
- ErrInvalidBucketName
- ErrInvalidDigest
- ErrInvalidMaxKeys
- ErrInvalidMaxUploads
- ErrInvalidMaxParts
- ErrInvalidPartNumberMarker
- ErrInvalidPart
- ErrInternalError
- ErrNotImplemented
-)
-
-// error code to APIError structure, these fields carry respective
-// descriptions for all the error responses.
-var errorCodeResponse = map[ErrorCode]APIError{
- ErrMethodNotAllowed: {
- Code: "MethodNotAllowed",
- Description: "The specified method is not allowed against this resource.",
- HTTPStatusCode: http.StatusMethodNotAllowed,
- },
- ErrBucketNotEmpty: {
- Code: "BucketNotEmpty",
- Description: "The bucket you tried to delete is not empty",
- HTTPStatusCode: http.StatusConflict,
- },
- ErrBucketAlreadyExists: {
- Code: "BucketAlreadyExists",
- Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",
- HTTPStatusCode: http.StatusConflict,
- },
- ErrBucketAlreadyOwnedByYou: {
- Code: "BucketAlreadyOwnedByYou",
- Description: "Your previous request to create the named bucket succeeded and you already own it.",
- HTTPStatusCode: http.StatusConflict,
- },
- ErrInvalidBucketName: {
- Code: "InvalidBucketName",
- Description: "The specified bucket is not valid.",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrInvalidDigest: {
- Code: "InvalidDigest",
- Description: "The Content-Md5 you specified is not valid.",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrInvalidMaxUploads: {
- Code: "InvalidArgument",
- Description: "Argument max-uploads must be an integer between 0 and 2147483647",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrInvalidMaxKeys: {
- Code: "InvalidArgument",
- Description: "Argument maxKeys must be an integer between 0 and 2147483647",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrInvalidMaxParts: {
- Code: "InvalidArgument",
- Description: "Argument max-parts must be an integer between 0 and 2147483647",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrInvalidPartNumberMarker: {
- Code: "InvalidArgument",
- Description: "Argument partNumberMarker must be an integer.",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrNoSuchBucket: {
- Code: "NoSuchBucket",
- Description: "The specified bucket does not exist",
- HTTPStatusCode: http.StatusNotFound,
- },
- ErrNoSuchUpload: {
- Code: "NoSuchUpload",
- Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
- HTTPStatusCode: http.StatusNotFound,
- },
- ErrInternalError: {
- Code: "InternalError",
- Description: "We encountered an internal error, please try again.",
- HTTPStatusCode: http.StatusInternalServerError,
- },
-
- ErrInvalidPart: {
- Code: "InvalidPart",
- Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
- HTTPStatusCode: http.StatusBadRequest,
- },
- ErrNotImplemented: {
- Code: "NotImplemented",
- Description: "A header you provided implies functionality that is not implemented",
- HTTPStatusCode: http.StatusNotImplemented,
- },
-}
-
-// getAPIError provides API Error for input API error code.
-func getAPIError(code ErrorCode) APIError {
- return errorCodeResponse[code]
-}
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
index 127be07e3..6935c75bd 100644
--- a/weed/s3api/s3api_handlers.go
+++ b/weed/s3api/s3api_handlers.go
@@ -2,17 +2,20 @@ package s3api
import (
"bytes"
- "context"
"encoding/base64"
"encoding/xml"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
+ "strconv"
"time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type mimeType string
@@ -37,30 +40,35 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}
-func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&S3ApiServer{})
+
+func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
}
+func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
+}
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
- writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
+ writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}
-func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) {
- apiError := getAPIError(errorCode)
+func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
+ apiError := s3err.GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
-func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
- return RESTErrorResponse{
+func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
+ return s3err.RESTErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
@@ -70,13 +78,19 @@ func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
+ if response != nil {
+ w.Header().Set("Content-Length", strconv.Itoa(len(response)))
+ }
if mType != mimeNone {
w.Header().Set("Content-Type", string(mType))
}
w.WriteHeader(statusCode)
if response != nil {
glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
- w.Write(response)
+ _, err := w.Write(response)
+ if err != nil {
+ glog.V(0).Infof("write err: %v", err)
+ }
w.(http.Flusher).Flush()
}
}
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
new file mode 100644
index 000000000..84a85fd78
--- /dev/null
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -0,0 +1,174 @@
+package s3api
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ weed_server "github.com/chrislusf/seaweedfs/weed/server"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ dstBucket, dstObject := getBucketAndObject(r)
+
+ // Copy source path.
+ cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
+ if err != nil {
+ // Save unescaped string as is.
+ cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
+ }
+
+ srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
+
+ if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) {
+ fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
+ dir, name := fullPath.DirAndName()
+ entry, err := s3a.getEntry(dir, name)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ }
+ entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r))
+ err = s3a.touch(dir, name, entry)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ }
+ writeSuccessResponseXML(w, encodeResponse(CopyObjectResult{
+ ETag: fmt.Sprintf("%x", entry.Attributes.Md5),
+ LastModified: time.Now().UTC(),
+ }))
+ return
+ }
+
+ // If source object is empty or bucket is empty, reply back invalid copy source.
+ if srcObject == "" || srcBucket == "" {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ return
+ }
+
+ if srcBucket == dstBucket && srcObject == dstObject {
+ writeErrorResponse(w, s3err.ErrInvalidCopyDest, r.URL)
+ return
+ }
+
+ dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
+ s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket)
+ srcUrl := fmt.Sprintf("http://%s%s/%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject)
+
+ _, _, resp, err := util.DownloadFile(srcUrl)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ return
+ }
+ defer util.CloseResponse(resp)
+
+ glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
+ etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
+
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ setEtag(w, etag)
+
+ response := CopyObjectResult{
+ ETag: etag,
+ LastModified: time.Now().UTC(),
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+
+}
+
+func pathToBucketAndObject(path string) (bucket, object string) {
+ path = strings.TrimPrefix(path, "/")
+ parts := strings.SplitN(path, "/", 2)
+ if len(parts) == 2 {
+ return parts[0], "/" + parts[1]
+ }
+ return parts[0], "/"
+}
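Two quick examples of the split (note the object always comes back with a leading slash):

pathToBucketAndObject("/srcbucket/dir/file.txt") // ("srcbucket", "/dir/file.txt")
pathToBucketAndObject("srcbucket")               // ("srcbucket", "/")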
+
+type CopyPartResult struct {
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+}
+
+func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
+ // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ dstBucket, _ := getBucketAndObject(r)
+
+ // Copy source path.
+ cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
+ if err != nil {
+ // Save unescaped string as is.
+ cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
+ }
+
+ srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
+ // If source object is empty or bucket is empty, reply back invalid copy source.
+ if srcObject == "" || srcBucket == "" {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ return
+ }
+
+ uploadID := r.URL.Query().Get("uploadId")
+ partIDString := r.URL.Query().Get("partNumber")
+
+ partID, err := strconv.Atoi(partIDString)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidPart, r.URL)
+ return
+ }
+
+ // check partID with maximum part ID for multipart objects
+ if partID > globalMaxPartID {
+ writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
+ return
+ }
+
+ rangeHeader := r.Header.Get("x-amz-copy-source-range")
+
+ dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
+ s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID, dstBucket)
+ srcUrl := fmt.Sprintf("http://%s%s/%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject)
+
+ dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
+ return
+ }
+ defer dataReader.Close()
+
+ glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
+ etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
+
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ setEtag(w, etag)
+
+ response := CopyPartResult{
+ ETag: etag,
+ LastModified: time.Now().UTC(),
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+
+}
+
+func isReplace(r *http.Request) bool {
+ return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE"
+}
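isReplace keys off the standard S3 copy directive; a hypothetical client request that triggers the metadata-replace path above:

req.Header.Set("X-Amz-Metadata-Directive", "REPLACE") // overwrite metadata instead of copying it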
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 44e93d297..f1a539ac5 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -3,15 +3,24 @@ package s3api
import (
"crypto/md5"
"encoding/json"
+ "encoding/xml"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"io"
"io/ioutil"
"net/http"
+ "net/url"
+ "sort"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+
"github.com/gorilla/mux"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_server "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -20,6 +29,7 @@ var (
func init() {
client = &http.Client{Transport: &http.Transport{
+ MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
@@ -28,50 +38,73 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
_, err := validateContentMd5(r.Header)
if err != nil {
- writeErrorResponse(w, ErrInvalidDigest, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidDigest, r.URL)
return
}
- rAuthType := getRequestAuthType(r)
dataReader := r.Body
- if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ if s3a.iam.isEnabled() {
+ rAuthType := getRequestAuthType(r)
+ var s3ErrCode s3err.ErrorCode
+ switch rAuthType {
+ case authTypeStreamingSigned:
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ case authTypeSignedV2, authTypePresignedV2:
+ _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
+ case authTypePresigned, authTypeSigned:
+ _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
+ }
+ if s3ErrCode != s3err.ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
+ }
}
+ defer dataReader.Close()
- uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket)
+ if strings.HasSuffix(object, "/") {
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ return
+ }
+ } else {
+ uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
- etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
+ etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
- if errCode != ErrNone {
- writeErrorResponse(w, errCode, r.URL)
- return
- }
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
- setEtag(w, etag)
+ setEtag(w, etag)
+ }
writeSuccessResponseEmpty(w)
}
+func urlPathEscape(object string) string {
+ var escapedParts []string
+ for _, part := range strings.Split(object, "/") {
+ escapedParts = append(escapedParts, url.PathEscape(part))
+ }
+ return strings.Join(escapedParts, "/")
+}
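Escaping is applied per path segment, so the slashes themselves survive; a quick example:

urlPathEscape("/dir one/file#1.txt") // "/dir%20one/file%231.txt"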
+
func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
if strings.HasSuffix(r.URL.Path, "/") {
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+ writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
destUrl := fmt.Sprintf("http://%s%s/%s%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
+ s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
@@ -79,12 +112,10 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
destUrl := fmt.Sprintf("http://%s%s/%s%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
+ s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
@@ -92,29 +123,152 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
- destUrl := fmt.Sprintf("http://%s%s/%s%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
+ destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
+ s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
- s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResonse.Header {
+ s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) {
+ for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
w.WriteHeader(http.StatusNoContent)
})
+}
+
+// ObjectIdentifier carries the key name of the object to delete.
+type ObjectIdentifier struct {
+ ObjectName string `xml:"Key"`
+}
+
+// DeleteObjectsRequest - XML carrying the object key names which need to be deleted.
+type DeleteObjectsRequest struct {
+ // Element to enable quiet mode for the request
+ Quiet bool
+ // List of objects to be deleted
+ Objects []ObjectIdentifier `xml:"Object"`
+}
+
+// DeleteError structure.
+type DeleteError struct {
+ Code string
+ Message string
+ Key string
+}
+
+// DeleteObjectsResponse container for multiple object deletes.
+type DeleteObjectsResponse struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
+ // Collection of all deleted objects
+ DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+
+ // Collection of errors deleting certain objects.
+ Errors []DeleteError `xml:"Error,omitempty"`
}
// DeleteMultipleObjectsHandler - Delete multiple objects
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
- // TODO
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+
+ bucket, _ := getBucketAndObject(r)
+
+ deleteXMLBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ return
+ }
+
+ deleteObjects := &DeleteObjectsRequest{}
+ if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedXML, r.URL)
+ return
+ }
+
+ var deletedObjects []ObjectIdentifier
+ var deleteErrors []DeleteError
+
+ directoriesWithDeletion := make(map[string]int)
+
+ s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ // delete file entries
+ for _, object := range deleteObjects.Objects {
+
+ lastSeparator := strings.LastIndex(object.ObjectName, "/")
+ parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
+ if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
+ entryName = object.ObjectName[lastSeparator+1:]
+ parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
+ }
+ parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
+
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err == nil {
+ directoriesWithDeletion[parentDirectoryPath]++
+ deletedObjects = append(deletedObjects, object)
+ } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) {
+ deletedObjects = append(deletedObjects, object)
+ } else {
+ delete(directoriesWithDeletion, parentDirectoryPath)
+ deleteErrors = append(deleteErrors, DeleteError{
+ Code: "",
+ Message: err.Error(),
+ Key: object.ObjectName,
+ })
+ }
+ }
+
+ // purge empty folders, only checking folders with deletions
+ for len(directoriesWithDeletion) > 0 {
+ directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
+ }
+
+ return nil
+ })
+
+ deleteResp := DeleteObjectsResponse{}
+ if !deleteObjects.Quiet {
+ deleteResp.DeletedObjects = deletedObjects
+ }
+ deleteResp.Errors = deleteErrors
+
+ writeSuccessResponseXML(w, encodeResponse(deleteResp))
+
}
-func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) {
+func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
+ var allDirs []string
+ for dir := range directoriesWithDeletion {
+ allDirs = append(allDirs, dir)
+ }
+ sort.Slice(allDirs, func(i, j int) bool {
+ return len(allDirs[i]) > len(allDirs[j])
+ })
+ newDirectoriesWithDeletion = make(map[string]int)
+ for _, dir := range allDirs {
+ parentDir, dirName := util.FullPath(dir).DirAndName()
+ if parentDir == s3a.option.BucketsPath {
+ continue
+ }
+ if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
+ glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
+ } else {
+ newDirectoriesWithDeletion[parentDir]++
+ }
+ }
+ return
+}
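Sorting by descending path length makes each pass bottom-up, so a leaf directory is attempted before its parent; a small illustration with placeholder paths:

dirs := []string{"/buckets/b/a", "/buckets/b/a/b/c"}
sort.Slice(dirs, func(i, j int) bool { return len(dirs[i]) > len(dirs[j]) })
// dirs is now ["/buckets/b/a/b/c", "/buckets/b/a"]; each successful removal
// re-queues the parent directory for the next round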
+
+var passThroughHeaders = []string{
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+}
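These are the standard S3 response-override parameters. In the loop below they are matched case-insensitively against incoming request headers and forwarded to the filer with the "response-" prefix stripped; e.g. (hypothetical values) a header "Response-Content-Type: text/plain" is re-added as:

proxyReq.Header.Add("Content-Type", "text/plain")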
+
+func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
@@ -122,15 +276,27 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
if err != nil {
glog.Errorf("NewRequest %s: %v", destUrl, err)
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
proxyReq.Header.Set("Host", s3a.option.Filer)
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
- proxyReq.Header.Set("Etag-MD5", "True")
for header, values := range r.Header {
+ // handle s3 related headers
+ passed := false
+ for _, h := range passThroughHeaders {
+ if strings.ToLower(header) == h && len(values) > 0 {
+ proxyReq.Header.Add(header[len("response-"):], values[0])
+ passed = true
+ break
+ }
+ }
+ if passed {
+ continue
+ }
+ // handle other headers
for _, value := range values {
proxyReq.Header.Add(header, value)
}
@@ -140,31 +306,44 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
- defer resp.Body.Close()
+ defer util.CloseResponse(resp)
+
+ if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
+ if r.Method != "DELETE" {
+ writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+ return
+ }
+ }
responseFn(resp, w)
+
}
-func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResonse.Header {
+
+func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
+ for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
- w.WriteHeader(proxyResonse.StatusCode)
- io.Copy(w, proxyResonse.Body)
+ if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
+ w.WriteHeader(http.StatusPartialContent)
+ } else {
+ w.WriteHeader(proxyResponse.StatusCode)
+ }
+ io.Copy(w, proxyResponse.Body)
}
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) {
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) {
hash := md5.New()
- var body io.Reader = io.TeeReader(dataReader, hash)
+ var body = io.TeeReader(dataReader, hash)
proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
if err != nil {
glog.Errorf("NewRequest %s: %v", uploadUrl, err)
- return "", ErrInternalError
+ return "", s3err.ErrInternalError
}
proxyReq.Header.Set("Host", s3a.option.Filer)
@@ -178,11 +357,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp, postErr := client.Do(proxyReq)
- dataReader.Close()
-
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
- return "", ErrInternalError
+ return "", s3err.ErrInternalError
}
defer resp.Body.Close()
@@ -190,21 +367,21 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
- glog.Errorf("upload to filer response read: %v", ra_err)
- return etag, ErrInternalError
+ glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
+ return etag, s3err.ErrInternalError
}
var ret weed_server.FilerPostResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
- return "", ErrInternalError
+ return "", s3err.ErrInternalError
}
if ret.Error != "" {
glog.Errorf("upload to filer error: %v", ret.Error)
- return "", ErrInternalError
+ return "", filerErrorToS3Error(ret.Error)
}
- return etag, ErrNone
+ return etag, s3err.ErrNone
}
func setEtag(w http.ResponseWriter, etag string) {
@@ -217,10 +394,20 @@ func setEtag(w http.ResponseWriter, etag string) {
}
}
-func getObject(vars map[string]string) string {
- object := vars["object"]
+func getBucketAndObject(r *http.Request) (bucket, object string) {
+ vars := mux.Vars(r)
+ bucket = vars["bucket"]
+ object = vars["object"]
if !strings.HasPrefix(object, "/") {
object = "/" + object
}
- return object
+
+ return
+}
+
+func filerErrorToS3Error(errString string) s3err.ErrorCode {
+ if strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory") {
+ return s3err.ErrExistingObjectIsDirectory
+ }
+ return s3err.ErrInternalError
}
diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go
new file mode 100644
index 000000000..035302ae6
--- /dev/null
+++ b/weed/s3api/s3api_object_handlers_postpolicy.go
@@ -0,0 +1,241 @@
+package s3api
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/policy"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "github.com/dustin/go-humanize"
+ "github.com/gorilla/mux"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
+
+ bucket := mux.Vars(r)["bucket"]
+
+ reader, err := r.MultipartReader()
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
+ return
+ }
+ form, err := reader.ReadForm(int64(5 * humanize.MiByte))
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
+ return
+ }
+ defer form.RemoveAll()
+
+ fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
+ return
+ }
+ if fileBody == nil {
+ writeErrorResponse(w, s3err.ErrPOSTFileRequired, r.URL)
+ return
+ }
+ defer fileBody.Close()
+
+ formValues.Set("Bucket", bucket)
+
+ if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
+ formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
+ }
+ object := formValues.Get("Key")
+
+ successRedirect := formValues.Get("success_action_redirect")
+ successStatus := formValues.Get("success_action_status")
+ var redirectURL *url.URL
+ if successRedirect != "" {
+ redirectURL, err = url.Parse(successRedirect)
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
+ return
+ }
+ }
+
+ // Verify policy signature.
+ errCode := s3a.iam.doesPolicySignatureMatch(formValues)
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
+ return
+ }
+
+ // Handle policy if it is set.
+ if len(policyBytes) > 0 {
+
+ postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes))
+ if err != nil {
+ writeErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r.URL)
+ return
+ }
+
+ // Make sure formValues adhere to policy restrictions.
+ if err = policy.CheckPostPolicy(formValues, postPolicyForm); err != nil {
+ w.Header().Set("Location", r.URL.Path)
+ w.WriteHeader(http.StatusTemporaryRedirect)
+ return
+ }
+
+		// Ensure that the object size is within the expected range; the file size
+		// must also not exceed the maximum single PUT size (5 GiB)
+ lengthRange := postPolicyForm.Conditions.ContentLengthRange
+ if lengthRange.Valid {
+ if fileSize < lengthRange.Min {
+ writeErrorResponse(w, s3err.ErrEntityTooSmall, r.URL)
+ return
+ }
+
+ if fileSize > lengthRange.Max {
+ writeErrorResponse(w, s3err.ErrEntityTooLarge, r.URL)
+ return
+ }
+ }
+ }
+
+ uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
+
+ etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody)
+
+ if errCode != s3err.ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ if successRedirect != "" {
+		// Replace raw query params.
+ redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag)
+ w.Header().Set("Location", redirectURL.String())
+ writeResponse(w, http.StatusSeeOther, nil, mimeNone)
+ return
+ }
+
+ setEtag(w, etag)
+
+ // Decide what http response to send depending on success_action_status parameter
+ switch successStatus {
+ case "201":
+ resp := encodeResponse(PostResponse{
+ Bucket: bucket,
+ Key: object,
+ ETag: `"` + etag + `"`,
+ Location: w.Header().Get("Location"),
+ })
+ writeResponse(w, http.StatusCreated, resp, mimeXML)
+ case "200":
+ writeResponse(w, http.StatusOK, nil, mimeNone)
+ default:
+ writeSuccessResponseEmpty(w)
+ }
+
+}
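
PostPolicyBucketHandler above implements the browser-based upload flow from the linked AWS docs: the client POSTs multipart/form-data carrying a base64-encoded policy, a signature, the object key (optionally containing ${filename}), and the file itself. A hedged sketch of a matching client-side payload; the policy document and field values are placeholders, and the signature fields (SignV2 "Signature" or SigV4 "X-Amz-Signature") are omitted:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"mime/multipart"
)

func main() {
	// A minimal POST policy document; expiration and conditions are placeholders.
	policy := `{"expiration":"2030-01-01T00:00:00Z","conditions":[["content-length-range",1,1048576]]}`

	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	// Form fields the handler above reads via formValues.Get(...).
	w.WriteField("key", "uploads/${filename}") // ${filename} is replaced server-side
	w.WriteField("Policy", base64.StdEncoding.EncodeToString([]byte(policy)))
	w.WriteField("success_action_status", "201")

	// The file part; its filename fills in ${filename}.
	fw, _ := w.CreateFormFile("file", "hello.txt")
	fw.Write([]byte("hello world"))
	w.Close()

	fmt.Println("Content-Type:", w.FormDataContentType())
	fmt.Println(body.Len(), "bytes of multipart body")
}

Note that the handler canonicalizes field names with http.CanonicalHeaderKey, so a field sent as "key" is read back as "Key", and the file part named "file" surfaces under "File".
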
+
+// Extract form fields and file data from an HTTP POST policy request
+func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
+	// HTML form values
+ fileName = ""
+
+ // Canonicalize the form values into http.Header.
+ formValues = make(http.Header)
+ for k, v := range form.Value {
+ formValues[http.CanonicalHeaderKey(k)] = v
+ }
+
+ // Validate form values.
+ if err = validateFormFieldSize(formValues); err != nil {
+ return nil, "", 0, nil, err
+ }
+
+	// This means that filename="" was not specified for the file key, which Go
+	// handles in an awkward way. See
+	// https://golang.org/src/mime/multipart/formdata.go#L61
+ if len(form.File) == 0 {
+ var b = &bytes.Buffer{}
+ for _, v := range formValues["File"] {
+ b.WriteString(v)
+ }
+ fileSize = int64(b.Len())
+ filePart = ioutil.NopCloser(b)
+ return filePart, fileName, fileSize, formValues, nil
+ }
+
+	// Iterate until we find a valid File field, then break
+ for k, v := range form.File {
+ canonicalFormName := http.CanonicalHeaderKey(k)
+ if canonicalFormName == "File" {
+ if len(v) == 0 {
+ return nil, "", 0, nil, errors.New("Invalid arguments specified")
+ }
+ // Fetch fileHeader which has the uploaded file information
+ fileHeader := v[0]
+ // Set filename
+ fileName = fileHeader.Filename
+ // Open the uploaded part
+ filePart, err = fileHeader.Open()
+ if err != nil {
+ return nil, "", 0, nil, err
+ }
+ // Compute file size
+ fileSize, err = filePart.(io.Seeker).Seek(0, 2)
+ if err != nil {
+ return nil, "", 0, nil, err
+ }
+ // Reset Seek to the beginning
+ _, err = filePart.(io.Seeker).Seek(0, 0)
+ if err != nil {
+ return nil, "", 0, nil, err
+ }
+ // File found and ready for reading
+ break
+ }
+ }
+ return filePart, fileName, fileSize, formValues, nil
+}
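
The size computation in extractPostPolicyFormValues relies on the opened multipart part implementing io.Seeker (Go backs parts with memory or a temp file): seek to the end for the size, then rewind so the content can still be read. The same two-seek probe in isolation (a sketch, with strings.Reader as a stand-in for the uploaded part):

package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeOf returns the total size of a seekable stream and rewinds it,
// mirroring the Seek(0, 2) / Seek(0, 0) pair in the function above.
func sizeOf(s io.Seeker) (int64, error) {
	size, err := s.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	// Rewind so the caller can read the content from the start.
	if _, err := s.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return size, nil
}

func main() {
	r := strings.NewReader("some uploaded bytes")
	size, err := sizeOf(r)
	fmt.Println(size, err) // 19 <nil>
}
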
+
+// Validate form field sizes per the S3 specification.
+func validateFormFieldSize(formValues http.Header) error {
+ // Iterate over form values
+ for k := range formValues {
+ // Check if value's field exceeds S3 limit
+ if int64(len(formValues.Get(k))) > int64(1*humanize.MiByte) {
+ return errors.New("Data size larger than expected")
+ }
+ }
+
+ // Success.
+ return nil
+}
+
+func getRedirectPostRawQuery(bucket, key, etag string) string {
+ redirectValues := make(url.Values)
+ redirectValues.Set("bucket", bucket)
+ redirectValues.Set("key", key)
+ redirectValues.Set("etag", "\""+etag+"\"")
+ return redirectValues.Encode()
+}
+
+// Check whether the policy is signed correctly.
+func (iam *IdentityAccessManagement) doesPolicySignatureMatch(formValues http.Header) s3err.ErrorCode {
+ // For SignV2 - Signature field will be valid
+ if _, ok := formValues["Signature"]; ok {
+ return iam.doesPolicySignatureV2Match(formValues)
+ }
+ return iam.doesPolicySignatureV4Match(formValues)
+}
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 72a25e4a5..4ddb24e31 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -1,65 +1,61 @@
package s3api
import (
- "context"
"fmt"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/gorilla/mux"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
"strconv"
"strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
)
const (
- maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
- maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
- maxPartsList = 1000 // Limit number of parts in a listPartsResponse.
- globalMaxPartID = 10000
+ maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse.
+ maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
+ maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
+ globalMaxPartID = 100000
)
// NewMultipartUploadHandler - New multipart upload.
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- var object, bucket string
- vars := mux.Vars(r)
- bucket = vars["bucket"]
- object = vars["object"]
+ bucket, object := getBucketAndObject(r)
- response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{
+ response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
})
- if errCode != ErrNone {
+ glog.V(2).Info("NewMultipartUploadHandler", string(encodeResponse(response)), errCode)
+
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
- // println("NewMultipartUploadHandler", string(encodeResponse(response)))
-
writeSuccessResponseXML(w, encodeResponse(response))
}
// CompleteMultipartUploadHandler - Completes multipart upload.
func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
- response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
+ response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
})
- // println("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
+ glog.V(2).Info("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
- if errCode != ErrNone {
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@@ -70,25 +66,23 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// AbortMultipartUploadHandler - Aborts multipart upload.
func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
- response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{
+ response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
})
- if errCode != ErrNone {
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
- // println("AbortMultipartUploadHandler", string(encodeResponse(response)))
+ glog.V(2).Info("AbortMultipartUploadHandler", string(encodeResponse(response)))
writeSuccessResponseXML(w, encodeResponse(response))
@@ -96,23 +90,22 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
// ListMultipartUploadsHandler - Lists multipart uploads.
func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
- writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidMaxUploads, r.URL)
return
}
if keyMarker != "" {
// Marker not common with prefix is not implemented.
if !strings.HasPrefix(keyMarker, prefix) {
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+ writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
}
- response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{
+ response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
Bucket: aws.String(bucket),
Delimiter: aws.String(delimiter),
EncodingType: aws.String(encodingType),
@@ -122,34 +115,33 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
UploadIdMarker: aws.String(uploadIDMarker),
})
- if errCode != ErrNone {
+ glog.V(2).Info("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode)
+
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
// TODO handle encodingType
- // println("ListMultipartUploadsHandler", string(encodeResponse(response)))
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectPartsHandler - Lists object parts in a multipart upload.
func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
if partNumberMarker < 0 {
- writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r.URL)
return
}
if maxParts < 0 {
- writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
return
}
- response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{
+ response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
MaxParts: aws.Int64(int64(maxParts)),
@@ -157,55 +149,64 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
UploadId: aws.String(uploadID),
})
- if errCode != ErrNone {
+ glog.V(2).Info("ListObjectPartsHandler", string(encodeResponse(response)), errCode)
+
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
- // println("ListObjectPartsHandler", string(encodeResponse(response)))
-
writeSuccessResponseXML(w, encodeResponse(response))
}
// PutObjectPartHandler - Put an object part in a multipart upload.
func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- rAuthType := getRequestAuthType(r)
-
- ctx := context.Background()
+ bucket, _ := getBucketAndObject(r)
uploadID := r.URL.Query().Get("uploadId")
- exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true)
+ exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if !exists {
- writeErrorResponse(w, ErrNoSuchUpload, r.URL)
+ writeErrorResponse(w, s3err.ErrNoSuchUpload, r.URL)
return
}
partIDString := r.URL.Query().Get("partNumber")
partID, err := strconv.Atoi(partIDString)
if err != nil {
- writeErrorResponse(w, ErrInvalidPart, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidPart, r.URL)
return
}
if partID > globalMaxPartID {
- writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
return
}
dataReader := r.Body
- if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ if s3a.iam.isEnabled() {
+ rAuthType := getRequestAuthType(r)
+ var s3ErrCode s3err.ErrorCode
+ switch rAuthType {
+ case authTypeStreamingSigned:
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ case authTypeSignedV2, authTypePresignedV2:
+ _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
+ case authTypePresigned, authTypeSigned:
+ _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
+ }
+ if s3ErrCode != s3err.ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
+ }
}
+ defer dataReader.Close()
uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
- s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket)
+ s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID, bucket)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
- if errCode != ErrNone {
+ if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
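
Two behavioral points in the hunk above: signature verification now only runs when IAM is enabled, branching on the request's auth type, and the part file on the filer is named with the 1-based partID (%04d.part) instead of partID-1, so stored part names line up with S3 part numbers. A small sketch of the resulting naming scheme (the filer address and uploads folder are placeholders mirroring genUploadsFolder):

package main

import "fmt"

func main() {
	filer := "localhost:8888"              // placeholder filer address
	uploadsFolder := "/buckets/b/.uploads" // placeholder, mirrors genUploadsFolder(bucket)
	uploadID, bucket := "abc123", "b"

	for _, partID := range []int{1, 2, 10000} {
		url := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
			filer, uploadsFolder, uploadID, partID, bucket)
		fmt.Println(url)
	}
}
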
diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go
new file mode 100644
index 000000000..94719834c
--- /dev/null
+++ b/weed/s3api/s3api_object_tagging_handlers.go
@@ -0,0 +1,117 @@
+package s3api
+
+import (
+ "encoding/xml"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// GetObjectTaggingHandler - GET object tagging
+// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
+
+ bucket, object := getBucketAndObject(r)
+
+ target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
+ dir, name := target.DirAndName()
+
+ tags, err := s3a.getTags(dir, name)
+ if err != nil {
+ if err == filer_pb.ErrNotFound {
+ glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+ } else {
+ glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ }
+ return
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(FromTags(tags)))
+
+}
+
+// PutObjectTaggingHandler Put object tagging
+// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
+
+ bucket, object := getBucketAndObject(r)
+
+ target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
+ dir, name := target.DirAndName()
+
+ tagging := &Tagging{}
+ input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
+ if err != nil {
+ glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ return
+ }
+ if err = xml.Unmarshal(input, tagging); err != nil {
+ glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrMalformedXML, r.URL)
+ return
+ }
+ tags := tagging.ToTags()
+ if len(tags) > 10 {
+ glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
+ writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
+ return
+ }
+ for k, v := range tags {
+ if len(k) > 128 {
+ glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
+ writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
+ return
+ }
+ if len(v) > 256 {
+ glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
+ writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
+ return
+ }
+ }
+
+ if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil {
+ if err == filer_pb.ErrNotFound {
+ glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+ } else {
+ glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ }
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+
+}
+
+// DeleteObjectTaggingHandler Delete object tagging
+// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
+
+ bucket, object := getBucketAndObject(r)
+
+ target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
+ dir, name := target.DirAndName()
+
+ err := s3a.rmTags(dir, name)
+ if err != nil {
+ if err == filer_pb.ErrNotFound {
+ glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+ } else {
+ glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ }
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
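
PutObjectTaggingHandler enforces the S3 tagging limits inline: at most 10 tags per object, keys no longer than 128 characters, values no longer than 256. The same checks pulled into a reusable helper (a sketch; the helper name is ours, the limits are the ones in the handler above):

package main

import (
	"errors"
	"fmt"
)

// validateTags applies the S3 object-tagging limits enforced by the handler:
// at most 10 tags, key length <= 128, value length <= 256.
func validateTags(tags map[string]string) error {
	if len(tags) > 10 {
		return errors.New("more than 10 tags")
	}
	for k, v := range tags {
		if len(k) > 128 {
			return fmt.Errorf("tag key %q longer than 128", k)
		}
		if len(v) > 256 {
			return fmt.Errorf("tag value %q longer than 256", v)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateTags(map[string]string{"env": "prod"})) // <nil>
}
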
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index aa6849cbd..739cdd8f9 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -2,7 +2,9 @@ package s3api
import (
"context"
+ "encoding/xml"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
"io"
"net/http"
"net/url"
@@ -11,51 +13,72 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gorilla/mux"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
)
-const (
- maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
-)
+type ListBucketResultV2 struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ MaxKeys int `xml:"MaxKeys"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Contents []ListEntry `xml:"Contents,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
+ ContinuationToken string `xml:"ContinuationToken,omitempty"`
+ NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
+ KeyCount int `xml:"KeyCount"`
+ StartAfter string `xml:"StartAfter,omitempty"`
+}
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
// collect parameters
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- glog.V(4).Infof("read v2: %v", vars)
+ bucket, _ := getBucketAndObject(r)
- originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
+ originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
if maxKeys < 0 {
- writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
return
}
if delimiter != "" && delimiter != "/" {
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+ writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
- if marker == "" {
+ marker := continuationToken
+ if continuationToken == "" {
marker = startAfter
}
- ctx := context.Background()
-
- response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
if err != nil {
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
+ responseV2 := &ListBucketResultV2{
+ XMLName: response.XMLName,
+ Name: response.Name,
+ CommonPrefixes: response.CommonPrefixes,
+ Contents: response.Contents,
+ ContinuationToken: continuationToken,
+ Delimiter: response.Delimiter,
+ IsTruncated: response.IsTruncated,
+ KeyCount: len(response.Contents) + len(response.CommonPrefixes),
+ MaxKeys: response.MaxKeys,
+ NextContinuationToken: response.NextMarker,
+ Prefix: response.Prefix,
+ StartAfter: startAfter,
+ }
- writeSuccessResponseXML(w, encodeResponse(response))
+ writeSuccessResponseXML(w, encodeResponse(responseV2))
}
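
ListObjectsV2Handler folds the two V2 pagination inputs into the single marker that listFilerEntries understands: continuation-token wins, and start-after only applies on the first page when no token was sent. The selection rule in isolation (the helper name is ours):

package main

import "fmt"

// effectiveMarker mirrors the V2 handler above: the continuation token wins,
// and start-after is only consulted when no token was sent.
func effectiveMarker(continuationToken, startAfter string) string {
	if continuationToken == "" {
		return startAfter
	}
	return continuationToken
}

func main() {
	fmt.Println(effectiveMarker("", "photos/2020/"))    // photos/2020/
	fmt.Println(effectiveMarker("token123", "photos/")) // token123
}
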
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
@@ -63,121 +86,203 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
// collect parameters
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- ctx := context.Background()
+ bucket, _ := getBucketAndObject(r)
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
if maxKeys < 0 {
- writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
+ writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
return
}
if delimiter != "" && delimiter != "/" {
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+ writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
- response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
if err != nil {
- writeErrorResponse(w, ErrInternalError, r.URL)
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
writeSuccessResponseXML(w, encodeResponse(response))
}
-func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
-
+func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {
// convert full path prefix into directory name and prefix for entry name
- dir, prefix := filepath.Split(originalPrefix)
- if strings.HasPrefix(dir, "/") {
- dir = dir[1:]
+ reqDir, prefix := filepath.Split(originalPrefix)
+ if strings.HasPrefix(reqDir, "/") {
+ reqDir = reqDir[1:]
+ }
+ bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
+ reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
+ if strings.HasSuffix(reqDir, "/") {
+ // remove trailing "/"
+ reqDir = reqDir[:len(reqDir)-1]
}
- // check filer
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.ListEntriesRequest{
- Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),
- Prefix: prefix,
- Limit: uint32(maxKeys + 1),
- StartFromFileName: marker,
- InclusiveStartFrom: false,
- }
-
- stream, err := client.ListEntries(ctx, request)
- if err != nil {
- return fmt.Errorf("list buckets: %v", err)
- }
+ var contents []ListEntry
+ var commonPrefixes []PrefixEntry
+ var isTruncated bool
+ var doErr error
+ var nextMarker string
- var contents []ListEntry
- var commonPrefixes []PrefixEntry
- var counter int
- var lastEntryName string
- var isTruncated bool
-
- for {
- resp, recvErr := stream.Recv()
- if recvErr != nil {
- if recvErr == io.EOF {
- break
- } else {
- return recvErr
- }
- }
+ // check filer
+ err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- entry := resp.Entry
- counter++
- if counter > maxKeys {
- isTruncated = true
- break
- }
- lastEntryName = entry.Name
+ _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
if entry.IsDirectory {
- if entry.Name != ".uploads" {
+ if delimiter == "/" {
commonPrefixes = append(commonPrefixes, PrefixEntry{
- Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
+ Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
})
}
} else {
+ storageClass := "STANDARD"
+ if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+ storageClass = string(v)
+ }
contents = append(contents, ListEntry{
- Key: fmt.Sprintf("%s%s", dir, entry.Name),
- LastModified: time.Unix(entry.Attributes.Mtime, 0),
- ETag: "\"" + filer2.ETag(entry.Chunks) + "\"",
- Size: int64(filer2.TotalSize(entry.Chunks)),
+ Key: fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
+ LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+ ETag: "\"" + filer.ETag(entry) + "\"",
+ Size: int64(filer.FileSize(entry)),
Owner: CanonicalUser{
ID: fmt.Sprintf("%x", entry.Attributes.Uid),
DisplayName: entry.Attributes.UserName,
},
- StorageClass: "STANDARD",
+ StorageClass: StorageClass(storageClass),
})
}
+ })
+ if doErr != nil {
+ return doErr
+ }
+ if !isTruncated {
+ nextMarker = ""
}
response = ListBucketResult{
Name: bucket,
Prefix: originalPrefix,
Marker: marker,
- NextMarker: lastEntryName,
+ NextMarker: nextMarker,
MaxKeys: maxKeys,
- Delimiter: "/",
+ Delimiter: delimiter,
IsTruncated: isTruncated,
Contents: contents,
CommonPrefixes: commonPrefixes,
}
- glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response)
-
return nil
})
return
}
+func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
+ // invariants
+ // prefix and marker should be under dir, marker may contain "/"
+ // maxKeys should be updated for each recursion
+
+ if prefix == "/" && delimiter == "/" {
+ return
+ }
+ if maxKeys <= 0 {
+ return
+ }
+
+ if strings.Contains(marker, "/") {
+ sepIndex := strings.Index(marker, "/")
+ subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
+ // println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
+ subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
+ if subErr != nil {
+ err = subErr
+ return
+ }
+ isTruncated = isTruncated || subIsTruncated
+ maxKeys -= subCounter
+ nextMarker = subDir + "/" + subNextMarker
+ // finished processing this sub directory
+ marker = subDir
+ }
+
+ // now marker is also a direct child of dir
+ request := &filer_pb.ListEntriesRequest{
+ Directory: dir,
+ Prefix: prefix,
+ Limit: uint32(maxKeys + 1),
+ StartFromFileName: marker,
+ InclusiveStartFrom: false,
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, listErr := client.ListEntries(ctx, request)
+ if listErr != nil {
+ err = fmt.Errorf("list entires %+v: %v", request, listErr)
+ return
+ }
+
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ break
+ } else {
+ err = fmt.Errorf("iterating entires %+v: %v", request, recvErr)
+ return
+ }
+ }
+ if counter >= maxKeys {
+ isTruncated = true
+ return
+ }
+ entry := resp.Entry
+ nextMarker = entry.Name
+ if entry.IsDirectory {
+ // println("ListEntries", dir, "dir:", entry.Name)
+ if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
+ if delimiter != "/" {
+ eachEntryFn(dir, entry)
+ // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
+ subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
+ if subErr != nil {
+ err = fmt.Errorf("doListFilerEntries2: %v", subErr)
+ return
+ }
+ // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
+ counter += subCounter
+ nextMarker = entry.Name + "/" + subNextMarker
+ if subIsTruncated {
+ isTruncated = true
+ return
+ }
+ } else {
+ var isEmpty bool
+ if !s3a.option.AllowEmptyFolder {
+ if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+ glog.Errorf("check empty folder %s: %v", dir, err)
+ }
+ }
+ if !isEmpty {
+ eachEntryFn(dir, entry)
+ counter++
+ }
+ }
+ }
+ } else {
+ // println("ListEntries", dir, "file:", entry.Name)
+ eachEntryFn(dir, entry)
+ counter++
+ }
+ }
+ return
+}
+
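
The recursion in doListFilerEntries treats a marker containing "/" as an instruction to first descend into the child directory named by the segment before the first separator, resuming from the remainder as the sub-marker. That split step in isolation (a sketch):

package main

import (
	"fmt"
	"strings"
)

// splitMarker mirrors the recursion step above: a marker that contains "/"
// is split at the first separator into the child directory to descend into
// and the marker to resume from inside it.
func splitMarker(marker string) (subDir, subMarker string, ok bool) {
	sepIndex := strings.Index(marker, "/")
	if sepIndex < 0 {
		return "", "", false
	}
	return marker[:sepIndex], marker[sepIndex+1:], true
}

func main() {
	fmt.Println(splitMarker("photos/2020/img1.jpg")) // photos 2020/img1.jpg true
	fmt.Println(splitMarker("img1.jpg"))             // "" "" false
}
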
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
prefix = values.Get("prefix")
token = values.Get("continuation-token")
@@ -203,3 +308,57 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
}
return
}
+
+func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
+ // println("+ isDirectoryAllEmpty", dir, name)
+ glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
+ defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)
+ var fileCounter int
+ var subDirs []string
+ currentDir := parentDir + "/" + name
+ var startFrom string
+ var isExhausted bool
+ var foundEntry bool
+ for fileCounter == 0 && !isExhausted && err == nil {
+ err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
+ foundEntry = true
+ if entry.IsDirectory {
+ subDirs = append(subDirs, entry.Name)
+ } else {
+ fileCounter++
+ }
+ startFrom = entry.Name
+ isExhausted = isExhausted || isLast
+ glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
+ return nil
+ }, startFrom, false, 8)
+ if !foundEntry {
+ break
+ }
+ }
+
+ if err != nil {
+ return false, err
+ }
+
+ if fileCounter > 0 {
+ return false, nil
+ }
+
+ for _, subDir := range subDirs {
+ isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir)
+ if subErr != nil {
+ return false, subErr
+ }
+ if !isSubEmpty {
+ return false, nil
+ }
+ }
+
+ glog.V(1).Infof("deleting empty folder %s", currentDir)
+ if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil {
+ return
+ }
+
+ return true, nil
+}
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index edf634444..54df29492 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -1,30 +1,43 @@
package s3api
import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "net/http"
+ "strings"
+ "time"
+
"github.com/gorilla/mux"
"google.golang.org/grpc"
- "net/http"
)
type S3ApiServerOption struct {
Filer string
+ Port int
FilerGrpcAddress string
+ Config string
DomainName string
BucketsPath string
GrpcDialOption grpc.DialOption
+ AllowEmptyFolder bool
}
type S3ApiServer struct {
option *S3ApiServerOption
+ iam *IdentityAccessManagement
}
func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
s3ApiServer = &S3ApiServer{
option: option,
+ iam: NewIdentityAccessManagement(option),
}
s3ApiServer.registerRouter(router)
+ go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano())
+
return s3ApiServer, nil
}
@@ -33,55 +46,70 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
apiRouter := router.PathPrefix("/").Subrouter()
var routers []*mux.Router
if s3a.option.DomainName != "" {
- routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter())
+ domainNames := strings.Split(s3a.option.DomainName, ",")
+ for _, domainName := range domainNames {
+ routers = append(routers, apiRouter.Host(
+ fmt.Sprintf("%s.%s:%d", "{bucket:.+}", domainName, s3a.option.Port)).Subrouter())
+ routers = append(routers, apiRouter.Host(
+ fmt.Sprintf("%s.%s", "{bucket:.+}", domainName)).Subrouter())
+ }
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
for _, bucket := range routers {
// HeadObject
- bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler)
+ bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
// HeadBucket
- bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler)
+ bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET"))
+ // CopyObjectPart
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "")
// AbortMultipartUpload
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
- bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "")
+ bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "")
+ // GetObjectTagging
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "")
+ // PutObjectTagging
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "")
+ // DeleteObjectTagging
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "")
+
+ // CopyObject
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
// PutObject
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler)
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT"))
// PutBucket
- bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler)
+ bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT"))
// DeleteObject
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler)
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE"))
// DeleteBucket
- bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler)
+ bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
// ListObjectsV2
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2")
+ bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
// GetObject, but directory listing is not supported
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler)
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
// ListObjectsV1 (Legacy)
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler)
+ bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
+
+ // PostPolicy
+ bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))
// DeleteMultipleObjects
- bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "")
+ bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")
/*
- // CopyObject
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler)
-
- // CopyObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// not implemented
// GetBucketLocation
@@ -96,14 +124,12 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
// DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "")
- // PostPolicy
- bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler)
*/
}
// ListBuckets
- apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler)
+ apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST"))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
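
Every route registered above is now wrapped twice: s3a.iam.Auth enforces the per-action identity check, and track (defined in stats.go below) records per-action metrics. Both follow the same handler-in, handler-out middleware shape, so they compose freely. A reduced sketch of the pattern with stand-in middlewares:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// auth and track are stand-ins for s3a.iam.Auth and track: each takes a
// handler and returns a handler, so they compose as track(auth(h, action), method).
func auth(f http.HandlerFunc, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// identity and signature checks for the given action would run here
		f(w, r)
	}
}

func track(f http.HandlerFunc, label string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		f(w, r)
		fmt.Println("observed request:", label) // metrics collection would run here
	}
}

func main() {
	h := track(auth(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}, "Read"), "GET")

	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest("GET", "/bucket/key", nil))
	fmt.Println("status:", rec.Code)
}
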
diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go
new file mode 100644
index 000000000..026766beb
--- /dev/null
+++ b/weed/s3api/s3api_test.go
@@ -0,0 +1,32 @@
+package s3api
+
+import (
+ "testing"
+ "time"
+)
+
+func TestCopyObjectResponse(t *testing.T) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+
+ response := CopyObjectResult{
+ ETag: "12345678",
+ LastModified: time.Now(),
+ }
+
+ println(string(encodeResponse(response)))
+
+}
+
+func TestCopyPartResponse(t *testing.T) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+
+ response := CopyPartResult{
+ ETag: "12345678",
+ LastModified: time.Now(),
+ }
+
+ println(string(encodeResponse(response)))
+
+}
diff --git a/weed/s3api/s3err/s3-error.go b/weed/s3api/s3err/s3-error.go
new file mode 100644
index 000000000..224378ec5
--- /dev/null
+++ b/weed/s3api/s3err/s3-error.go
@@ -0,0 +1,61 @@
+package s3err
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Non-exhaustive list of AWS S3 standard error responses -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+var s3ErrorResponseMap = map[string]string{
+ "AccessDenied": "Access Denied.",
+ "BadDigest": "The Content-Md5 you specified did not match what we received.",
+ "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
+ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
+ "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+ "InternalError": "We encountered an internal error, please try again.",
+ "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
+ "InvalidBucketName": "The specified bucket is not valid.",
+ "InvalidDigest": "The Content-Md5 you specified is not valid.",
+ "InvalidRange": "The requested range is not satisfiable",
+ "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
+ "MissingContentLength": "You must provide the Content-Length HTTP header.",
+ "MissingContentMD5": "Missing required header for this request: Content-Md5.",
+ "MissingRequestBodyError": "Request body is empty.",
+ "NoSuchBucket": "The specified bucket does not exist.",
+ "NoSuchBucketPolicy": "The bucket policy does not exist",
+ "NoSuchKey": "The specified key does not exist.",
+ "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ "NotImplemented": "A header you provided implies functionality that is not implemented",
+ "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
+ "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
+ "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ "MethodNotAllowed": "The specified method is not allowed against this resource.",
+ "InvalidPart": "One or more of the specified parts could not be found.",
+ "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+ "InvalidObjectState": "The operation is not valid for the current state of the object.",
+ "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
+ "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
+ "BucketNotEmpty": "The bucket you tried to delete is not empty",
+ "AllAccessDisabled": "All access to this bucket has been disabled.",
+ "MalformedPolicy": "Policy has invalid resource.",
+ "MissingFields": "Missing fields in request.",
+ "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
+ "InvalidDuration": "Duration provided in the request is invalid.",
+ "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ // Add new API errors here.
+}
diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go
new file mode 100644
index 000000000..a3f7bb25e
--- /dev/null
+++ b/weed/s3api/s3err/s3api_errors.go
@@ -0,0 +1,359 @@
+package s3err
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+)
+
+// APIError structure
+type APIError struct {
+ Code string
+ Description string
+ HTTPStatusCode int
+}
+
+// RESTErrorResponse - error response format
+type RESTErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string `xml:"Code" json:"Code"`
+ Message string `xml:"Message" json:"Message"`
+ Resource string `xml:"Resource" json:"Resource"`
+ RequestID string `xml:"RequestId" json:"RequestId"`
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e RESTErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
+ return e.Message
+}
+
+// ErrorCode type of error status.
+type ErrorCode int
+
+// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+const (
+ ErrNone ErrorCode = iota
+ ErrAccessDenied
+ ErrMethodNotAllowed
+ ErrBucketNotEmpty
+ ErrBucketAlreadyExists
+ ErrBucketAlreadyOwnedByYou
+ ErrNoSuchBucket
+ ErrNoSuchKey
+ ErrNoSuchUpload
+ ErrInvalidBucketName
+ ErrInvalidDigest
+ ErrInvalidMaxKeys
+ ErrInvalidMaxUploads
+ ErrInvalidMaxParts
+ ErrInvalidPartNumberMarker
+ ErrInvalidPart
+ ErrInternalError
+ ErrInvalidCopyDest
+ ErrInvalidCopySource
+ ErrInvalidTag
+ ErrAuthHeaderEmpty
+ ErrSignatureVersionNotSupported
+ ErrMalformedPOSTRequest
+ ErrPOSTFileRequired
+ ErrPostPolicyConditionInvalidFormat
+ ErrEntityTooSmall
+ ErrEntityTooLarge
+ ErrMissingFields
+ ErrMissingCredTag
+ ErrCredMalformed
+ ErrMalformedXML
+ ErrMalformedDate
+ ErrMalformedPresignedDate
+ ErrMalformedCredentialDate
+ ErrMissingSignHeadersTag
+ ErrMissingSignTag
+ ErrUnsignedHeaders
+ ErrInvalidQueryParams
+ ErrInvalidQuerySignatureAlgo
+ ErrExpiredPresignRequest
+ ErrMalformedExpires
+ ErrNegativeExpires
+ ErrMaximumExpires
+ ErrSignatureDoesNotMatch
+ ErrContentSHA256Mismatch
+ ErrInvalidAccessKeyID
+ ErrRequestNotReadyYet
+ ErrMissingDateHeader
+ ErrInvalidRequest
+ ErrNotImplemented
+
+ ErrExistingObjectIsDirectory
+)
+
+// error code to APIError structure, these fields carry respective
+// descriptions for all the error responses.
+var errorCodeResponse = map[ErrorCode]APIError{
+ ErrAccessDenied: {
+ Code: "AccessDenied",
+ Description: "Access Denied.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+ ErrMethodNotAllowed: {
+ Code: "MethodNotAllowed",
+ Description: "The specified method is not allowed against this resource.",
+ HTTPStatusCode: http.StatusMethodNotAllowed,
+ },
+ ErrBucketNotEmpty: {
+ Code: "BucketNotEmpty",
+ Description: "The bucket you tried to delete is not empty",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrBucketAlreadyExists: {
+ Code: "BucketAlreadyExists",
+ Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrBucketAlreadyOwnedByYou: {
+ Code: "BucketAlreadyOwnedByYou",
+ Description: "Your previous request to create the named bucket succeeded and you already own it.",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrInvalidBucketName: {
+ Code: "InvalidBucketName",
+ Description: "The specified bucket is not valid.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidDigest: {
+ Code: "InvalidDigest",
+ Description: "The Content-Md5 you specified is not valid.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidMaxUploads: {
+ Code: "InvalidArgument",
+ Description: "Argument max-uploads must be an integer between 0 and 2147483647",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidMaxKeys: {
+ Code: "InvalidArgument",
+ Description: "Argument maxKeys must be an integer between 0 and 2147483647",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidMaxParts: {
+ Code: "InvalidArgument",
+ Description: "Argument max-parts must be an integer between 0 and 2147483647",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidPartNumberMarker: {
+ Code: "InvalidArgument",
+ Description: "Argument partNumberMarker must be an integer.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNoSuchBucket: {
+ Code: "NoSuchBucket",
+ Description: "The specified bucket does not exist",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ ErrNoSuchKey: {
+ Code: "NoSuchKey",
+ Description: "The specified key does not exist.",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ ErrNoSuchUpload: {
+ Code: "NoSuchUpload",
+ Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ ErrInternalError: {
+ Code: "InternalError",
+ Description: "We encountered an internal error, please try again.",
+ HTTPStatusCode: http.StatusInternalServerError,
+ },
+
+ ErrInvalidPart: {
+ Code: "InvalidPart",
+ Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrInvalidCopyDest: {
+ Code: "InvalidRequest",
+ Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidCopySource: {
+ Code: "InvalidArgument",
+ Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidTag: {
+ Code: "InvalidArgument",
+ Description: "The Tag value you have provided is invalid",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedXML: {
+ Code: "MalformedXML",
+ Description: "The XML you provided was not well-formed or did not validate against our published schema.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrAuthHeaderEmpty: {
+ Code: "InvalidArgument",
+ Description: "Authorization header is invalid -- one and only one ' ' (space) required.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrSignatureVersionNotSupported: {
+ Code: "InvalidRequest",
+ Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedPOSTRequest: {
+ Code: "MalformedPOSTRequest",
+ Description: "The body of your POST request is not well-formed multipart/form-data.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrPOSTFileRequired: {
+ Code: "InvalidArgument",
+ Description: "POST requires exactly one file upload per request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrPostPolicyConditionInvalidFormat: {
+ Code: "PostPolicyInvalidKeyName",
+ Description: "Invalid according to Policy: Policy Condition failed",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+ ErrEntityTooSmall: {
+ Code: "EntityTooSmall",
+ Description: "Your proposed upload is smaller than the minimum allowed object size.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrEntityTooLarge: {
+ Code: "EntityTooLarge",
+ Description: "Your proposed upload exceeds the maximum allowed object size.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingFields: {
+ Code: "MissingFields",
+ Description: "Missing fields in request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingCredTag: {
+ Code: "InvalidRequest",
+ Description: "Missing Credential field for this request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrCredMalformed: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedDate: {
+ Code: "MalformedDate",
+ Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedPresignedDate: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignHeadersTag: {
+ Code: "InvalidArgument",
+ Description: "Signature header missing SignedHeaders field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignTag: {
+ Code: "AccessDenied",
+ Description: "Signature header missing Signature field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrUnsignedHeaders: {
+ Code: "AccessDenied",
+ Description: "There were headers present in the request which were not signed",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQueryParams: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQuerySignatureAlgo: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrExpiredPresignRequest: {
+ Code: "AccessDenied",
+ Description: "Request has expired",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+ ErrMalformedExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires should be a number",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNegativeExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be non-negative",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMaximumExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrInvalidAccessKeyID: {
+ Code: "InvalidAccessKeyId",
+ Description: "The access key ID you provided does not exist in our records.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrRequestNotReadyYet: {
+ Code: "AccessDenied",
+ Description: "Request is not valid yet",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrSignatureDoesNotMatch: {
+ Code: "SignatureDoesNotMatch",
+ Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrContentSHA256Mismatch: {
+ Code: "XAmzContentSHA256Mismatch",
+ Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingDateHeader: {
+ Code: "AccessDenied",
+ Description: "AWS authentication requires a valid Date or x-amz-date header",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidRequest: {
+ Code: "InvalidRequest",
+ Description: "Invalid Request",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNotImplemented: {
+ Code: "NotImplemented",
+ Description: "A header you provided implies functionality that is not implemented",
+ HTTPStatusCode: http.StatusNotImplemented,
+ },
+ ErrExistingObjectIsDirectory: {
+ Code: "ExistingObjectIsDirectory",
+ Description: "Existing Object is a directory.",
+ HTTPStatusCode: http.StatusConflict,
+ },
+}
+
+// GetAPIError provides API Error for input API error code.
+func GetAPIError(code ErrorCode) APIError {
+ return errorCodeResponse[code]
+}
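
GetAPIError is the lookup handlers use to turn an ErrorCode into the code, message, and HTTP status of an XML error response. A hedged sketch of the rendering step; the response wiring here is illustrative (the real writeErrorResponse lives in s3api_handlers.go), and the types are copied locally so the snippet stands alone:

package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
)

// apiError mirrors s3err.APIError for this sketch.
type apiError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// errorResponse mirrors the RESTErrorResponse shape defined above.
type errorResponse struct {
	XMLName   xml.Name `xml:"Error"`
	Code      string   `xml:"Code"`
	Message   string   `xml:"Message"`
	Resource  string   `xml:"Resource"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	noSuchKey := apiError{"NoSuchKey", "The specified key does not exist.", http.StatusNotFound}
	body, _ := xml.MarshalIndent(errorResponse{
		Code:     noSuchKey.Code,
		Message:  noSuchKey.Description,
		Resource: "/bucket/missing.txt",
	}, "", "  ")
	fmt.Println(noSuchKey.HTTPStatusCode)
	fmt.Println(xml.Header + string(body))
}
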
diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go
new file mode 100644
index 000000000..b667b32a0
--- /dev/null
+++ b/weed/s3api/stats.go
@@ -0,0 +1,38 @@
+package s3api
+
+import (
+ stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "net/http"
+ "strconv"
+ "time"
+)
+
+type StatusRecorder struct {
+ http.ResponseWriter
+ Status int
+}
+
+func NewStatusResponseWriter(w http.ResponseWriter) *StatusRecorder {
+ return &StatusRecorder{w, http.StatusOK}
+}
+
+func (r *StatusRecorder) WriteHeader(status int) {
+ r.Status = status
+ r.ResponseWriter.WriteHeader(status)
+}
+
+func (r *StatusRecorder) Flush() {
+ r.ResponseWriter.(http.Flusher).Flush()
+}
+
+func track(f http.HandlerFunc, action string) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
+ recorder := NewStatusResponseWriter(w)
+ start := time.Now()
+ f(recorder, r)
+ stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
+ stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc()
+ }
+}
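
StatusRecorder exists because http.ResponseWriter offers no way to read back the status code a handler wrote; intercepting WriteHeader captures it for the metrics label. Capturing a status in isolation (a sketch):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(status int) {
	r.status = status // remember the code for metrics before passing it on
	r.ResponseWriter.WriteHeader(status)
}

func main() {
	rec := &statusRecorder{httptest.NewRecorder(), http.StatusOK}
	http.NotFound(rec, httptest.NewRequest("GET", "/missing", nil))
	fmt.Println(rec.status) // 404
}
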
diff --git a/weed/s3api/tags.go b/weed/s3api/tags.go
new file mode 100644
index 000000000..9ff7d1fba
--- /dev/null
+++ b/weed/s3api/tags.go
@@ -0,0 +1,38 @@
+package s3api
+
+import (
+ "encoding/xml"
+)
+
+type Tag struct {
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+type TagSet struct {
+ Tag []Tag `xml:"Tag"`
+}
+
+type Tagging struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
+ TagSet TagSet `xml:"TagSet"`
+}
+
+func (t *Tagging) ToTags() map[string]string {
+ output := make(map[string]string)
+ for _, tag := range t.TagSet.Tag {
+ output[tag.Key] = tag.Value
+ }
+ return output
+}
+
+func FromTags(tags map[string]string) (t *Tagging) {
+ t = &Tagging{}
+ for k, v := range tags {
+ t.TagSet.Tag = append(t.TagSet.Tag, Tag{
+ Key: k,
+ Value: v,
+ })
+ }
+ return
+}
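
ToTags and FromTags bridge the S3 Tagging XML document and the flat key/value map stored on the filer entry. A round-trip sketch using the same shapes (types duplicated locally so the snippet is self-contained):

package main

import (
	"encoding/xml"
	"fmt"
)

type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type tagSet struct {
	Tag []tag `xml:"Tag"`
}

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  tagSet   `xml:"TagSet"`
}

func main() {
	// map -> XML, mirroring FromTags + encodeResponse
	t := tagging{TagSet: tagSet{Tag: []tag{{Key: "env", Value: "prod"}}}}
	out, _ := xml.Marshal(t)
	fmt.Println(string(out))

	// XML -> map, mirroring xml.Unmarshal + ToTags
	var parsed tagging
	xml.Unmarshal(out, &parsed)
	m := map[string]string{}
	for _, kv := range parsed.TagSet.Tag {
		m[kv.Key] = kv.Value
	}
	fmt.Println(m) // map[env:prod]
}
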
diff --git a/weed/s3api/tags_test.go b/weed/s3api/tags_test.go
new file mode 100644
index 000000000..887843d6f
--- /dev/null
+++ b/weed/s3api/tags_test.go
@@ -0,0 +1,50 @@
+package s3api
+
+import (
+ "encoding/xml"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestXMLUnmarshall(t *testing.T) {
+
+ input := `<?xml version="1.0" encoding="UTF-8"?>
+<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <TagSet>
+ <Tag>
+ <Key>key1</Key>
+ <Value>value1</Value>
+ </Tag>
+ </TagSet>
+</Tagging>
+`
+
+ tags := &Tagging{}
+
+ xml.Unmarshal([]byte(input), tags)
+
+ assert.Equal(t, len(tags.TagSet.Tag), 1)
+ assert.Equal(t, tags.TagSet.Tag[0].Key, "key1")
+ assert.Equal(t, tags.TagSet.Tag[0].Value, "value1")
+
+}
+
+func TestXMLMarshall(t *testing.T) {
+ tags := &Tagging{
+ TagSet: TagSet{
+ []Tag{
+ {
+ Key: "key1",
+ Value: "value1",
+ },
+ },
+ },
+ }
+
+ actual := string(encodeResponse(tags))
+
+ expected := `<?xml version="1.0" encoding="UTF-8"?>
+<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>key1</Key><Value>value1</Value></Tag></TagSet></Tagging>`
+ assert.Equal(t, expected, actual)
+
+}