about summary refs log tree commit diff
path: root/weed/s3api
diff options
context:
space:
mode:
authorChris Lu <chrislusf@users.noreply.github.com>2025-11-25 15:34:28 -0800
committerGitHub <noreply@github.com>2025-11-25 15:34:28 -0800
commitc156a130b7e1fbd8b63da3b24b494c9fbeb91d30 (patch)
tree9b250e9c2caec47354a9dd02cf01b8d7a983945b /weed/s3api
parent2843cb1255642816dab23562bfa68f98d80aebd1 (diff)
downloadseaweedfs-c156a130b7e1fbd8b63da3b24b494c9fbeb91d30.tar.xz
seaweedfs-c156a130b7e1fbd8b63da3b24b494c9fbeb91d30.zip
S3: Auto create bucket (#7549)
* auto create buckets
* only admin users can auto create buckets
* Update weed/s3api/s3api_bucket_handlers.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* validate bucket name
* refactor
* error handling
* error
* refetch
* ensure owner
* multiple errors

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Diffstat (limited to 'weed/s3api')
-rw-r--r-- weed/s3api/s3api_bucket_handlers.go          | 99
-rw-r--r-- weed/s3api/s3api_object_handlers_multipart.go | 10
-rw-r--r-- weed/s3api/s3api_object_handlers_put.go       | 19
3 files changed, 111 insertions, 17 deletions
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index eaff6d442..d73fabd2f 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -244,18 +244,8 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
return
}
- fn := func(entry *filer_pb.Entry) {
- // Reuse currentIdentityId from above (already retrieved from context)
- if currentIdentityId != "" {
- if entry.Extended == nil {
- entry.Extended = make(map[string][]byte)
- }
- entry.Extended[s3_constants.AmzIdentityId] = []byte(currentIdentityId)
- }
- }
-
// create the folder for bucket, but lazily create actual collection
- if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket, setBucketOwner(r)); err != nil {
glog.Errorf("PutBucketHandler mkdir: %v", err)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
@@ -565,6 +555,93 @@ func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorC
return s3err.ErrNone
}
+// ErrAutoCreatePermissionDenied is returned when a user lacks permission to auto-create buckets
+var ErrAutoCreatePermissionDenied = errors.New("permission denied - requires Admin permission")
+
+// ErrInvalidBucketName is returned when a bucket name doesn't meet S3 naming requirements
+var ErrInvalidBucketName = errors.New("invalid bucket name")
+
+// setBucketOwner creates a function that sets the bucket owner from the request context
+func setBucketOwner(r *http.Request) func(entry *filer_pb.Entry) {
+ currentIdentityId := s3_constants.GetIdentityNameFromContext(r)
+ return func(entry *filer_pb.Entry) {
+ if currentIdentityId != "" {
+ if entry.Extended == nil {
+ entry.Extended = make(map[string][]byte)
+ }
+ entry.Extended[s3_constants.AmzIdentityId] = []byte(currentIdentityId)
+ }
+ }
+}
+
+// autoCreateBucket creates a bucket if it doesn't exist, setting the owner from the request context
+// Only users with admin permissions are allowed to auto-create buckets
+func (s3a *S3ApiServer) autoCreateBucket(r *http.Request, bucket string) error {
+ // Validate the bucket name before auto-creating
+ if err := s3bucket.VerifyS3BucketName(bucket); err != nil {
+ return fmt.Errorf("auto-create bucket %s: %w", bucket, errors.Join(ErrInvalidBucketName, err))
+ }
+
+ // Check if user has admin permissions
+ if !s3a.isUserAdmin(r) {
+ return fmt.Errorf("auto-create bucket %s: %w", bucket, ErrAutoCreatePermissionDenied)
+ }
+
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket, setBucketOwner(r)); err != nil {
+ // In case of a race condition where another request created the bucket
+ // in the meantime, check for existence before returning an error.
+ if exist, err2 := s3a.exists(s3a.option.BucketsPath, bucket, true); err2 != nil {
+ glog.Warningf("autoCreateBucket: failed to check existence for bucket %s: %v", bucket, err2)
+ return fmt.Errorf("failed to auto-create bucket %s: %w", bucket, errors.Join(err, err2))
+ } else if exist {
+ // The bucket exists, which is fine. However, we should ensure it has an owner.
+ // If it was created by a concurrent request that didn't set an owner,
+ // we'll set it here to ensure consistency.
+ if entry, getErr := s3a.getEntry(s3a.option.BucketsPath, bucket); getErr == nil {
+ if entry.Extended == nil || len(entry.Extended[s3_constants.AmzIdentityId]) == 0 {
+ // No owner set, assign current admin as owner
+ setBucketOwner(r)(entry)
+ if updateErr := s3a.updateEntry(s3a.option.BucketsPath, entry); updateErr != nil {
+ glog.Warningf("autoCreateBucket: failed to set owner for existing bucket %s: %v", bucket, updateErr)
+ } else {
+ glog.V(1).Infof("Set owner for existing bucket %s (created by concurrent request)", bucket)
+ }
+ }
+ } else {
+ glog.Warningf("autoCreateBucket: failed to get entry for existing bucket %s: %v", bucket, getErr)
+ }
+ return nil
+ }
+ return fmt.Errorf("failed to auto-create bucket %s: %w", bucket, err)
+ }
+
+ // Remove bucket from negative cache after successful creation
+ if s3a.bucketConfigCache != nil {
+ s3a.bucketConfigCache.RemoveNegativeCache(bucket)
+ }
+
+ glog.V(1).Infof("Auto-created bucket %s", bucket)
+ return nil
+}
+
+// handleAutoCreateBucket attempts to auto-create a bucket and writes appropriate error responses
+// Returns true if the bucket was created successfully or already exists, false if an error was written
+func (s3a *S3ApiServer) handleAutoCreateBucket(w http.ResponseWriter, r *http.Request, bucket, handlerName string) bool {
+ if err := s3a.autoCreateBucket(r, bucket); err != nil {
+ glog.Warningf("%s: %v", handlerName, err)
+ // Check for specific errors to return appropriate S3 error codes
+ if errors.Is(err, ErrInvalidBucketName) {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInvalidBucketName)
+ } else if errors.Is(err, ErrAutoCreatePermissionDenied) {
+ s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
+ } else {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ }
+ return false
+ }
+ return true
+}
+
func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
// Check if user is properly authenticated as admin through IAM system
if s3a.isUserAdmin(r) {
diff --git a/weed/s3api/s3api_object_handlers_multipart.go b/weed/s3api/s3api_object_handlers_multipart.go
index ba9886d66..2d9f8e620 100644
--- a/weed/s3api/s3api_object_handlers_multipart.go
+++ b/weed/s3api/s3api_object_handlers_multipart.go
@@ -33,8 +33,14 @@ const (
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := s3_constants.GetBucketAndObject(r)
- // Check if bucket exists before creating multipart upload
- if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+ // Check if bucket exists, and create it if it doesn't (auto-create bucket)
+ if err := s3a.checkBucket(r, bucket); err == s3err.ErrNoSuchBucket {
+ // Auto-create bucket if it doesn't exist (requires Admin permission)
+ if !s3a.handleAutoCreateBucket(w, r, bucket, "NewMultipartUploadHandler") {
+ return
+ }
+ } else if err != s3err.ErrNone {
+ // Other errors (like access denied) should still fail
s3err.WriteErrorResponse(w, r, err)
return
}
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index c618405ca..540a1e512 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -135,12 +135,23 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
versioningState, err := s3a.getVersioningState(bucket)
if err != nil {
if errors.Is(err, filer_pb.ErrNotFound) {
- s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
+ // Auto-create bucket if it doesn't exist (requires Admin permission)
+ if !s3a.handleAutoCreateBucket(w, r, bucket, "PutObjectHandler") {
+ return
+ }
+ // Re-fetch versioning state to handle race conditions where
+ // another process might have created the bucket with versioning enabled.
+ versioningState, err = s3a.getVersioningState(bucket)
+ if err != nil {
+ glog.Errorf("Error re-checking versioning status for bucket %s after auto-creation: %v", bucket, err)
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ return
+ }
+ } else {
+ glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err)
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
- glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err)
- s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
- return
}
versioningEnabled := (versioningState == s3_constants.VersioningEnabled)