path: root/weed/s3api/s3api_bucket_handlers.go
author    Chris Lu <chrislusf@users.noreply.github.com>  2025-12-06 21:37:25 -0800
committer GitHub <noreply@github.com>  2025-12-06 21:37:25 -0800
commit    55f0fbf364ca64ee2016d3fed6b8163936f3155d (patch)
tree      00fc09a843db69575bcc03fb69ac14fc392e1ce1 /weed/s3api/s3api_bucket_handlers.go
parent    62a83ed4699292d76267b8d6343d1ed968f485f6 (diff)
s3: optimize DELETE by skipping lock check for buckets without Object Lock (#7642)
This optimization avoids an expensive filer gRPC call for every DELETE operation on buckets that don't have Object Lock enabled. Before this change, enforceObjectLockProtections() would always call getObjectEntry() to fetch object metadata and check for retention/legal hold, even for buckets that never had Object Lock configured.

Changes:
1. Add an early return in enforceObjectLockProtections() if the bucket has no Object Lock config or does not exist.
2. Add an isObjectLockEnabled() helper function to check whether a bucket has Object Lock configured.
3. Fix validateObjectLockHeaders() to check ObjectLockConfig instead of just versioningEnabled, so object-lock headers are properly rejected on buckets without Object Lock enabled, in line with AWS S3 semantics (a hedged sketch of this check follows the diffstat below).
4. Make bucket creation with Object Lock atomic: set the Object Lock config in the same CreateEntry call as the bucket creation, preventing race conditions where the bucket briefly exists without Object Lock enabled.
5. Properly handle Object Lock setup failures during bucket creation: if StoreObjectLockConfigurationInExtended fails, roll back the bucket creation and return an error instead of leaving behind a bucket without the requested Object Lock configuration.

This significantly improves DELETE latency for non-Object-Lock buckets, which is the common case (lockCheck time drops from 1-10ms to ~1µs).
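For orientation, here is a minimal, self-contained Go sketch of the fast-path decision that items 1 and 2 describe. The bucketConfig type and deleteNeedsLockCheck helper are illustrative stand-ins, not the actual SeaweedFS types; the real code consults the cached bucket configuration inside enforceObjectLockProtections().

    package main

    import "fmt"

    // Minimal model of the bucket configuration relevant to the fast path.
    // A nil ObjectLockConfig means Object Lock was never configured.
    type bucketConfig struct {
    	ObjectLockConfig *struct{}
    }

    // deleteNeedsLockCheck models the early return: only buckets whose config
    // carries an Object Lock configuration require the per-object metadata
    // lookup (getObjectEntry) before a DELETE is allowed to proceed.
    func deleteNeedsLockCheck(config *bucketConfig) bool {
    	if config == nil || config.ObjectLockConfig == nil {
    		return false // skip the filer gRPC call entirely
    	}
    	return true
    }

    func main() {
    	plain := &bucketConfig{}                                // no Object Lock
    	locked := &bucketConfig{ObjectLockConfig: &struct{}{}}  // Object Lock enabled
    	fmt.Println(deleteNeedsLockCheck(plain))  // false: DELETE takes the fast path
    	fmt.Println(deleteNeedsLockCheck(locked)) // true: retention/legal hold is checked
    }

Because the cached config is already in memory (the diff below touches s3a.bucketConfigCache), this check is effectively free; only buckets that actually carry an Object Lock configuration pay for the getObjectEntry() filer round trip.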
Diffstat (limited to 'weed/s3api/s3api_bucket_handlers.go')
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go  78
1 files changed, 48 insertions, 30 deletions
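Item 3 of the commit message concerns validateObjectLockHeaders(), which lives outside this file, so that change is not visible in the diff below. The following self-contained sketch only illustrates the described behavior; the function names and signature are assumptions, not the real SeaweedFS API.

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // hasObjectLockHeaders reports whether the request carries any of the
    // standard S3 object-lock headers.
    func hasObjectLockHeaders(r *http.Request) bool {
    	return r.Header.Get("X-Amz-Object-Lock-Mode") != "" ||
    		r.Header.Get("X-Amz-Object-Lock-Retain-Until-Date") != "" ||
    		r.Header.Get("X-Amz-Object-Lock-Legal-Hold") != ""
    }

    // validateLockHeadersSketch models item 3: object-lock headers are only
    // accepted when the bucket actually has Object Lock configured, not merely
    // versioning enabled.
    func validateLockHeadersSketch(r *http.Request, objectLockConfigured bool) error {
    	if hasObjectLockHeaders(r) && !objectLockConfigured {
    		return fmt.Errorf("object lock headers require a bucket with Object Lock enabled")
    	}
    	return nil
    }

    func main() {
    	req, _ := http.NewRequest(http.MethodPut, "http://example.invalid/bucket/key", nil)
    	req.Header.Set("X-Amz-Object-Lock-Mode", "COMPLIANCE")
    	fmt.Println(validateLockHeadersSketch(req, false)) // error: bucket has no Object Lock
    	fmt.Println(validateLockHeadersSketch(req, true))  // <nil>: headers accepted
    }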
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index f0704fe23..a810dfd37 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -244,46 +244,64 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
return
}
- // create the folder for bucket, but lazily create actual collection
- if err := s3a.mkdir(s3a.option.BucketsPath, bucket, setBucketOwner(r)); err != nil {
- glog.Errorf("PutBucketHandler mkdir: %v", err)
- s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
- return
- }
+ // Check for x-amz-bucket-object-lock-enabled header BEFORE creating bucket
+ // This allows us to create the bucket with Object Lock configuration atomically
+ objectLockEnabled := strings.EqualFold(r.Header.Get(s3_constants.AmzBucketObjectLockEnabled), "true")
- // Remove bucket from negative cache after successful creation
- if s3a.bucketConfigCache != nil {
- s3a.bucketConfigCache.RemoveNegativeCache(bucket)
- }
+ // Capture any Object Lock configuration error from within the callback
+ // The mkdir callback doesn't support returning errors, so we capture it here
+ var objectLockSetupError error
- // Check for x-amz-bucket-object-lock-enabled header (S3 standard compliance)
- if objectLockHeaderValue := r.Header.Get(s3_constants.AmzBucketObjectLockEnabled); strings.EqualFold(objectLockHeaderValue, "true") {
- glog.V(3).Infof("PutBucketHandler: enabling Object Lock and Versioning for bucket %s due to x-amz-bucket-object-lock-enabled header", bucket)
+ // Create the folder for bucket with all settings atomically
+ // This ensures Object Lock configuration is set in the same CreateEntry call,
+ // preventing race conditions where the bucket exists without Object Lock enabled
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket, func(entry *filer_pb.Entry) {
+ // Set bucket owner
+ setBucketOwner(r)(entry)
+
+ // Set Object Lock configuration atomically during bucket creation
+ if objectLockEnabled {
+ glog.V(3).Infof("PutBucketHandler: enabling Object Lock and Versioning for bucket %s atomically", bucket)
+
+ if entry.Extended == nil {
+ entry.Extended = make(map[string][]byte)
+ }
- // Atomically update the configuration of the specified bucket. See the updateBucketConfig
- // function definition for detailed documentation on parameters and behavior.
- errCode := s3a.updateBucketConfig(bucket, func(bucketConfig *BucketConfig) error {
// Enable versioning (required for Object Lock)
- bucketConfig.Versioning = s3_constants.VersioningEnabled
+ entry.Extended[s3_constants.ExtVersioningKey] = []byte(s3_constants.VersioningEnabled)
- // Create basic Object Lock configuration (enabled without default retention)
+ // Create and store Object Lock configuration
objectLockConfig := &ObjectLockConfiguration{
ObjectLockEnabled: s3_constants.ObjectLockEnabled,
}
+ if err := StoreObjectLockConfigurationInExtended(entry, objectLockConfig); err != nil {
+ glog.Errorf("PutBucketHandler: failed to store Object Lock config for bucket %s: %v", bucket, err)
+ objectLockSetupError = err
+ // Note: The entry will still be created, but we'll roll it back below
+ } else {
+ glog.V(3).Infof("PutBucketHandler: set ObjectLockConfig for bucket %s: %+v", bucket, objectLockConfig)
+ }
+ }
+ }); err != nil {
+ glog.Errorf("PutBucketHandler mkdir: %v", err)
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ return
+ }
- // Set the cached Object Lock configuration
- bucketConfig.ObjectLockConfig = objectLockConfig
- glog.V(3).Infof("PutBucketHandler: set ObjectLockConfig for bucket %s: %+v", bucket, objectLockConfig)
-
- return nil
- })
-
- if errCode != s3err.ErrNone {
- glog.Errorf("PutBucketHandler: failed to enable Object Lock for bucket %s: %v", bucket, errCode)
- s3err.WriteErrorResponse(w, r, errCode)
- return
+ // If Object Lock setup failed, roll back the bucket creation
+ // This ensures we don't leave a bucket without the requested Object Lock configuration
+ if objectLockSetupError != nil {
+ glog.Errorf("PutBucketHandler: rolling back bucket %s creation due to Object Lock setup failure: %v", bucket, objectLockSetupError)
+ if deleteErr := s3a.rm(s3a.option.BucketsPath, bucket, true, true); deleteErr != nil {
+ glog.Errorf("PutBucketHandler: failed to rollback bucket %s after Object Lock setup failure: %v", bucket, deleteErr)
}
- glog.V(3).Infof("PutBucketHandler: enabled Object Lock and Versioning for bucket %s", bucket)
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ return
+ }
+
+ // Remove bucket from negative cache after successful creation
+ if s3a.bucketConfigCache != nil {
+ s3a.bucketConfigCache.RemoveNegativeCache(bucket)
}
w.Header().Set("Location", "/"+bucket)