author     Chris Lu <chrislusf@users.noreply.github.com>  2025-12-06 21:37:25 -0800
committer  GitHub <noreply@github.com>  2025-12-06 21:37:25 -0800
commit     55f0fbf364ca64ee2016d3fed6b8163936f3155d (patch)
tree       00fc09a843db69575bcc03fb69ac14fc392e1ce1 /weed/s3api/s3api_object_retention.go
parent     62a83ed4699292d76267b8d6343d1ed968f485f6 (diff)
download   seaweedfs-55f0fbf364ca64ee2016d3fed6b8163936f3155d.tar.xz
           seaweedfs-55f0fbf364ca64ee2016d3fed6b8163936f3155d.zip
s3: optimize DELETE by skipping lock check for buckets without Object Lock (#7642)
This optimization avoids an expensive filer gRPC call for every DELETE operation on buckets that don't have Object Lock enabled. Before this change, enforceObjectLockProtections() would always call getObjectEntry() to fetch object metadata and check for retention/legal hold, even for buckets that never had Object Lock configured.

Changes:

1. Add an early return in enforceObjectLockProtections() if the bucket has no Object Lock config or the bucket doesn't exist.
2. Add an isObjectLockEnabled() helper function to check whether a bucket has Object Lock configured (see the sketch after this message).
3. Fix validateObjectLockHeaders() to check ObjectLockConfig instead of just versioningEnabled. This ensures object-lock headers are properly rejected on buckets without Object Lock enabled, which aligns with AWS S3 semantics.
4. Make bucket creation with Object Lock atomic: set the Object Lock config in the same CreateEntry call as the bucket creation, preventing race conditions where the bucket briefly exists without Object Lock enabled.
5. Properly handle Object Lock setup failures during bucket creation: if StoreObjectLockConfigurationInExtended fails, roll back the bucket creation and return an error instead of leaving behind a bucket without the requested Object Lock configuration.

This significantly improves DELETE latency for non-Object-Lock buckets, which is the common case (lockCheck time reduced from 1-10ms to ~1µs).
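The isObjectLockEnabled() helper referenced in change 2 is added in a different file of this commit, so it does not appear in the diff below. The following is a minimal sketch of the idea only, assuming the gateway keeps a cheap per-bucket configuration lookup; bucketConfig, lookupBucketConfig, and errBucketNotFound are illustrative placeholders, not the actual SeaweedFS API.

package objectlocksketch

import "errors"

// errBucketNotFound is a placeholder for the filer's "entry not found" error
// (the real code checks errors.Is(err, filer_pb.ErrNotFound)).
var errBucketNotFound = errors.New("bucket not found")

// objectLockConfig stands in for the bucket's stored Object Lock configuration.
type objectLockConfig struct {
	Enabled bool
}

// bucketConfig stands in for the per-bucket metadata the S3 gateway keeps.
// ObjectLockConfig is nil when the bucket was never created with Object Lock.
type bucketConfig struct {
	ObjectLockConfig *objectLockConfig
}

// lookupBucketConfig stands in for the (typically cached) bucket-config lookup,
// which is far cheaper than fetching an object's entry from the filer.
func lookupBucketConfig(bucket string) (*bucketConfig, error) {
	// ... read from an in-memory cache, falling back to the filer ...
	return nil, errBucketNotFound
}

// isObjectLockEnabled reports whether the bucket has Object Lock configured.
// Callers such as enforceObjectLockProtections() can return early when this is
// false, skipping the per-object filer gRPC call on every DELETE.
func isObjectLockEnabled(bucket string) (bool, error) {
	conf, err := lookupBucketConfig(bucket)
	if err != nil {
		return false, err // the caller treats "bucket not found" as "nothing to enforce"
	}
	return conf.ObjectLockConfig != nil && conf.ObjectLockConfig.Enabled, nil
}

The ~1µs lockCheck time quoted above is consistent with this lookup hitting locally cached bucket metadata rather than making a filer round trip per object.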
Diffstat (limited to 'weed/s3api/s3api_object_retention.go')
-rw-r--r--  weed/s3api/s3api_object_retention.go  18
1 file changed, 17 insertions, 1 deletion
diff --git a/weed/s3api/s3api_object_retention.go b/weed/s3api/s3api_object_retention.go
index ef298eb43..328e938c5 100644
--- a/weed/s3api/s3api_object_retention.go
+++ b/weed/s3api/s3api_object_retention.go
@@ -586,10 +586,26 @@ func (s3a *S3ApiServer) evaluateGovernanceBypassRequest(r *http.Request, bucket,
 // enforceObjectLockProtections enforces object lock protections for operations
 func (s3a *S3ApiServer) enforceObjectLockProtections(request *http.Request, bucket, object, versionId string, governanceBypassAllowed bool) error {
+	// Quick check: if bucket doesn't have Object Lock enabled, skip the expensive entry lookup
+	// This optimization avoids a filer gRPC call for every DELETE operation on buckets without Object Lock
+	objectLockEnabled, err := s3a.isObjectLockEnabled(bucket)
+	if err != nil {
+		if errors.Is(err, filer_pb.ErrNotFound) {
+			// Bucket does not exist, so no protections to enforce
+			return nil
+		}
+		// For other errors, we can't determine lock status, so we should fail.
+		glog.Errorf("enforceObjectLockProtections: failed to check object lock for bucket %s: %v", bucket, err)
+		return err
+	}
+	if !objectLockEnabled {
+		// Object Lock is not enabled on this bucket, no protections to enforce
+		return nil
+	}
+
 	// Get the object entry to check both retention and legal hold
 	// For delete operations without versionId, we need to check the latest version
 	var entry *filer_pb.Entry
-	var err error
 	if versionId != "" {
 		// Check specific version