Diffstat (limited to 'test/s3')
-rw-r--r--  test/s3/fix_s3_tests_bucket_conflicts.py            284
-rw-r--r--  test/s3/iam/docker-compose-simple.yml                 2
-rw-r--r--  test/s3/iam/docker-compose.test.yml                   2
-rw-r--r--  test/s3/iam/docker-compose.yml                        2
-rwxr-xr-x  test/s3/iam/run_all_tests.sh                         14
-rwxr-xr-x  test/s3/iam/run_performance_tests.sh                  2
-rwxr-xr-x  test/s3/iam/run_stress_tests.sh                       2
-rw-r--r--  test/s3/iam/s3_iam_distributed_test.go                4
-rw-r--r--  test/s3/iam/s3_iam_framework.go                      22
-rw-r--r--  test/s3/iam/s3_iam_integration_test.go               92
-rwxr-xr-x  test/s3/iam/setup_all_tests.sh                       32
-rwxr-xr-x  test/s3/iam/setup_keycloak.sh                        64
-rwxr-xr-x  test/s3/iam/setup_keycloak_docker.sh                 26
-rw-r--r--  test/s3/retention/object_lock_reproduce_test.go      14
-rw-r--r--  test/s3/retention/object_lock_validation_test.go     20
-rw-r--r--  test/s3/sse/docker-compose.yml                        2
-rw-r--r--  test/s3/sse/s3_sse_multipart_copy_test.go             2
-rwxr-xr-x  test/s3/sse/setup_openbao_sse.sh                     20
-rw-r--r--  test/s3/sse/simple_sse_test.go                        6
-rw-r--r--  test/s3/sse/sse_kms_openbao_test.go                   4
-rw-r--r--  test/s3/versioning/s3_bucket_creation_test.go       266
-rw-r--r--  test/s3/versioning/s3_directory_versioning_test.go    2
-rw-r--r--  test/s3/versioning/s3_suspended_versioning_test.go  257
23 files changed, 971 insertions, 170 deletions
diff --git a/test/s3/fix_s3_tests_bucket_conflicts.py b/test/s3/fix_s3_tests_bucket_conflicts.py
new file mode 100644
index 000000000..9fb71684a
--- /dev/null
+++ b/test/s3/fix_s3_tests_bucket_conflicts.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+"""
+Patch Ceph s3-tests helpers to avoid bucket name mismatches and make bucket
+creation idempotent when a fixed bucket name is provided.
+
+Why:
+- Some tests call get_new_bucket() to get a name, then call
+ get_new_bucket_resource(name=<that name>) which unconditionally calls
+ CreateBucket again. If the bucket already exists, boto3 raises a
+ ClientError. We want to treat that as idempotent and reuse the bucket.
+- We must NOT silently generate a different bucket name when a name is
+ explicitly provided, otherwise subsequent test steps still reference the
+ original string and read from the wrong (empty) bucket.
+
+What this does:
+- get_new_bucket_resource(name=...):
+ - Try to create the exact bucket name.
+ - If error code is BucketAlreadyOwnedByYou OR BucketAlreadyExists, simply
+ reuse and return the bucket object for that SAME name.
+ - Only when name is None, generate a new unique name with retries.
+- get_new_bucket(client=None, name=None):
+ - If name is None, generate unique names with retries until creation
+ succeeds, and return the actual name string to the caller.
+
+This keeps bucket names consistent across the test helper calls and prevents
+404s or KeyErrors later in the tests that depend on that bucket name.
+"""
+
+import os
+import sys
+
+
+def patch_s3_tests_init_file(file_path: str) -> bool:
+ if not os.path.exists(file_path):
+ print(f"Error: File {file_path} not found")
+ return False
+
+ print(f"Patching {file_path}...")
+ with open(file_path, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ # If already patched, skip
+ if "max_retries = 10" in content and "BucketAlreadyOwnedByYou" in content and "BucketAlreadyExists" in content:
+ print("Already patched. Skipping.")
+ return True
+
+ old_resource_func = '''def get_new_bucket_resource(name=None):
+ """
+ Get a bucket that exists and is empty.
+
+ Always recreates a bucket from scratch. This is useful to also
+ reset ACLs and such.
+ """
+ s3 = boto3.resource('s3',
+ aws_access_key_id=config.main_access_key,
+ aws_secret_access_key=config.main_secret_key,
+ endpoint_url=config.default_endpoint,
+ use_ssl=config.default_is_secure,
+ verify=config.default_ssl_verify)
+ if name is None:
+ name = get_new_bucket_name()
+ bucket = s3.Bucket(name)
+ bucket_location = bucket.create()
+ return bucket'''
+
+ new_resource_func = '''def get_new_bucket_resource(name=None):
+ """
+ Get a bucket that exists and is empty.
+
+ Always recreates a bucket from scratch. This is useful to also
+ reset ACLs and such.
+ """
+ s3 = boto3.resource('s3',
+ aws_access_key_id=config.main_access_key,
+ aws_secret_access_key=config.main_secret_key,
+ endpoint_url=config.default_endpoint,
+ use_ssl=config.default_is_secure,
+ verify=config.default_ssl_verify)
+
+ from botocore.exceptions import ClientError
+
+ # If a name is provided, do not change it. Reuse that exact bucket name.
+ if name is not None:
+ bucket = s3.Bucket(name)
+ try:
+ bucket.create()
+ except ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'):
+ # Treat as idempotent create for an explicitly provided name.
+ # We must not change the name or tests will read from the wrong bucket.
+ return bucket
+ # Other errors should surface
+ raise
+ else:
+ return bucket
+
+ # Only generate unique names when no name was provided
+ max_retries = 10
+ for attempt in range(max_retries):
+ gen_name = get_new_bucket_name()
+ bucket = s3.Bucket(gen_name)
+ try:
+ bucket.create()
+ return bucket
+ except ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
+ if attempt == max_retries - 1:
+ raise Exception(f"Failed to create unique bucket after {max_retries} attempts")
+ continue
+ else:
+ raise'''
+
+ old_client_func = '''def get_new_bucket(client=None, name=None):
+ """
+ Get a bucket that exists and is empty.
+
+ Always recreates a bucket from scratch. This is useful to also
+ reset ACLs and such.
+ """
+ if client is None:
+ client = get_client()
+ if name is None:
+ name = get_new_bucket_name()
+
+ client.create_bucket(Bucket=name)
+ return name'''
+
+ new_client_func = '''def get_new_bucket(client=None, name=None):
+ """
+ Get a bucket that exists and is empty.
+
+ Always recreates a bucket from scratch. This is useful to also
+ reset ACLs and such.
+ """
+ if client is None:
+ client = get_client()
+
+ from botocore.exceptions import ClientError
+
+ # If a name is provided, just try to create it once and fall back to idempotent reuse
+ if name is not None:
+ try:
+ client.create_bucket(Bucket=name)
+ except ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'):
+ return name
+ raise
+ else:
+ return name
+
+ # Otherwise, generate a unique name with retries and return the actual name string
+ max_retries = 10
+ for attempt in range(max_retries):
+ gen_name = get_new_bucket_name()
+ try:
+ client.create_bucket(Bucket=gen_name)
+ return gen_name
+ except ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
+ if attempt == max_retries - 1:
+ raise Exception(f"Failed to create unique bucket after {max_retries} attempts")
+ continue
+ else:
+ raise'''
+
+ updated = content
+ updated = updated.replace(old_resource_func, new_resource_func)
+ updated = updated.replace(old_client_func, new_client_func)
+
+ if updated == content:
+ print("Patterns not found; appending override implementations to end of file.")
+ append_patch = '''
+
+# --- SeaweedFS override start ---
+from botocore.exceptions import ClientError as _Sw_ClientError
+
+
+# Idempotent create for provided name; generate unique only when no name given
+# Keep the bucket name stable when provided by the caller
+
+def _sw_get_new_bucket_resource(name=None):
+ s3 = boto3.resource('s3',
+ aws_access_key_id=config.main_access_key,
+ aws_secret_access_key=config.main_secret_key,
+ endpoint_url=config.default_endpoint,
+ use_ssl=config.default_is_secure,
+ verify=config.default_ssl_verify)
+ if name is not None:
+ bucket = s3.Bucket(name)
+ try:
+ bucket.create()
+ except _Sw_ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'):
+ return bucket
+ raise
+ else:
+ return bucket
+ # name not provided: generate unique
+ max_retries = 10
+ for attempt in range(max_retries):
+ gen_name = get_new_bucket_name()
+ bucket = s3.Bucket(gen_name)
+ try:
+ bucket.create()
+ return bucket
+ except _Sw_ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
+ if attempt == max_retries - 1:
+ raise Exception(f"Failed to create unique bucket after {max_retries} attempts")
+ continue
+ else:
+ raise
+
+
+from botocore.exceptions import ClientError as _Sw2_ClientError
+
+
+def _sw_get_new_bucket(client=None, name=None):
+ if client is None:
+ client = get_client()
+ if name is not None:
+ try:
+ client.create_bucket(Bucket=name)
+ except _Sw2_ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'):
+ return name
+ raise
+ else:
+ return name
+ max_retries = 10
+ for attempt in range(max_retries):
+ gen_name = get_new_bucket_name()
+ try:
+ client.create_bucket(Bucket=gen_name)
+ return gen_name
+ except _Sw2_ClientError as e:
+ code = e.response.get('Error', {}).get('Code')
+ if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
+ if attempt == max_retries - 1:
+ raise Exception(f"Failed to create unique bucket after {max_retries} attempts")
+ continue
+ else:
+ raise
+
+# Override original helper functions
+get_new_bucket_resource = _sw_get_new_bucket_resource
+get_new_bucket = _sw_get_new_bucket
+# --- SeaweedFS override end ---
+'''
+ with open(file_path, "a", encoding="utf-8") as f:
+ f.write(append_patch)
+ print("Appended override implementations.")
+ return True
+
+ with open(file_path, "w", encoding="utf-8") as f:
+ f.write(updated)
+
+ print("Successfully patched s3-tests helpers.")
+ return True
+
+
+def main() -> int:
+ s3_tests_path = os.environ.get("S3_TESTS_PATH", "s3-tests")
+ init_file_path = os.path.join(s3_tests_path, "s3tests_boto3", "functional", "__init__.py")
+ print("Applying s3-tests patch for bucket creation idempotency...")
+ print(f"Target repo path: {s3_tests_path}")
+ if not os.path.exists(s3_tests_path):
+ print(f"Error: s3-tests directory not found at {s3_tests_path}")
+ return 1
+ ok = patch_s3_tests_init_file(init_file_path)
+ return 0 if ok else 1
+
+
+if __name__ == "__main__":
+ sys.exit(main())
+
+
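
Reviewer note: the failure mode this script patches around, as a minimal boto3 sketch (endpoint and bucket name are illustrative, not taken from the suite). Unpatched, get_new_bucket() hands back a name and get_new_bucket_resource(name=...) re-creates it unconditionally, which raises on the second create:

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.resource("s3", endpoint_url="http://localhost:8333")  # illustrative endpoint
    name = "shared-test-bucket"  # illustrative fixed name

    s3.Bucket(name).create()      # first create succeeds
    try:
        s3.Bucket(name).create()  # unpatched helper path: unconditional re-create
    except ClientError as e:
        code = e.response.get("Error", {}).get("Code")
        # The patched helpers swallow exactly these two codes and reuse the
        # SAME name rather than silently switching to a freshly generated one.
        assert code in ("BucketAlreadyOwnedByYou", "BucketAlreadyExists")

The script is applied against a checkout via the environment, e.g. S3_TESTS_PATH=/path/to/s3-tests python3 test/s3/fix_s3_tests_bucket_conflicts.py.
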
diff --git a/test/s3/iam/docker-compose-simple.yml b/test/s3/iam/docker-compose-simple.yml
index 9e3b91e42..b52a158a3 100644
--- a/test/s3/iam/docker-compose-simple.yml
+++ b/test/s3/iam/docker-compose-simple.yml
@@ -1,5 +1,3 @@
-version: '3.8'
-
services:
# Keycloak Identity Provider
keycloak:
diff --git a/test/s3/iam/docker-compose.test.yml b/test/s3/iam/docker-compose.test.yml
index e759f63dc..bb229cfc3 100644
--- a/test/s3/iam/docker-compose.test.yml
+++ b/test/s3/iam/docker-compose.test.yml
@@ -1,6 +1,4 @@
# Docker Compose for SeaweedFS S3 IAM Integration Tests
-version: '3.8'
-
services:
# SeaweedFS Master
seaweedfs-master:
diff --git a/test/s3/iam/docker-compose.yml b/test/s3/iam/docker-compose.yml
index 9e9c00f6d..fd3e3039f 100644
--- a/test/s3/iam/docker-compose.yml
+++ b/test/s3/iam/docker-compose.yml
@@ -1,5 +1,3 @@
-version: '3.8'
-
services:
# Keycloak Identity Provider
keycloak:
diff --git a/test/s3/iam/run_all_tests.sh b/test/s3/iam/run_all_tests.sh
index f5c2cea59..7bb8ba956 100755
--- a/test/s3/iam/run_all_tests.sh
+++ b/test/s3/iam/run_all_tests.sh
@@ -34,10 +34,10 @@ run_test_category() {
echo -e "${YELLOW}๐Ÿงช Running $description...${NC}"
if go test -v -timeout=$TEST_TIMEOUT -run "$test_pattern" ./...; then
- echo -e "${GREEN}โœ… $description completed successfully${NC}"
+ echo -e "${GREEN}[OK] $description completed successfully${NC}"
return 0
else
- echo -e "${RED}โŒ $description failed${NC}"
+ echo -e "${RED}[FAIL] $description failed${NC}"
return 1
fi
}
@@ -83,10 +83,10 @@ fi
echo -e "\n${BLUE}5. Benchmark Tests${NC}"
TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1))
if go test -bench=. -benchmem -timeout=$TEST_TIMEOUT ./...; then
- echo -e "${GREEN}โœ… Benchmark tests completed successfully${NC}"
+ echo -e "${GREEN}[OK] Benchmark tests completed successfully${NC}"
PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1))
else
- echo -e "${RED}โŒ Benchmark tests failed${NC}"
+ echo -e "${RED}[FAIL] Benchmark tests failed${NC}"
fi
# 6. Versioning Stress Tests
@@ -94,10 +94,10 @@ echo -e "\n${BLUE}6. S3 Versioning Stress Tests${NC}"
TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1))
if [ -f "../versioning/enable_stress_tests.sh" ]; then
if (cd ../versioning && ./enable_stress_tests.sh); then
- echo -e "${GREEN}โœ… Versioning stress tests completed successfully${NC}"
+ echo -e "${GREEN}[OK] Versioning stress tests completed successfully${NC}"
PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1))
else
- echo -e "${RED}โŒ Versioning stress tests failed${NC}"
+ echo -e "${RED}[FAIL] Versioning stress tests failed${NC}"
fi
else
echo -e "${YELLOW}โš ๏ธ Versioning stress tests not available${NC}"
@@ -114,6 +114,6 @@ if [ $PASSED_CATEGORIES -eq $TOTAL_CATEGORIES ]; then
echo -e "\n${GREEN}๐ŸŽ‰ All test categories passed!${NC}"
exit 0
else
- echo -e "\n${RED}โŒ Some test categories failed${NC}"
+ echo -e "\n${RED}[FAIL] Some test categories failed${NC}"
exit 1
fi
diff --git a/test/s3/iam/run_performance_tests.sh b/test/s3/iam/run_performance_tests.sh
index 293632b2c..e8e8983fb 100755
--- a/test/s3/iam/run_performance_tests.sh
+++ b/test/s3/iam/run_performance_tests.sh
@@ -23,4 +23,4 @@ go test -bench=. -benchmem -timeout=$TEST_TIMEOUT ./...
echo -e "${YELLOW}๐Ÿงช Running performance test suite...${NC}"
go test -v -timeout=$TEST_TIMEOUT -run "TestS3IAMPerformanceTests" ./...
-echo -e "${GREEN}โœ… Performance tests completed${NC}"
+echo -e "${GREEN}[OK] Performance tests completed${NC}"
diff --git a/test/s3/iam/run_stress_tests.sh b/test/s3/iam/run_stress_tests.sh
index a302c4488..d7520012a 100755
--- a/test/s3/iam/run_stress_tests.sh
+++ b/test/s3/iam/run_stress_tests.sh
@@ -33,4 +33,4 @@ for i in $(seq 1 $STRESS_ITERATIONS); do
sleep 2
done
-echo -e "${GREEN}โœ… All stress test iterations completed successfully${NC}"
+echo -e "${GREEN}[OK] All stress test iterations completed successfully${NC}"
diff --git a/test/s3/iam/s3_iam_distributed_test.go b/test/s3/iam/s3_iam_distributed_test.go
index 545a56bcb..fbaf25e9d 100644
--- a/test/s3/iam/s3_iam_distributed_test.go
+++ b/test/s3/iam/s3_iam_distributed_test.go
@@ -243,7 +243,7 @@ func TestS3IAMDistributedTests(t *testing.T) {
// Report results
if len(errorList) == 0 {
- t.Logf("๐ŸŽ‰ All %d concurrent operations completed successfully with retry mechanisms!", totalOperations)
+ t.Logf("All %d concurrent operations completed successfully with retry mechanisms!", totalOperations)
} else {
t.Logf("Concurrent operations summary:")
t.Logf(" Total operations: %d", totalOperations)
@@ -262,7 +262,7 @@ func TestS3IAMDistributedTests(t *testing.T) {
// With proper retry mechanisms, we should expect near-zero failures
// Any remaining errors likely indicate real concurrency issues or system problems
if len(errorList) > 0 {
- t.Errorf("โŒ %d operation(s) failed even after retry mechanisms (%.1f%% failure rate). This indicates potential system issues or race conditions that need investigation.",
+ t.Errorf("%d operation(s) failed even after retry mechanisms (%.1f%% failure rate). This indicates potential system issues or race conditions that need investigation.",
len(errorList), float64(len(errorList))/float64(totalOperations)*100)
}
})
diff --git a/test/s3/iam/s3_iam_framework.go b/test/s3/iam/s3_iam_framework.go
index aee70e4a1..92e880bdc 100644
--- a/test/s3/iam/s3_iam_framework.go
+++ b/test/s3/iam/s3_iam_framework.go
@@ -333,7 +333,7 @@ func (t *BearerTokenTransport) extractPrincipalFromJWT(tokenString string) strin
// This is safe because the actual validation happens server-side
return []byte("dummy-key"), nil
})
-
+
// Even if parsing fails due to signature verification, we might still get claims
if claims, ok := token.Claims.(jwt.MapClaims); ok {
// Try multiple possible claim names for the principal ARN
@@ -348,7 +348,7 @@ func (t *BearerTokenTransport) extractPrincipalFromJWT(tokenString string) strin
}
}
}
-
+
return ""
}
@@ -693,13 +693,25 @@ func (f *S3IAMTestFramework) CreateBucketWithCleanup(s3Client *s3.S3, bucketName
if err != nil {
// If bucket already exists, clean it up first
- if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "BucketAlreadyExists" {
+ if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "BucketAlreadyExists" || awsErr.Code() == "BucketAlreadyOwnedByYou") {
f.t.Logf("Bucket %s already exists, cleaning up first", bucketName)
- // Empty the existing bucket
+ // First try to delete the bucket completely
f.emptyBucket(s3Client, bucketName)
+ _, deleteErr := s3Client.DeleteBucket(&s3.DeleteBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ if deleteErr != nil {
+ f.t.Logf("Warning: Failed to delete existing bucket %s: %v", bucketName, deleteErr)
+ }
- // Don't need to recreate - bucket already exists and is now empty
+ // Now create it fresh
+ _, err = s3Client.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to recreate bucket after cleanup: %v", err)
+ }
} else {
return err
}
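
Reviewer note: the new CreateBucketWithCleanup flow (empty, delete, then recreate on a name conflict) maps onto this minimal boto3 sketch; the Go diff above is the actual implementation, and the helper below is only an illustration of the pattern.

    import boto3
    from botocore.exceptions import ClientError

    def create_bucket_fresh(client, bucket):
        """Create `bucket`; on a name conflict, empty and delete it, then recreate."""
        try:
            client.create_bucket(Bucket=bucket)
        except ClientError as e:
            if e.response["Error"]["Code"] not in ("BucketAlreadyExists", "BucketAlreadyOwnedByYou"):
                raise
            # Empty the existing bucket, then delete and recreate it so the
            # test starts from a clean, freshly created bucket.
            for obj in client.list_objects_v2(Bucket=bucket).get("Contents", []):
                client.delete_object(Bucket=bucket, Key=obj["Key"])
            client.delete_bucket(Bucket=bucket)
            client.create_bucket(Bucket=bucket)
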
diff --git a/test/s3/iam/s3_iam_integration_test.go b/test/s3/iam/s3_iam_integration_test.go
index 5c89bda6f..c7836c4bf 100644
--- a/test/s3/iam/s3_iam_integration_test.go
+++ b/test/s3/iam/s3_iam_integration_test.go
@@ -1,7 +1,6 @@
package iam
import (
- "bytes"
"fmt"
"io"
"strings"
@@ -15,15 +14,11 @@ import (
)
const (
- testEndpoint = "http://localhost:8333"
- testRegion = "us-west-2"
- testBucketPrefix = "test-iam-bucket"
- testObjectKey = "test-object.txt"
- testObjectData = "Hello, SeaweedFS IAM Integration!"
-)
-
-var (
- testBucket = testBucketPrefix
+ testEndpoint = "http://localhost:8333"
+ testRegion = "us-west-2"
+ testBucket = "test-iam-bucket"
+ testObjectKey = "test-object.txt"
+ testObjectData = "Hello, SeaweedFS IAM Integration!"
)
// TestS3IAMAuthentication tests S3 API authentication with IAM JWT tokens
@@ -98,12 +93,14 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
require.NoError(t, err)
- err = framework.CreateBucket(adminClient, testBucket)
+ // Use unique bucket name to avoid collection conflicts
+ bucketName := framework.GenerateUniqueBucketName("test-iam-policy")
+ err = framework.CreateBucket(adminClient, bucketName)
require.NoError(t, err)
// Put test object with admin client
_, err = adminClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
Body: strings.NewReader(testObjectData),
})
@@ -116,7 +113,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to read objects
result, err := readOnlyClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.NoError(t, err)
@@ -128,7 +125,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to list objects
listResult, err := readOnlyClient.ListObjects(&s3.ListObjectsInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
assert.Len(t, listResult.Contents, 1)
@@ -136,7 +133,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should NOT be able to put objects
_, err = readOnlyClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String("forbidden-object.txt"),
Body: strings.NewReader("This should fail"),
})
@@ -147,7 +144,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should NOT be able to delete objects
_, err = readOnlyClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.Error(t, err)
@@ -166,7 +163,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
testWriteData := "Write-only test data"
_, err = writeOnlyClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testWriteKey),
Body: strings.NewReader(testWriteData),
})
@@ -174,14 +171,14 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to delete objects
_, err = writeOnlyClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testWriteKey),
})
require.NoError(t, err)
// Should NOT be able to read objects
_, err = writeOnlyClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.Error(t, err)
@@ -191,7 +188,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should NOT be able to list objects
_, err = writeOnlyClient.ListObjects(&s3.ListObjectsInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.Error(t, err)
if awsErr, ok := err.(awserr.Error); ok {
@@ -206,7 +203,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to put objects
_, err = adminClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testAdminKey),
Body: strings.NewReader(testAdminData),
})
@@ -214,7 +211,7 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to read objects
result, err := adminClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testAdminKey),
})
require.NoError(t, err)
@@ -226,14 +223,14 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to list objects
listResult, err := adminClient.ListObjects(&s3.ListObjectsInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
assert.GreaterOrEqual(t, len(listResult.Contents), 1)
// Should be able to delete objects
_, err = adminClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testAdminKey),
})
require.NoError(t, err)
@@ -241,14 +238,14 @@ func TestS3IAMPolicyEnforcement(t *testing.T) {
// Should be able to delete buckets
// First delete remaining objects
_, err = adminClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.NoError(t, err)
// Then delete the bucket
_, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
})
@@ -398,7 +395,9 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
require.NoError(t, err)
- err = framework.CreateBucket(adminClient, testBucket)
+ // Use unique bucket name to avoid collection conflicts
+ bucketName := framework.GenerateUniqueBucketName("test-iam-bucket-policy")
+ err = framework.CreateBucket(adminClient, bucketName)
require.NoError(t, err)
t.Run("bucket_policy_allows_public_read", func(t *testing.T) {
@@ -414,17 +413,17 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
"Resource": ["arn:seaweed:s3:::%s/*"]
}
]
- }`, testBucket)
+ }`, bucketName)
_, err = adminClient.PutBucketPolicy(&s3.PutBucketPolicyInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Policy: aws.String(bucketPolicy),
})
require.NoError(t, err)
// Put test object
_, err = adminClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
Body: strings.NewReader(testObjectData),
})
@@ -435,7 +434,7 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
require.NoError(t, err)
result, err := readOnlyClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.NoError(t, err)
@@ -459,17 +458,17 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
"Resource": ["arn:seaweed:s3:::%s/*"]
}
]
- }`, testBucket)
+ }`, bucketName)
_, err = adminClient.PutBucketPolicy(&s3.PutBucketPolicyInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Policy: aws.String(bucketPolicy),
})
require.NoError(t, err)
// Verify that the bucket policy was stored successfully by retrieving it
policyResult, err := adminClient.GetBucketPolicy(&s3.GetBucketPolicyInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
assert.Contains(t, *policyResult.Policy, "s3:DeleteObject")
@@ -483,18 +482,18 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
// Cleanup - delete bucket policy first, then objects and bucket
_, err = adminClient.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
_, err = adminClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
Key: aws.String(testObjectKey),
})
require.NoError(t, err)
_, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{
- Bucket: aws.String(testBucket),
+ Bucket: aws.String(bucketName),
})
require.NoError(t, err)
}
@@ -527,15 +526,6 @@ func TestS3IAMContextualPolicyEnforcement(t *testing.T) {
})
}
-// Helper function to create test content of specific size
-func createTestContent(size int) *bytes.Reader {
- content := make([]byte, size)
- for i := range content {
- content[i] = byte(i % 256)
- }
- return bytes.NewReader(content)
-}
-
// TestS3IAMPresignedURLIntegration tests presigned URL generation with IAM
func TestS3IAMPresignedURLIntegration(t *testing.T) {
framework := NewS3IAMTestFramework(t)
@@ -546,12 +536,12 @@ func TestS3IAMPresignedURLIntegration(t *testing.T) {
require.NoError(t, err)
// Use static bucket name but with cleanup to handle conflicts
- err = framework.CreateBucketWithCleanup(adminClient, testBucketPrefix)
+ err = framework.CreateBucketWithCleanup(adminClient, testBucket)
require.NoError(t, err)
// Put test object
_, err = adminClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(testBucketPrefix),
+ Bucket: aws.String(testBucket),
Key: aws.String(testObjectKey),
Body: strings.NewReader(testObjectData),
})
@@ -573,13 +563,13 @@ func TestS3IAMPresignedURLIntegration(t *testing.T) {
// Test direct object access with JWT Bearer token (recommended approach)
_, err := adminClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(testBucketPrefix),
+ Bucket: aws.String(testBucket),
Key: aws.String(testObjectKey),
})
require.NoError(t, err, "Direct object access with JWT Bearer token works correctly")
- t.Log("โœ… JWT Bearer token authentication confirmed working for direct S3 API calls")
- t.Log("โ„น๏ธ Note: Presigned URLs are not supported with JWT Bearer authentication by design")
+ t.Log("JWT Bearer token authentication confirmed working for direct S3 API calls")
+ t.Log("Note: Presigned URLs are not supported with JWT Bearer authentication by design")
})
// Cleanup
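
Reviewer note: GenerateUniqueBucketName belongs to the test framework and is not shown in this diff; the new versioning tests below derive uniqueness from a Unix-timestamp suffix, so a plausible (hypothetical) sketch of the same idea is:

    import time

    def unique_bucket_name(prefix: str) -> str:
        # Hypothetical helper: a timestamp suffix keeps repeated runs from colliding.
        return f"{prefix}-{int(time.time() * 1000)}"
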
diff --git a/test/s3/iam/setup_all_tests.sh b/test/s3/iam/setup_all_tests.sh
index 597d367aa..aaec54691 100755
--- a/test/s3/iam/setup_all_tests.sh
+++ b/test/s3/iam/setup_all_tests.sh
@@ -30,12 +30,12 @@ check_prerequisites() {
done
if [ ${#missing_tools[@]} -gt 0 ]; then
- echo -e "${RED}โŒ Missing required tools: ${missing_tools[*]}${NC}"
+ echo -e "${RED}[FAIL] Missing required tools: ${missing_tools[*]}${NC}"
echo -e "${YELLOW}Please install the missing tools and try again${NC}"
exit 1
fi
- echo -e "${GREEN}โœ… All prerequisites met${NC}"
+ echo -e "${GREEN}[OK] All prerequisites met${NC}"
}
# Set up Keycloak for OIDC testing
@@ -43,11 +43,11 @@ setup_keycloak() {
echo -e "\n${BLUE}1. Setting up Keycloak for OIDC testing...${NC}"
if ! "${SCRIPT_DIR}/setup_keycloak.sh"; then
- echo -e "${RED}โŒ Failed to set up Keycloak${NC}"
+ echo -e "${RED}[FAIL] Failed to set up Keycloak${NC}"
return 1
fi
- echo -e "${GREEN}โœ… Keycloak setup completed${NC}"
+ echo -e "${GREEN}[OK] Keycloak setup completed${NC}"
}
# Set up SeaweedFS test cluster
@@ -58,7 +58,7 @@ setup_seaweedfs_cluster() {
echo -e "${YELLOW}๐Ÿ”ง Building SeaweedFS binary...${NC}"
cd "${SCRIPT_DIR}/../../../" # Go to seaweedfs root
if ! make > /dev/null 2>&1; then
- echo -e "${RED}โŒ Failed to build SeaweedFS binary${NC}"
+ echo -e "${RED}[FAIL] Failed to build SeaweedFS binary${NC}"
return 1
fi
@@ -68,7 +68,7 @@ setup_seaweedfs_cluster() {
echo -e "${YELLOW}๐Ÿงน Cleaning up existing test data...${NC}"
rm -rf test-volume-data/* 2>/dev/null || true
- echo -e "${GREEN}โœ… SeaweedFS cluster setup completed${NC}"
+ echo -e "${GREEN}[OK] SeaweedFS cluster setup completed${NC}"
}
# Set up test data and configurations
@@ -79,18 +79,18 @@ setup_test_configurations() {
if [ ! -f "${SCRIPT_DIR}/iam_config.json" ]; then
echo -e "${YELLOW}โš ๏ธ IAM configuration not found, using default config${NC}"
cp "${SCRIPT_DIR}/iam_config.local.json" "${SCRIPT_DIR}/iam_config.json" 2>/dev/null || {
- echo -e "${RED}โŒ No IAM configuration files found${NC}"
+ echo -e "${RED}[FAIL] No IAM configuration files found${NC}"
return 1
}
fi
# Validate configuration
if ! jq . "${SCRIPT_DIR}/iam_config.json" >/dev/null; then
- echo -e "${RED}โŒ Invalid IAM configuration JSON${NC}"
+ echo -e "${RED}[FAIL] Invalid IAM configuration JSON${NC}"
return 1
fi
- echo -e "${GREEN}โœ… Test configurations set up${NC}"
+ echo -e "${GREEN}[OK] Test configurations set up${NC}"
}
# Verify services are ready
@@ -113,13 +113,13 @@ verify_services() {
done
if [ "$keycloak_ready" = true ]; then
- echo -e "${GREEN}โœ… Keycloak is ready${NC}"
+ echo -e "${GREEN}[OK] Keycloak is ready${NC}"
else
echo -e "${YELLOW}โš ๏ธ Keycloak may not be fully ready yet${NC}"
echo -e "${YELLOW}This is okay - tests will wait for Keycloak when needed${NC}"
fi
- echo -e "${GREEN}โœ… Service verification completed${NC}"
+ echo -e "${GREEN}[OK] Service verification completed${NC}"
}
# Set up environment variables
@@ -145,7 +145,7 @@ export TEST_TIMEOUT=60m
export CGO_ENABLED=0
EOF
- echo -e "${GREEN}โœ… Environment variables set${NC}"
+ echo -e "${GREEN}[OK] Environment variables set${NC}"
}
# Display setup summary
@@ -157,7 +157,7 @@ display_summary() {
echo -e "Test Timeout: ${TEST_TIMEOUT:-60m}"
echo -e "IAM Config: ${SCRIPT_DIR}/iam_config.json"
echo -e ""
- echo -e "${GREEN}โœ… Complete test environment setup finished!${NC}"
+ echo -e "${GREEN}[OK] Complete test environment setup finished!${NC}"
echo -e "${YELLOW}๐Ÿ’ก You can now run tests with: make run-all-tests${NC}"
echo -e "${YELLOW}๐Ÿ’ก Or run specific tests with: go test -v -timeout=60m -run TestName${NC}"
echo -e "${YELLOW}๐Ÿ’ก To stop Keycloak: docker stop keycloak-iam-test${NC}"
@@ -173,21 +173,21 @@ main() {
if setup_keycloak; then
setup_steps+=("keycloak")
else
- echo -e "${RED}โŒ Failed to set up Keycloak${NC}"
+ echo -e "${RED}[FAIL] Failed to set up Keycloak${NC}"
exit 1
fi
if setup_seaweedfs_cluster; then
setup_steps+=("seaweedfs")
else
- echo -e "${RED}โŒ Failed to set up SeaweedFS cluster${NC}"
+ echo -e "${RED}[FAIL] Failed to set up SeaweedFS cluster${NC}"
exit 1
fi
if setup_test_configurations; then
setup_steps+=("config")
else
- echo -e "${RED}โŒ Failed to set up test configurations${NC}"
+ echo -e "${RED}[FAIL] Failed to set up test configurations${NC}"
exit 1
fi
diff --git a/test/s3/iam/setup_keycloak.sh b/test/s3/iam/setup_keycloak.sh
index 5d3cc45d6..14fb08435 100755
--- a/test/s3/iam/setup_keycloak.sh
+++ b/test/s3/iam/setup_keycloak.sh
@@ -54,7 +54,7 @@ ensure_container() {
if [[ -n "$extracted_port" ]]; then
KEYCLOAK_PORT="$extracted_port"
KEYCLOAK_URL="http://localhost:${KEYCLOAK_PORT}"
- echo -e "${GREEN}โœ… Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}"
+ echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}"
return 0
fi
fi
@@ -71,11 +71,11 @@ ensure_container() {
KEYCLOAK_URL="http://localhost:${KEYCLOAK_PORT}"
fi
fi
- echo -e "${GREEN}โœ… Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}"
+ echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}"
return 0
fi
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
- echo -e "${GREEN}โœ… Using existing container '${CONTAINER_NAME}'${NC}"
+ echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}'${NC}"
return 0
fi
echo -e "${YELLOW}๐Ÿณ Starting Keycloak container (${KEYCLOAK_IMAGE})...${NC}"
@@ -94,16 +94,16 @@ wait_ready() {
echo -e "${YELLOW}โณ Waiting for Keycloak to be ready...${NC}"
for i in $(seq 1 120); do
if curl -sf "${KEYCLOAK_URL}/health/ready" >/dev/null; then
- echo -e "${GREEN}โœ… Keycloak health check passed${NC}"
+ echo -e "${GREEN}[OK] Keycloak health check passed${NC}"
return 0
fi
if curl -sf "${KEYCLOAK_URL}/realms/master" >/dev/null; then
- echo -e "${GREEN}โœ… Keycloak master realm accessible${NC}"
+ echo -e "${GREEN}[OK] Keycloak master realm accessible${NC}"
return 0
fi
sleep 2
done
- echo -e "${RED}โŒ Keycloak did not become ready in time${NC}"
+ echo -e "${RED}[FAIL] Keycloak did not become ready in time${NC}"
exit 1
}
@@ -122,7 +122,7 @@ kcadm() {
done
if [[ "$auth_success" == false ]]; then
- echo -e "${RED}โŒ Failed to authenticate with any known admin password${NC}"
+ echo -e "${RED}[FAIL] Failed to authenticate with any known admin password${NC}"
return 1
fi
@@ -136,17 +136,17 @@ admin_login() {
ensure_realm() {
if kcadm get realms | grep -q "${REALM_NAME}"; then
- echo -e "${GREEN}โœ… Realm '${REALM_NAME}' already exists${NC}"
+ echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists${NC}"
else
echo -e "${YELLOW}๐Ÿ“ Creating realm '${REALM_NAME}'...${NC}"
if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true 2>/dev/null; then
- echo -e "${GREEN}โœ… Realm created${NC}"
+ echo -e "${GREEN}[OK] Realm created${NC}"
else
# Check if it exists now (might have been created by another process)
if kcadm get realms | grep -q "${REALM_NAME}"; then
- echo -e "${GREEN}โœ… Realm '${REALM_NAME}' already exists (created concurrently)${NC}"
+ echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists (created concurrently)${NC}"
else
- echo -e "${RED}โŒ Failed to create realm '${REALM_NAME}'${NC}"
+ echo -e "${RED}[FAIL] Failed to create realm '${REALM_NAME}'${NC}"
return 1
fi
fi
@@ -157,7 +157,7 @@ ensure_client() {
local id
id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty')
if [[ -n "${id}" ]]; then
- echo -e "${GREEN}โœ… Client '${CLIENT_ID}' already exists${NC}"
+ echo -e "${GREEN}[OK] Client '${CLIENT_ID}' already exists${NC}"
else
echo -e "${YELLOW}๐Ÿ“ Creating client '${CLIENT_ID}'...${NC}"
kcadm create clients -r "${REALM_NAME}" \
@@ -169,7 +169,7 @@ ensure_client() {
-s standardFlowEnabled=true \
-s implicitFlowEnabled=false \
-s secret="${CLIENT_SECRET}" >/dev/null
- echo -e "${GREEN}โœ… Client created${NC}"
+ echo -e "${GREEN}[OK] Client created${NC}"
fi
# Create and configure role mapper for the client
@@ -179,7 +179,7 @@ ensure_client() {
ensure_role() {
local role="$1"
if kcadm get roles -r "${REALM_NAME}" | jq -r '.[].name' | grep -qx "${role}"; then
- echo -e "${GREEN}โœ… Role '${role}' exists${NC}"
+ echo -e "${GREEN}[OK] Role '${role}' exists${NC}"
else
echo -e "${YELLOW}๐Ÿ“ Creating role '${role}'...${NC}"
kcadm create roles -r "${REALM_NAME}" -s name="${role}" >/dev/null
@@ -201,7 +201,7 @@ ensure_user() {
-s lastName="User" \
-i)
else
- echo -e "${GREEN}โœ… User '${username}' exists${NC}"
+ echo -e "${GREEN}[OK] User '${username}' exists${NC}"
fi
echo -e "${YELLOW}๐Ÿ”‘ Setting password for '${username}'...${NC}"
kcadm set-password -r "${REALM_NAME}" --userid "${uid}" --new-password "${password}" --temporary=false >/dev/null
@@ -214,7 +214,7 @@ assign_role() {
rid=$(kcadm get roles -r "${REALM_NAME}" | jq -r ".[] | select(.name==\"${role}\") | .id")
# Check if role already assigned
if kcadm get "users/${uid}/role-mappings/realm" -r "${REALM_NAME}" | jq -r '.[].name' | grep -qx "${role}"; then
- echo -e "${GREEN}โœ… User '${username}' already has role '${role}'${NC}"
+ echo -e "${GREEN}[OK] User '${username}' already has role '${role}'${NC}"
return 0
fi
echo -e "${YELLOW}โž• Assigning role '${role}' to '${username}'...${NC}"
@@ -229,7 +229,7 @@ configure_role_mapper() {
internal_id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty')
if [[ -z "${internal_id}" ]]; then
- echo -e "${RED}โŒ Could not find client ${client_id} to configure role mapper${NC}"
+ echo -e "${RED}[FAIL] Could not find client ${client_id} to configure role mapper${NC}"
return 1
fi
@@ -238,7 +238,7 @@ configure_role_mapper() {
existing_mapper=$(kcadm get "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" | jq -r '.[] | select(.name=="realm roles" and .protocolMapper=="oidc-usermodel-realm-role-mapper") | .id // empty')
if [[ -n "${existing_mapper}" ]]; then
- echo -e "${GREEN}โœ… Realm roles mapper already exists${NC}"
+ echo -e "${GREEN}[OK] Realm roles mapper already exists${NC}"
else
echo -e "${YELLOW}๐Ÿ“ Creating realm roles mapper...${NC}"
@@ -254,11 +254,11 @@ configure_role_mapper() {
-s 'config."access.token.claim"=true' \
-s 'config."claim.name"=roles' \
-s 'config."jsonType.label"=String' >/dev/null || {
- echo -e "${RED}โŒ Failed to create realm roles mapper${NC}"
+ echo -e "${RED}[FAIL] Failed to create realm roles mapper${NC}"
return 1
}
- echo -e "${GREEN}โœ… Realm roles mapper created${NC}"
+ echo -e "${GREEN}[OK] Realm roles mapper created${NC}"
fi
}
@@ -270,7 +270,7 @@ configure_audience_mapper() {
internal_id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty')
if [[ -z "${internal_id}" ]]; then
- echo -e "${RED}โŒ Could not find client ${CLIENT_ID} to configure audience mapper${NC}"
+ echo -e "${RED}[FAIL] Could not find client ${CLIENT_ID} to configure audience mapper${NC}"
return 1
fi
@@ -279,7 +279,7 @@ configure_audience_mapper() {
existing_mapper=$(kcadm get "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" | jq -r '.[] | select(.name=="audience-mapper" and .protocolMapper=="oidc-audience-mapper") | .id // empty')
if [[ -n "${existing_mapper}" ]]; then
- echo -e "${GREEN}โœ… Audience mapper already exists${NC}"
+ echo -e "${GREEN}[OK] Audience mapper already exists${NC}"
else
echo -e "${YELLOW}๐Ÿ“ Creating audience mapper...${NC}"
@@ -292,17 +292,17 @@ configure_audience_mapper() {
-s 'config."included.client.audience"='"${CLIENT_ID}" \
-s 'config."id.token.claim"=false' \
-s 'config."access.token.claim"=true' >/dev/null || {
- echo -e "${RED}โŒ Failed to create audience mapper${NC}"
+ echo -e "${RED}[FAIL] Failed to create audience mapper${NC}"
return 1
}
- echo -e "${GREEN}โœ… Audience mapper created${NC}"
+ echo -e "${GREEN}[OK] Audience mapper created${NC}"
fi
}
main() {
- command -v docker >/dev/null || { echo -e "${RED}❌ Docker is required${NC}"; exit 1; }
- command -v jq >/dev/null || { echo -e "${RED}❌ jq is required${NC}"; exit 1; }
+ command -v docker >/dev/null || { echo -e "${RED}[FAIL] Docker is required${NC}"; exit 1; }
+ command -v jq >/dev/null || { echo -e "${RED}[FAIL] jq is required${NC}"; exit 1; }
ensure_container
echo "Keycloak URL: ${KEYCLOAK_URL}"
@@ -347,7 +347,7 @@ main() {
-o /tmp/auth_test_response.json)
if [[ "${validation_result: -3}" == "200" ]]; then
- echo -e "${GREEN}โœ… Authentication validation successful${NC}"
+ echo -e "${GREEN}[OK] Authentication validation successful${NC}"
# Extract and decode JWT token to check for roles
local access_token=$(cat /tmp/auth_test_response.json | jq -r '.access_token // empty')
@@ -363,7 +363,7 @@ main() {
local roles=$(echo "${decoded}" | jq -r '.roles // empty' 2>/dev/null || echo "")
if [[ -n "${roles}" && "${roles}" != "null" ]]; then
- echo -e "${GREEN}โœ… JWT token includes roles: ${roles}${NC}"
+ echo -e "${GREEN}[OK] JWT token includes roles: ${roles}${NC}"
else
echo -e "${YELLOW}โš ๏ธ JWT token does not include 'roles' claim${NC}"
echo -e "${YELLOW}Decoded payload sample:${NC}"
@@ -371,14 +371,14 @@ main() {
fi
fi
else
- echo -e "${RED}โŒ Authentication validation failed with HTTP ${validation_result: -3}${NC}"
+ echo -e "${RED}[FAIL] Authentication validation failed with HTTP ${validation_result: -3}${NC}"
echo -e "${YELLOW}Response body:${NC}"
cat /tmp/auth_test_response.json 2>/dev/null || echo "No response body"
echo -e "${YELLOW}This may indicate a setup issue that needs to be resolved${NC}"
fi
rm -f /tmp/auth_test_response.json
- echo -e "${GREEN}โœ… Keycloak test realm '${REALM_NAME}' configured${NC}"
+ echo -e "${GREEN}[OK] Keycloak test realm '${REALM_NAME}' configured${NC}"
}
setup_iam_config() {
@@ -400,7 +400,7 @@ setup_iam_config() {
# Verify source config exists
if [[ ! -f "$config_source" ]]; then
- echo -e "${RED}โŒ Config file $config_source not found in $script_dir${NC}"
+ echo -e "${RED}[FAIL] Config file $config_source not found in $script_dir${NC}"
exit 1
fi
@@ -408,7 +408,7 @@ setup_iam_config() {
cp "$config_source" "iam_config.json"
local detected_issuer=$(cat iam_config.json | jq -r '.providers[] | select(.name=="keycloak") | .config.issuer')
- echo -e "${GREEN}โœ… IAM configuration set successfully${NC}"
+ echo -e "${GREEN}[OK] IAM configuration set successfully${NC}"
echo " - Using config: $config_source"
echo " - Keycloak issuer: $detected_issuer"
}
diff --git a/test/s3/iam/setup_keycloak_docker.sh b/test/s3/iam/setup_keycloak_docker.sh
index e648bb7b6..6dce68abf 100755
--- a/test/s3/iam/setup_keycloak_docker.sh
+++ b/test/s3/iam/setup_keycloak_docker.sh
@@ -19,7 +19,7 @@ timeout 120 bash -c '
echo "Waiting for Keycloak..."
sleep 5
done
- echo "โœ… Keycloak health check passed"
+ echo "[OK] Keycloak health check passed"
' "$KEYCLOAK_URL"
# Download kcadm.sh if not available
@@ -51,14 +51,14 @@ kcadm() {
sleep 5
done
- echo "โŒ Failed to execute kcadm command after $max_retries retries"
+ echo "[FAIL] Failed to execute kcadm command after $max_retries retries"
return 1
}
# Create realm
echo "๐Ÿ“ Creating realm '$REALM_NAME'..."
kcadm create realms -s realm="$REALM_NAME" -s enabled=true || echo "Realm may already exist"
-echo "โœ… Realm created"
+echo "[OK] Realm created"
# Create OIDC client
echo "๐Ÿ“ Creating client '$CLIENT_ID'..."
@@ -74,9 +74,9 @@ CLIENT_UUID=$(kcadm create clients -r "$REALM_NAME" \
-i 2>/dev/null || echo "existing-client")
if [ "$CLIENT_UUID" != "existing-client" ]; then
- echo "โœ… Client created with ID: $CLIENT_UUID"
+ echo "[OK] Client created with ID: $CLIENT_UUID"
else
- echo "โœ… Using existing client"
+ echo "[OK] Using existing client"
CLIENT_UUID=$(kcadm get clients -r "$REALM_NAME" -q clientId="$CLIENT_ID" --fields id --format csv --noquotes | tail -n +2)
fi
@@ -94,8 +94,8 @@ MAPPER_CONFIG='{
}
}'
-kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$MAPPER_CONFIG" 2>/dev/null || echo "✅ Role mapper already exists"
-echo "✅ Realm roles mapper configured"
+kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$MAPPER_CONFIG" 2>/dev/null || echo "[OK] Role mapper already exists"
+echo "[OK] Realm roles mapper configured"
# Configure audience mapper to ensure JWT tokens have correct audience claim
echo "๐Ÿ”ง Configuring audience mapper for client '$CLIENT_ID'..."
@@ -110,8 +110,8 @@ AUDIENCE_MAPPER_CONFIG='{
}
}'
-kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$AUDIENCE_MAPPER_CONFIG" 2>/dev/null || echo "✅ Audience mapper already exists"
-echo "✅ Audience mapper configured"
+kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$AUDIENCE_MAPPER_CONFIG" 2>/dev/null || echo "[OK] Audience mapper already exists"
+echo "[OK] Audience mapper configured"
# Create realm roles
echo "๐Ÿ“ Creating realm roles..."
@@ -393,11 +393,11 @@ ACCESS_TOKEN=$(curl -s -X POST "$KEYCLOAK_TOKEN_URL" \
-d "scope=openid profile email" | jq -r '.access_token')
if [ "$ACCESS_TOKEN" = "null" ] || [ -z "$ACCESS_TOKEN" ]; then
- echo "โŒ Failed to obtain access token"
+ echo "[FAIL] Failed to obtain access token"
exit 1
fi
-echo "โœ… Authentication validation successful"
+echo "[OK] Authentication validation successful"
# Decode and check JWT claims
PAYLOAD=$(echo "$ACCESS_TOKEN" | cut -d'.' -f2)
@@ -410,10 +410,10 @@ CLAIMS=$(echo "$PAYLOAD" | base64 -d 2>/dev/null | jq .)
ROLES=$(echo "$CLAIMS" | jq -r '.roles[]?')
if [ -n "$ROLES" ]; then
- echo "โœ… JWT token includes roles: [$(echo "$ROLES" | tr '\n' ',' | sed 's/,$//' | sed 's/,/, /g')]"
+ echo "[OK] JWT token includes roles: [$(echo "$ROLES" | tr '\n' ',' | sed 's/,$//' | sed 's/,/, /g')]"
else
echo "โš ๏ธ No roles found in JWT token"
fi
-echo "โœ… Keycloak test realm '$REALM_NAME' configured for Docker environment"
+echo "[OK] Keycloak test realm '$REALM_NAME' configured for Docker environment"
echo "๐Ÿณ Setup complete! You can now run: docker-compose up -d"
diff --git a/test/s3/retention/object_lock_reproduce_test.go b/test/s3/retention/object_lock_reproduce_test.go
index e92236225..0b59dd832 100644
--- a/test/s3/retention/object_lock_reproduce_test.go
+++ b/test/s3/retention/object_lock_reproduce_test.go
@@ -31,7 +31,7 @@ func TestReproduceObjectLockIssue(t *testing.T) {
if err != nil {
t.Fatalf("Bucket creation failed: %v", err)
}
- t.Logf("โœ… Bucket created successfully")
+ t.Logf("Bucket created successfully")
t.Logf(" Response: %+v", createResp)
// Step 2: Check if Object Lock is actually enabled
@@ -42,19 +42,19 @@ func TestReproduceObjectLockIssue(t *testing.T) {
})
if err != nil {
- t.Logf("โŒ GetObjectLockConfiguration FAILED: %v", err)
+ t.Logf("GetObjectLockConfiguration FAILED: %v", err)
t.Logf(" This demonstrates the issue with header processing!")
t.Logf(" S3 clients expect this call to succeed if Object Lock is supported")
t.Logf(" When this fails, clients conclude that Object Lock is not supported")
// This failure demonstrates the bug - the bucket was created but Object Lock wasn't enabled
- t.Logf("\n๐Ÿ› BUG CONFIRMED:")
+ t.Logf("\nBUG CONFIRMED:")
t.Logf(" - Bucket creation with ObjectLockEnabledForBucket=true succeeded")
t.Logf(" - But GetObjectLockConfiguration fails")
t.Logf(" - This means the x-amz-bucket-object-lock-enabled header was ignored")
} else {
- t.Logf("โœ… GetObjectLockConfiguration succeeded!")
+ t.Logf("GetObjectLockConfiguration succeeded!")
t.Logf(" Response: %+v", objectLockResp)
t.Logf(" Object Lock is properly enabled - this is the expected behavior")
}
@@ -69,7 +69,7 @@ func TestReproduceObjectLockIssue(t *testing.T) {
t.Logf(" Versioning status: %v", versioningResp.Status)
if versioningResp.Status != "Enabled" {
- t.Logf(" โš ๏ธ Versioning should be automatically enabled when Object Lock is enabled")
+ t.Logf(" Versioning should be automatically enabled when Object Lock is enabled")
}
// Cleanup
@@ -100,14 +100,14 @@ func TestNormalBucketCreationStillWorks(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
- t.Logf("โœ… Normal bucket creation works")
+ t.Logf("Normal bucket creation works")
// Object Lock should NOT be enabled
_, err = client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{
Bucket: aws.String(bucketName),
})
require.Error(t, err, "GetObjectLockConfiguration should fail for bucket without Object Lock")
- t.Logf("โœ… GetObjectLockConfiguration correctly fails for normal bucket")
+ t.Logf("GetObjectLockConfiguration correctly fails for normal bucket")
// Cleanup
client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: aws.String(bucketName)})
diff --git a/test/s3/retention/object_lock_validation_test.go b/test/s3/retention/object_lock_validation_test.go
index 1480f33d4..4293486e8 100644
--- a/test/s3/retention/object_lock_validation_test.go
+++ b/test/s3/retention/object_lock_validation_test.go
@@ -30,7 +30,7 @@ func TestObjectLockValidation(t *testing.T) {
})
require.NoError(t, err, "Bucket creation should succeed")
defer client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: aws.String(bucketName)})
- t.Log(" โœ… Bucket created successfully")
+ t.Log(" Bucket created successfully")
// Step 2: Check if Object Lock is supported (standard S3 client behavior)
t.Log("\n2. Testing Object Lock support detection")
@@ -38,7 +38,7 @@ func TestObjectLockValidation(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "GetObjectLockConfiguration should succeed for Object Lock enabled bucket")
- t.Log(" โœ… GetObjectLockConfiguration succeeded - Object Lock is properly enabled")
+ t.Log(" GetObjectLockConfiguration succeeded - Object Lock is properly enabled")
// Step 3: Verify versioning is enabled (required for Object Lock)
t.Log("\n3. Verifying versioning is automatically enabled")
@@ -47,7 +47,7 @@ func TestObjectLockValidation(t *testing.T) {
})
require.NoError(t, err)
require.Equal(t, types.BucketVersioningStatusEnabled, versioningResp.Status, "Versioning should be automatically enabled")
- t.Log(" โœ… Versioning automatically enabled")
+ t.Log(" Versioning automatically enabled")
// Step 4: Test actual Object Lock functionality
t.Log("\n4. Testing Object Lock retention functionality")
@@ -62,7 +62,7 @@ func TestObjectLockValidation(t *testing.T) {
})
require.NoError(t, err)
require.NotNil(t, putResp.VersionId, "Object should have a version ID")
- t.Log(" โœ… Object created with versioning")
+ t.Log(" Object created with versioning")
// Apply Object Lock retention
retentionUntil := time.Now().Add(24 * time.Hour)
@@ -75,7 +75,7 @@ func TestObjectLockValidation(t *testing.T) {
},
})
require.NoError(t, err, "Setting Object Lock retention should succeed")
- t.Log(" โœ… Object Lock retention applied successfully")
+ t.Log(" Object Lock retention applied successfully")
// Verify retention allows simple DELETE (creates delete marker) but blocks version deletion
// AWS S3 behavior: Simple DELETE (without version ID) is ALWAYS allowed and creates delete marker
@@ -84,7 +84,7 @@ func TestObjectLockValidation(t *testing.T) {
Key: aws.String(key),
})
require.NoError(t, err, "Simple DELETE should succeed and create delete marker (AWS S3 behavior)")
- t.Log(" โœ… Simple DELETE succeeded (creates delete marker - correct AWS behavior)")
+ t.Log(" Simple DELETE succeeded (creates delete marker - correct AWS behavior)")
// Now verify that DELETE with version ID is properly blocked by retention
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
@@ -93,7 +93,7 @@ func TestObjectLockValidation(t *testing.T) {
VersionId: putResp.VersionId,
})
require.Error(t, err, "DELETE with version ID should be blocked by COMPLIANCE retention")
- t.Log(" โœ… Object version is properly protected by retention policy")
+ t.Log(" Object version is properly protected by retention policy")
// Verify we can read the object version (should still work)
// Note: Need to specify version ID since latest version is now a delete marker
@@ -104,14 +104,14 @@ func TestObjectLockValidation(t *testing.T) {
})
require.NoError(t, err, "Reading protected object version should still work")
defer getResp.Body.Close()
- t.Log(" โœ… Protected object can still be read")
+ t.Log(" Protected object can still be read")
- t.Log("\n๐ŸŽ‰ S3 OBJECT LOCK VALIDATION SUCCESSFUL!")
+ t.Log("\nS3 OBJECT LOCK VALIDATION SUCCESSFUL!")
t.Log(" - Bucket creation with Object Lock header works")
t.Log(" - Object Lock support detection works (GetObjectLockConfiguration succeeds)")
t.Log(" - Versioning is automatically enabled")
t.Log(" - Object Lock retention functionality works")
t.Log(" - Objects are properly protected from deletion")
t.Log("")
- t.Log("โœ… S3 clients will now recognize SeaweedFS as supporting Object Lock!")
+ t.Log("S3 clients will now recognize SeaweedFS as supporting Object Lock!")
}
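
Reviewer note: the AWS semantics this test pins down, as a boto3 sketch (assumes a bucket already created with Object Lock and COMPLIANCE retention applied to the version; endpoint and names are illustrative): a simple DELETE always succeeds and leaves a delete marker, while a DELETE naming the protected version is rejected.

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3", endpoint_url="http://localhost:8333")  # illustrative endpoint
    put = s3.put_object(Bucket="locked-bucket", Key="doc.txt", Body=b"data")

    # Simple DELETE (no VersionId): always allowed, creates a delete marker.
    s3.delete_object(Bucket="locked-bucket", Key="doc.txt")

    # Versioned DELETE: rejected while the version is under retention.
    try:
        s3.delete_object(Bucket="locked-bucket", Key="doc.txt", VersionId=put["VersionId"])
    except ClientError as e:
        print("blocked by retention:", e.response["Error"]["Code"])
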
diff --git a/test/s3/sse/docker-compose.yml b/test/s3/sse/docker-compose.yml
index fa4630c6f..448788af4 100644
--- a/test/s3/sse/docker-compose.yml
+++ b/test/s3/sse/docker-compose.yml
@@ -1,5 +1,3 @@
-version: '3.8'
-
services:
# OpenBao server for KMS integration testing
openbao:
diff --git a/test/s3/sse/s3_sse_multipart_copy_test.go b/test/s3/sse/s3_sse_multipart_copy_test.go
index 49e1ac5e5..0b1e4a24b 100644
--- a/test/s3/sse/s3_sse_multipart_copy_test.go
+++ b/test/s3/sse/s3_sse_multipart_copy_test.go
@@ -369,5 +369,5 @@ func verifyEncryptedObject(t *testing.T, ctx context.Context, client *s3.Client,
require.Contains(t, aws.ToString(getResp.SSEKMSKeyId), *kmsKeyID, "SSE-KMS key ID mismatch")
}
- t.Logf("โœ… Successfully verified copied object %s: %d bytes, MD5=%s", objectKey, len(retrievedData), retrievedMD5)
+ t.Logf("Successfully verified copied object %s: %d bytes, MD5=%s", objectKey, len(retrievedData), retrievedMD5)
}
diff --git a/test/s3/sse/setup_openbao_sse.sh b/test/s3/sse/setup_openbao_sse.sh
index 99ea09e63..24034289b 100755
--- a/test/s3/sse/setup_openbao_sse.sh
+++ b/test/s3/sse/setup_openbao_sse.sh
@@ -22,11 +22,11 @@ export VAULT_TOKEN="$OPENBAO_TOKEN"
echo "โณ Waiting for OpenBao to be ready..."
for i in {1..30}; do
if curl -s "$OPENBAO_ADDR/v1/sys/health" > /dev/null 2>&1; then
- echo "โœ… OpenBao is ready!"
+ echo "[OK] OpenBao is ready!"
break
fi
if [ $i -eq 30 ]; then
- echo "โŒ OpenBao failed to start within 60 seconds"
+ echo "[FAIL] OpenBao failed to start within 60 seconds"
exit 1
fi
sleep 2
@@ -78,9 +78,9 @@ for key_info in "${keys[@]}"; do
"$OPENBAO_ADDR/v1/$TRANSIT_PATH/keys/$key_name")
if echo "$verify_response" | grep -q "\"name\":\"$key_name\""; then
- echo " โœ… Key $key_name created successfully"
+ echo " [OK] Key $key_name created successfully"
else
- echo " โŒ Failed to verify key $key_name"
+ echo " [FAIL] Failed to verify key $key_name"
echo " Response: $verify_response"
fi
done
@@ -99,7 +99,7 @@ encrypt_response=$(curl -s -X POST \
if echo "$encrypt_response" | grep -q "ciphertext"; then
ciphertext=$(echo "$encrypt_response" | grep -o '"ciphertext":"[^"]*"' | cut -d'"' -f4)
- echo " โœ… Encryption successful: ${ciphertext:0:50}..."
+ echo " [OK] Encryption successful: ${ciphertext:0:50}..."
# Decrypt to verify
decrypt_response=$(curl -s -X POST \
@@ -112,15 +112,15 @@ if echo "$encrypt_response" | grep -q "ciphertext"; then
decrypted_b64=$(echo "$decrypt_response" | grep -o '"plaintext":"[^"]*"' | cut -d'"' -f4)
decrypted=$(echo "$decrypted_b64" | base64 -d)
if [ "$decrypted" = "$test_plaintext" ]; then
- echo " โœ… Decryption successful: $decrypted"
+ echo " [OK] Decryption successful: $decrypted"
else
- echo " โŒ Decryption failed: expected '$test_plaintext', got '$decrypted'"
+ echo " [FAIL] Decryption failed: expected '$test_plaintext', got '$decrypted'"
fi
else
- echo " โŒ Decryption failed: $decrypt_response"
+ echo " [FAIL] Decryption failed: $decrypt_response"
fi
else
- echo " โŒ Encryption failed: $encrypt_response"
+ echo " [FAIL] Encryption failed: $encrypt_response"
fi
echo ""
@@ -143,4 +143,4 @@ echo " # Check status"
echo " curl $OPENBAO_ADDR/v1/sys/health"
echo ""
-echo "โœ… OpenBao SSE setup complete!"
+echo "[OK] OpenBao SSE setup complete!"
diff --git a/test/s3/sse/simple_sse_test.go b/test/s3/sse/simple_sse_test.go
index 665837f82..2fd8f642b 100644
--- a/test/s3/sse/simple_sse_test.go
+++ b/test/s3/sse/simple_sse_test.go
@@ -79,7 +79,7 @@ func TestSimpleSSECIntegration(t *testing.T) {
SSECustomerKeyMD5: aws.String(keyMD5),
})
require.NoError(t, err, "Failed to upload SSE-C object")
- t.Log("โœ… SSE-C PUT succeeded!")
+ t.Log("SSE-C PUT succeeded!")
})
t.Run("GET with SSE-C", func(t *testing.T) {
@@ -101,7 +101,7 @@ func TestSimpleSSECIntegration(t *testing.T) {
assert.Equal(t, "AES256", aws.ToString(resp.SSECustomerAlgorithm))
assert.Equal(t, keyMD5, aws.ToString(resp.SSECustomerKeyMD5))
- t.Log("โœ… SSE-C GET succeeded and data matches!")
+ t.Log("SSE-C GET succeeded and data matches!")
})
t.Run("GET without key should fail", func(t *testing.T) {
@@ -110,6 +110,6 @@ func TestSimpleSSECIntegration(t *testing.T) {
Key: aws.String(objectKey),
})
assert.Error(t, err, "Should fail to retrieve SSE-C object without key")
- t.Log("โœ… GET without key correctly failed")
+ t.Log("GET without key correctly failed")
})
}
diff --git a/test/s3/sse/sse_kms_openbao_test.go b/test/s3/sse/sse_kms_openbao_test.go
index 6360f6fad..b7606fe6a 100644
--- a/test/s3/sse/sse_kms_openbao_test.go
+++ b/test/s3/sse/sse_kms_openbao_test.go
@@ -169,7 +169,7 @@ func TestSSEKMSOpenBaoAvailability(t *testing.T) {
t.Skipf("OpenBao KMS not available for testing: %v", err)
}
- t.Logf("โœ… OpenBao KMS is available and working")
+ t.Logf("OpenBao KMS is available and working")
// Verify we can retrieve the object
getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
@@ -180,5 +180,5 @@ func TestSSEKMSOpenBaoAvailability(t *testing.T) {
defer getResp.Body.Close()
assert.Equal(t, types.ServerSideEncryptionAwsKms, getResp.ServerSideEncryption)
- t.Logf("✅ KMS encryption/decryption working correctly")
+ t.Logf("KMS encryption/decryption working correctly")
}
diff --git a/test/s3/versioning/s3_bucket_creation_test.go b/test/s3/versioning/s3_bucket_creation_test.go
new file mode 100644
index 000000000..36bd70ba8
--- /dev/null
+++ b/test/s3/versioning/s3_bucket_creation_test.go
@@ -0,0 +1,266 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestBucketCreationBehavior verifies CreateBucket semantics: fresh creation,
+// duplicate-creation conflicts, and Object Lock settings on new and existing buckets.
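+//
+// SeaweedFS answers any duplicate CreateBucket with 409 BucketAlreadyExists,
+// regardless of owner or settings; AWS itself would answer a same-owner
+// re-create with BucketAlreadyOwnedByYou (or 200 in us-east-1) instead.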
+func TestBucketCreationBehavior(t *testing.T) {
+ client := getS3Client(t)
+ ctx := context.Background()
+
+ // Test cases for bucket creation behavior
+ testCases := []struct {
+ name string
+ setupFunc func(t *testing.T, bucketName string) // Setup before test
+ bucketName string
+ objectLockEnabled *bool
+ expectedStatusCode int
+ expectedError string
+ cleanupFunc func(t *testing.T, bucketName string) // Cleanup after test
+ }{
+ {
+ name: "Create new bucket - should succeed",
+ bucketName: "test-new-bucket-" + fmt.Sprintf("%d", time.Now().Unix()),
+ objectLockEnabled: nil,
+ expectedStatusCode: 200,
+ expectedError: "",
+ },
+ {
+ name: "Create existing bucket with same owner - should return BucketAlreadyExists",
+ setupFunc: func(t *testing.T, bucketName string) {
+ // Create bucket first
+ _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Setup: failed to create initial bucket")
+ },
+ bucketName: "test-same-owner-same-settings-" + fmt.Sprintf("%d", time.Now().Unix()),
+ objectLockEnabled: nil,
+ expectedStatusCode: 409, // SeaweedFS now returns BucketAlreadyExists in all cases
+ expectedError: "BucketAlreadyExists",
+ },
+ {
+ name: "Create bucket with same owner but different Object Lock settings - should fail",
+ setupFunc: func(t *testing.T, bucketName string) {
+ // Create bucket without Object Lock first
+ _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Setup: failed to create initial bucket")
+ },
+ bucketName: "test-same-owner-diff-settings-" + fmt.Sprintf("%d", time.Now().Unix()),
+ objectLockEnabled: aws.Bool(true), // Try to enable Object Lock on existing bucket
+ expectedStatusCode: 409,
+ expectedError: "BucketAlreadyExists",
+ },
+ {
+ name: "Create bucket with Object Lock enabled - should succeed",
+ bucketName: "test-object-lock-new-" + fmt.Sprintf("%d", time.Now().Unix()),
+ objectLockEnabled: aws.Bool(true),
+ expectedStatusCode: 200,
+ expectedError: "",
+ },
+ {
+ name: "Create bucket with Object Lock enabled twice - should fail",
+ setupFunc: func(t *testing.T, bucketName string) {
+ // Create bucket with Object Lock first
+ _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ ObjectLockEnabledForBucket: aws.Bool(true),
+ })
+ require.NoError(t, err, "Setup: failed to create initial bucket with Object Lock")
+ },
+ bucketName: "test-object-lock-duplicate-" + fmt.Sprintf("%d", time.Now().Unix()),
+ objectLockEnabled: aws.Bool(true),
+ expectedStatusCode: 409,
+ expectedError: "BucketAlreadyExists",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Setup
+ if tc.setupFunc != nil {
+ tc.setupFunc(t, tc.bucketName)
+ }
+
+ // Cleanup function to ensure bucket is deleted after test
+ defer func() {
+ if tc.cleanupFunc != nil {
+ tc.cleanupFunc(t, tc.bucketName)
+ } else {
+ // Default cleanup - delete bucket and all objects
+ cleanupBucketForCreationTest(t, client, tc.bucketName)
+ }
+ }()
+
+ // Execute the test - attempt to create bucket
+ input := &s3.CreateBucketInput{
+ Bucket: aws.String(tc.bucketName),
+ }
+ if tc.objectLockEnabled != nil {
+ input.ObjectLockEnabledForBucket = tc.objectLockEnabled
+ }
+
+ _, err := client.CreateBucket(ctx, input)
+
+ // Verify results
+ if tc.expectedError == "" {
+ // Should succeed
+ assert.NoError(t, err, "Expected bucket creation to succeed")
+ } else {
+ // Should fail with specific error
+ assert.Error(t, err, "Expected bucket creation to fail")
+ if err != nil {
+ assert.Contains(t, err.Error(), tc.expectedError,
+ "Expected error to contain '%s', got: %v", tc.expectedError, err)
+ }
+ }
+ })
+ }
+}
+
+// TestBucketCreationWithDifferentUsers tests bucket creation with different identity contexts
+func TestBucketCreationWithDifferentUsers(t *testing.T) {
+ // This test would require setting up different S3 credentials/identities
+ // For now, we'll skip this as it requires more complex setup
+ t.Skip("Different user testing requires IAM setup - implement when IAM is configured")
+
+ // TODO: Implement when we have proper IAM/user management in test setup
+ // Should test:
+ // 1. User A creates bucket
+ // 2. User B tries to create same bucket -> should fail with BucketAlreadyExists
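+ //
+ // A minimal sketch, assuming a second set of static test credentials is
+ // provisioned (the access/secret values and names are placeholders):
+ //
+ //   cfgB, _ := config.LoadDefaultConfig(ctx, config.WithCredentialsProvider(
+ //       credentials.NewStaticCredentialsProvider("userB-key", "userB-secret", "")))
+ //   clientB := s3.NewFromConfig(cfgB, func(o *s3.Options) { o.UsePathStyle = true })
+ //   _, err := clientB.CreateBucket(ctx, &s3.CreateBucketInput{Bucket: aws.String(bucketName)})
+ //   // err should surface BucketAlreadyExists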
+}
+
+// TestBucketCreationVersioningInteraction tests interaction between bucket creation and versioning
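+// Per the S3 Object Lock specification, creating a bucket with
+// ObjectLockEnabledForBucket=true implicitly enables versioning, and that
+// versioning cannot be suspended for the lifetime of the bucket.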
+func TestBucketCreationVersioningInteraction(t *testing.T) {
+ client := getS3Client(t)
+ ctx := context.Background()
+ bucketName := "test-versioning-interaction-" + fmt.Sprintf("%d", time.Now().Unix())
+
+ defer cleanupBucketForCreationTest(t, client, bucketName)
+
+ // Create bucket with Object Lock (which enables versioning)
+ _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ ObjectLockEnabledForBucket: aws.Bool(true),
+ })
+ require.NoError(t, err, "Failed to create bucket with Object Lock")
+
+ // Verify versioning is enabled
+ versioningOutput, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Failed to get bucket versioning status")
+ assert.Equal(t, types.BucketVersioningStatusEnabled, versioningOutput.Status,
+ "Expected versioning to be enabled when Object Lock is enabled")
+
+ // Try to create the same bucket again - should fail
+ _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ ObjectLockEnabledForBucket: aws.Bool(true),
+ })
+ assert.Error(t, err, "Expected second bucket creation to fail")
+ assert.Contains(t, err.Error(), "BucketAlreadyExists",
+ "Expected BucketAlreadyExists error, got: %v", err)
+}
+
+// TestBucketCreationErrorMessages tests that proper error messages are returned
+func TestBucketCreationErrorMessages(t *testing.T) {
+ client := getS3Client(t)
+ ctx := context.Background()
+ bucketName := "test-error-messages-" + fmt.Sprintf("%d", time.Now().Unix())
+
+ defer cleanupBucketForCreationTest(t, client, bucketName)
+
+ // Create bucket first
+ _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Failed to create initial bucket")
+
+ // Try to create again and check error details
+ _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+
+ require.Error(t, err, "Expected bucket creation to fail")
+
+ // Check that it's the right type of error
+ assert.Contains(t, err.Error(), "BucketAlreadyExists",
+ "Expected BucketAlreadyExists error, got: %v", err)
+}
+
+// cleanupBucketForCreationTest removes a bucket and all its contents
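+// It pages through ListObjectVersions via NextKeyMarker/NextVersionIdMarker,
+// deletes every version and delete marker, and treats list failures as
+// "bucket already gone" so tests can call it unconditionally from defers.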
+func cleanupBucketForCreationTest(t *testing.T, client *s3.Client, bucketName string) {
+ ctx := context.Background()
+
+ // List and delete all objects (including versions)
+ listInput := &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucketName),
+ }
+
+ for {
+ listOutput, err := client.ListObjectVersions(ctx, listInput)
+ if err != nil {
+ // Bucket might not exist, which is fine
+ break
+ }
+
+ if len(listOutput.Versions) == 0 && len(listOutput.DeleteMarkers) == 0 {
+ break
+ }
+
+ // Delete all versions
+ var objectsToDelete []types.ObjectIdentifier
+ for _, version := range listOutput.Versions {
+ objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
+ Key: version.Key,
+ VersionId: version.VersionId,
+ })
+ }
+ for _, marker := range listOutput.DeleteMarkers {
+ objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
+ Key: marker.Key,
+ VersionId: marker.VersionId,
+ })
+ }
+
+ if len(objectsToDelete) > 0 {
+ _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
+ Bucket: aws.String(bucketName),
+ Delete: &types.Delete{
+ Objects: objectsToDelete,
+ },
+ })
+ if err != nil {
+ t.Logf("Warning: failed to delete objects from bucket %s: %v", bucketName, err)
+ }
+ }
+
+ // Check if there are more objects
+ if !aws.ToBool(listOutput.IsTruncated) {
+ break
+ }
+ listInput.KeyMarker = listOutput.NextKeyMarker
+ listInput.VersionIdMarker = listOutput.NextVersionIdMarker
+ }
+
+ // Delete the bucket
+ _, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err)
+ }
+}
diff --git a/test/s3/versioning/s3_directory_versioning_test.go b/test/s3/versioning/s3_directory_versioning_test.go
index 096065506..7126c70b0 100644
--- a/test/s3/versioning/s3_directory_versioning_test.go
+++ b/test/s3/versioning/s3_directory_versioning_test.go
@@ -793,7 +793,7 @@ func TestPrefixFilteringLogic(t *testing.T) {
assert.Equal(t, []string{"a", "a/b"}, keys, "Should return both 'a' and 'a/b'")
- t.Logf("✅ Prefix filtering logic correctly handles edge cases")
+ t.Logf("Prefix filtering logic correctly handles edge cases")
}
// Helper function to setup S3 client
diff --git a/test/s3/versioning/s3_suspended_versioning_test.go b/test/s3/versioning/s3_suspended_versioning_test.go
new file mode 100644
index 000000000..c1e8c7277
--- /dev/null
+++ b/test/s3/versioning/s3_suspended_versioning_test.go
@@ -0,0 +1,257 @@
+package s3api
+
+import (
+ "bytes"
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
+
+// TestSuspendedVersioningNullOverwrite tests the scenario where:
+// 1. Create object before versioning is enabled (pre-versioning object)
+// 2. Enable versioning, then suspend it
+// 3. Overwrite the object (should replace the null version, not create duplicate)
+// 4. List versions should show only 1 version with versionId "null"
+//
+// This test corresponds to: test_versioning_obj_plain_null_version_overwrite_suspended
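+//
+// Per S3 semantics, a PUT while versioning is suspended is stored with
+// version ID "null" and replaces any existing "null" version in place,
+// which is why exactly one version must remain after the overwrite.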
+func TestSuspendedVersioningNullOverwrite(t *testing.T) {
+ ctx := context.Background()
+ client := getS3Client(t)
+
+ // Create bucket
+ bucketName := getNewBucketName()
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ objectKey := "testobjbar"
+
+ // Step 1: Put object before versioning is configured (pre-versioning object)
+ content1 := []byte("foooz")
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(content1),
+ })
+ if err != nil {
+ t.Fatalf("Failed to create pre-versioning object: %v", err)
+ }
+ t.Logf("Created pre-versioning object")
+
+ // Step 2: Enable versioning
+ _, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
+ Bucket: aws.String(bucketName),
+ VersioningConfiguration: &types.VersioningConfiguration{
+ Status: types.BucketVersioningStatusEnabled,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to enable versioning: %v", err)
+ }
+ t.Logf("Enabled versioning")
+
+ // Step 3: Suspend versioning
+ _, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
+ Bucket: aws.String(bucketName),
+ VersioningConfiguration: &types.VersioningConfiguration{
+ Status: types.BucketVersioningStatusSuspended,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to suspend versioning: %v", err)
+ }
+ t.Logf("Suspended versioning")
+
+ // Step 4: Overwrite the object during suspended versioning
+ content2 := []byte("zzz")
+ putResp, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(content2),
+ })
+ if err != nil {
+ t.Fatalf("Failed to overwrite object during suspended versioning: %v", err)
+ }
+
+ // Verify no VersionId is returned for suspended versioning
+ if putResp.VersionId != nil {
+ t.Errorf("Suspended versioning should NOT return VersionId, but got: %s", *putResp.VersionId)
+ }
+ t.Logf("Overwrote object during suspended versioning (no VersionId returned as expected)")
+
+ // Step 5: Verify content is updated
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ if err != nil {
+ t.Fatalf("Failed to get object: %v", err)
+ }
+ defer getResp.Body.Close()
+
+ gotContent := new(bytes.Buffer)
+ if _, err := gotContent.ReadFrom(getResp.Body); err != nil {
+ t.Fatalf("Failed to read object body: %v", err)
+ }
+ if !bytes.Equal(gotContent.Bytes(), content2) {
+ t.Errorf("Expected content %q, got %q", content2, gotContent.Bytes())
+ }
+ t.Logf("Object content is correctly updated to: %q", content2)
+
+ // Step 6: List object versions - should have only 1 version
+ listResp, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ t.Fatalf("Failed to list object versions: %v", err)
+ }
+
+ // Count versions (excluding delete markers)
+ versionCount := len(listResp.Versions)
+ deleteMarkerCount := len(listResp.DeleteMarkers)
+
+ t.Logf("List results: %d versions, %d delete markers", versionCount, deleteMarkerCount)
+ for i, v := range listResp.Versions {
+ t.Logf(" Version %d: Key=%s, VersionId=%s, IsLatest=%v, Size=%d",
+ i, *v.Key, *v.VersionId, v.IsLatest, v.Size)
+ }
+
+ // THIS IS THE KEY ASSERTION: Should have exactly 1 version, not 2
+ if versionCount != 1 {
+ t.Errorf("Expected 1 version after suspended versioning overwrite, got %d versions", versionCount)
+ t.Error("BUG: Duplicate null versions detected! The overwrite should have replaced the pre-versioning object.")
+ } else {
+ t.Logf("PASS: Only 1 version found (no duplicate null versions)")
+ }
+
+ if deleteMarkerCount != 0 {
+ t.Errorf("Expected 0 delete markers, got %d", deleteMarkerCount)
+ }
+
+ // Verify the version has versionId "null"
+ if versionCount > 0 {
+ if listResp.Versions[0].VersionId == nil || *listResp.Versions[0].VersionId != "null" {
+ t.Errorf("Expected VersionId to be 'null', got %v", listResp.Versions[0].VersionId)
+ } else {
+ t.Logf("Version ID is 'null' as expected")
+ }
+ }
+
+ // Step 7: Delete the null version
+ _, err = client.DeleteObject(ctx, &s3.DeleteObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ VersionId: aws.String("null"),
+ })
+ if err != nil {
+ t.Fatalf("Failed to delete null version: %v", err)
+ }
+ t.Logf("Deleted null version")
+
+ // Step 8: Verify object no longer exists
+ _, err = client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ if err == nil {
+ t.Error("Expected object to not exist after deleting null version")
+ }
+ t.Logf("Object no longer exists after deleting null version")
+
+ // Step 9: Verify no versions remain
+ listResp, err = client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ t.Fatalf("Failed to list object versions: %v", err)
+ }
+
+ if len(listResp.Versions) != 0 || len(listResp.DeleteMarkers) != 0 {
+ t.Errorf("Expected no versions or delete markers, got %d versions and %d delete markers",
+ len(listResp.Versions), len(listResp.DeleteMarkers))
+ } else {
+ t.Logf("No versions remain after deletion")
+ }
+}
+
+// TestEnabledVersioningReturnsVersionId tests that when versioning is ENABLED,
+// every PutObject operation returns a version ID
+//
+// This test corresponds to the create_multiple_versions helper function
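+//
+// With versioning enabled, every successful PutObject must return a unique
+// x-amz-version-id; "null" is reserved for pre-versioning and suspended writes.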
+func TestEnabledVersioningReturnsVersionId(t *testing.T) {
+ ctx := context.Background()
+ client := getS3Client(t)
+
+ // Create bucket
+ bucketName := getNewBucketName()
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ objectKey := "testobj"
+
+ // Enable versioning
+ _, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
+ Bucket: aws.String(bucketName),
+ VersioningConfiguration: &types.VersioningConfiguration{
+ Status: types.BucketVersioningStatusEnabled,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to enable versioning: %v", err)
+ }
+ t.Logf("Enabled versioning")
+
+ // Create multiple versions
+ numVersions := 3
+ versionIds := make([]string, 0, numVersions)
+
+ for i := 0; i < numVersions; i++ {
+ content := []byte("content-" + string(rune('0'+i)))
+ putResp, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(content),
+ })
+ if err != nil {
+ t.Fatalf("Failed to create version %d: %v", i, err)
+ }
+
+ // THIS IS THE KEY ASSERTION: VersionId MUST be returned for enabled versioning
+ if putResp.VersionId == nil {
+ t.Errorf("FAILED: PutObject with enabled versioning MUST return VersionId, but got nil for version %d", i)
+ } else {
+ versionId := *putResp.VersionId
+ if versionId == "" {
+ t.Errorf("FAILED: PutObject returned empty VersionId for version %d", i)
+ } else if versionId == "null" {
+ t.Errorf("FAILED: PutObject with enabled versioning should NOT return 'null' version ID, got: %s", versionId)
+ } else {
+ versionIds = append(versionIds, versionId)
+ t.Logf("Version %d created with VersionId: %s", i, versionId)
+ }
+ }
+ }
+
+ if len(versionIds) != numVersions {
+ t.Errorf("Expected %d version IDs, got %d", numVersions, len(versionIds))
+ }
+
+ // List versions to verify all were created
+ listResp, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ t.Fatalf("Failed to list object versions: %v", err)
+ }
+
+ if len(listResp.Versions) != numVersions {
+ t.Errorf("Expected %d versions in list, got %d", numVersions, len(listResp.Versions))
+ } else {
+ t.Logf("All %d versions are listed", numVersions)
+ }
+
+ // Verify all version IDs match
+ for i, v := range listResp.Versions {
+ t.Logf(" Version %d: VersionId=%s, Size=%d, IsLatest=%v", i, *v.VersionId, v.Size, v.IsLatest)
+ }
+}