-rw-r--r--  other/java/client/src/main/proto/filer.proto |    1
-rw-r--r--  test/s3/sse/s3_sse_integration_test.go | 1089
-rw-r--r--  weed/operation/upload_content.go |    6
-rw-r--r--  weed/pb/filer.proto |    3
-rw-r--r--  weed/pb/filer_pb/filer.pb.go |   21
-rw-r--r--  weed/s3api/filer_multipart.go |  154
-rw-r--r--  weed/s3api/policy_engine/types.go |    5
-rw-r--r--  weed/s3api/s3_bucket_encryption.go |   12
-rw-r--r--  weed/s3api/s3_constants/crypto.go |   32
-rw-r--r--  weed/s3api/s3_constants/header.go |    7
-rw-r--r--  weed/s3api/s3_error_utils.go |   54
-rw-r--r--  weed/s3api/s3_sse_c.go |    9
-rw-r--r--  weed/s3api/s3_sse_copy_test.go |    4
-rw-r--r--  weed/s3api/s3_sse_error_test.go |    2
-rw-r--r--  weed/s3api/s3_sse_kms.go |  241
-rw-r--r--  weed/s3api/s3_sse_kms_utils.go |   99
-rw-r--r--  weed/s3api/s3_sse_multipart_test.go |    6
-rw-r--r--  weed/s3api/s3_sse_s3.go |   78
-rw-r--r--  weed/s3api/s3_sse_utils.go |   42
-rw-r--r--  weed/s3api/s3_validation_utils.go |   75
-rw-r--r--  weed/s3api/s3api_bucket_skip_handlers.go |   24
-rw-r--r--  weed/s3api/s3api_copy_size_calculation.go |    7
-rw-r--r--  weed/s3api/s3api_key_rotation.go |    2
-rw-r--r--  weed/s3api/s3api_object_handlers.go |   38
-rw-r--r--  weed/s3api/s3api_object_handlers_copy.go |  213
-rw-r--r--  weed/s3api/s3api_object_handlers_multipart.go |   62
-rw-r--r--  weed/s3api/s3api_object_handlers_postpolicy.go |    2
-rw-r--r--  weed/s3api/s3api_object_handlers_put.go |  263
-rw-r--r--  weed/s3api/s3api_object_retention_test.go |    2
-rw-r--r--  weed/s3api/s3api_put_handlers.go |  270
-rw-r--r--  weed/server/filer_server_handlers_write_upload.go |   25
31 files changed, 2395 insertions(+), 453 deletions(-)
diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto
index 66ba15183..8116a6589 100644
--- a/other/java/client/src/main/proto/filer.proto
+++ b/other/java/client/src/main/proto/filer.proto
@@ -146,6 +146,7 @@ enum SSEType {
NONE = 0; // No server-side encryption
SSE_C = 1; // Server-Side Encryption with Customer-Provided Keys
SSE_KMS = 2; // Server-Side Encryption with KMS-Managed Keys
+ SSE_S3 = 3; // Server-Side Encryption with S3-Managed Keys
}
message FileChunk {
diff --git a/test/s3/sse/s3_sse_integration_test.go b/test/s3/sse/s3_sse_integration_test.go
index cf5911f9c..0b3ff8f04 100644
--- a/test/s3/sse/s3_sse_integration_test.go
+++ b/test/s3/sse/s3_sse_integration_test.go
@@ -1176,3 +1176,1092 @@ func BenchmarkSSEKMSThroughput(b *testing.B) {
resp.Body.Close()
}
}
+
+// TestSSES3IntegrationBasic tests basic SSE-S3 upload and download functionality
+func TestSSES3IntegrationBasic(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-basic")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ testData := []byte("Hello, SSE-S3! This is a test of server-side encryption with S3-managed keys.")
+ objectKey := "test-sse-s3-object.txt"
+
+ t.Run("SSE-S3 Upload", func(t *testing.T) {
+ // Upload object with SSE-S3
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload object with SSE-S3")
+ })
+
+ t.Run("SSE-S3 Download", func(t *testing.T) {
+ // Download and verify object
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download SSE-S3 object")
+
+ // Verify SSE-S3 headers in response
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Server-side encryption header mismatch")
+
+ // Read and verify content
+ downloadedData, err := io.ReadAll(resp.Body)
+ require.NoError(t, err, "Failed to read downloaded data")
+ resp.Body.Close()
+
+ assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original")
+ })
+
+ t.Run("SSE-S3 HEAD Request", func(t *testing.T) {
+ // HEAD request should also return SSE headers
+ resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to HEAD SSE-S3 object")
+
+ // Verify SSE-S3 headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in HEAD response")
+ })
+}
+
+// TestSSES3IntegrationVariousDataSizes tests SSE-S3 with various data sizes
+func TestSSES3IntegrationVariousDataSizes(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-sizes")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ // Test various data sizes including edge cases
+ testSizes := []int{
+ 0, // Empty file
+ 1, // Single byte
+ 16, // One AES block
+ 31, // Just under two blocks
+ 32, // Exactly two blocks
+ 100, // Small file
+ 1024, // 1KB
+ 8192, // 8KB
+ 65536, // 64KB
+ 1024 * 1024, // 1MB
+ }
+
+ for _, size := range testSizes {
+ t.Run(fmt.Sprintf("Size_%d_bytes", size), func(t *testing.T) {
+ testData := generateTestData(size)
+ objectKey := fmt.Sprintf("test-sse-s3-%d.dat", size)
+
+ // Upload with SSE-S3
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload SSE-S3 object of size %d", size)
+
+ // Download and verify
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download SSE-S3 object of size %d", size)
+
+ // Verify encryption headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Missing SSE-S3 header for size %d", size)
+
+ // Verify content
+ downloadedData, err := io.ReadAll(resp.Body)
+ require.NoError(t, err, "Failed to read downloaded data for size %d", size)
+ resp.Body.Close()
+
+ assertDataEqual(t, testData, downloadedData, "Data mismatch for size %d", size)
+ })
+ }
+}
+
+// TestSSES3WithUserMetadata tests SSE-S3 with user-defined metadata
+func TestSSES3WithUserMetadata(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-metadata")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ testData := []byte("SSE-S3 with custom metadata")
+ objectKey := "test-object-with-metadata.txt"
+
+ userMetadata := map[string]string{
+ "author": "test-user",
+ "version": "1.0",
+ "environment": "test",
+ }
+
+ t.Run("Upload with Metadata", func(t *testing.T) {
+ // Upload object with SSE-S3 and user metadata
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ Metadata: userMetadata,
+ })
+ require.NoError(t, err, "Failed to upload object with SSE-S3 and metadata")
+ })
+
+ t.Run("Verify Metadata and Encryption", func(t *testing.T) {
+ // HEAD request to check metadata and encryption
+ resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to HEAD SSE-S3 object with metadata")
+
+ // Verify SSE-S3 headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing with metadata")
+
+ // Verify user metadata
+ for key, expectedValue := range userMetadata {
+ actualValue, exists := resp.Metadata[key]
+ assert.True(t, exists, "Metadata key %s not found", key)
+ assert.Equal(t, expectedValue, actualValue, "Metadata value mismatch for key %s", key)
+ }
+ })
+
+ t.Run("Download and Verify Content", func(t *testing.T) {
+ // Download and verify content
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download SSE-S3 object with metadata")
+
+ // Verify SSE-S3 headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in GET response")
+
+ // Verify content
+ downloadedData, err := io.ReadAll(resp.Body)
+ require.NoError(t, err, "Failed to read downloaded data")
+ resp.Body.Close()
+
+ assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original")
+ })
+}
+
+// TestSSES3RangeRequests tests SSE-S3 with HTTP range requests
+func TestSSES3RangeRequests(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-range")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ // Create test data large enough to ensure multipart storage
+ testData := generateTestData(1024 * 1024) // 1MB to ensure multipart chunking
+ objectKey := "test-sse-s3-range.dat"
+
+ // Upload object with SSE-S3
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload SSE-S3 object for range testing")
+
+ testCases := []struct {
+ name string
+ rangeHeader string
+ expectedStart int
+ expectedEnd int
+ }{
+ {"First 100 bytes", "bytes=0-99", 0, 99},
+ {"Middle range", "bytes=100000-199999", 100000, 199999},
+ {"Last 100 bytes", "bytes=1048476-1048575", 1048476, 1048575},
+ {"From offset to end", "bytes=500000-", 500000, len(testData) - 1},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Request range
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Range: aws.String(tc.rangeHeader),
+ })
+ require.NoError(t, err, "Failed to get range %s", tc.rangeHeader)
+
+ // Verify SSE-S3 headers are present in range response
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in range response")
+
+ // Read range data
+ rangeData, err := io.ReadAll(resp.Body)
+ require.NoError(t, err, "Failed to read range data")
+ resp.Body.Close()
+
+ // Calculate expected data
+ endIndex := tc.expectedEnd
+ if tc.expectedEnd >= len(testData) {
+ endIndex = len(testData) - 1
+ }
+ expectedData := testData[tc.expectedStart : endIndex+1]
+
+ // Verify range data
+ assertDataEqual(t, expectedData, rangeData, "Range data mismatch for %s", tc.rangeHeader)
+ })
+ }
+}
+
+// TestSSES3BucketDefaultEncryption tests bucket-level default encryption with SSE-S3
+func TestSSES3BucketDefaultEncryption(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-default")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("Set Bucket Default Encryption", func(t *testing.T) {
+ // Set bucket encryption configuration
+ _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
+ Rules: []types.ServerSideEncryptionRule{
+ {
+ ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+ SSEAlgorithm: types.ServerSideEncryptionAes256,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err, "Failed to set bucket default encryption")
+ })
+
+ t.Run("Upload Object Without Encryption Headers", func(t *testing.T) {
+ testData := []byte("This object should be automatically encrypted with SSE-S3 due to bucket default policy.")
+ objectKey := "test-default-encrypted-object.txt"
+
+ // Upload object WITHOUT any encryption headers
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ // No ServerSideEncryption specified - should use bucket default
+ })
+ require.NoError(t, err, "Failed to upload object without encryption headers")
+
+ // Download and verify it was automatically encrypted
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download object")
+
+ // Verify SSE-S3 headers are present (indicating automatic encryption)
+ assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Object should have been automatically encrypted with SSE-S3")
+
+ // Verify content is correct (decryption works)
+ downloadedData, err := io.ReadAll(resp.Body)
+ require.NoError(t, err, "Failed to read downloaded data")
+ resp.Body.Close()
+
+ assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original")
+ })
+
+ t.Run("Get Bucket Encryption Configuration", func(t *testing.T) {
+ // Verify we can retrieve the bucket encryption configuration
+ resp, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Failed to get bucket encryption configuration")
+
+ require.Len(t, resp.ServerSideEncryptionConfiguration.Rules, 1, "Should have one encryption rule")
+ rule := resp.ServerSideEncryptionConfiguration.Rules[0]
+ assert.Equal(t, types.ServerSideEncryptionAes256, rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm, "Encryption algorithm should be AES256")
+ })
+
+ t.Run("Delete Bucket Encryption Configuration", func(t *testing.T) {
+ // Remove bucket encryption configuration
+ _, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err, "Failed to delete bucket encryption configuration")
+
+ // Verify it's removed by trying to get it (should fail)
+ _, err = client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.Error(t, err, "Getting bucket encryption should fail after deletion")
+ })
+
+ t.Run("Upload After Removing Default Encryption", func(t *testing.T) {
+ testData := []byte("This object should NOT be encrypted after removing bucket default.")
+ objectKey := "test-no-default-encryption.txt"
+
+ // Upload object without encryption headers (should not be encrypted now)
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ })
+ require.NoError(t, err, "Failed to upload object")
+
+ // Verify it's NOT encrypted
+ resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to HEAD object")
+
+ // ServerSideEncryption should be empty/nil when no encryption is applied
+ assert.Empty(t, resp.ServerSideEncryption, "Object should not be encrypted after removing bucket default")
+ })
+}
+
+// TestSSES3MultipartUploads tests SSE-S3 multipart upload functionality
+func TestSSES3MultipartUploads(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-s3-multipart-")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("Large_File_Multipart_Upload", func(t *testing.T) {
+ objectKey := "test-sse-s3-multipart-large.dat"
+ // Create 10MB test data to ensure multipart upload
+ testData := generateTestData(10 * 1024 * 1024)
+
+ // Upload with SSE-S3
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "SSE-S3 multipart upload failed")
+
+ // Verify encryption headers
+ headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to head object")
+
+ assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3 encryption")
+
+ // Download and verify content
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download SSE-S3 multipart object")
+ defer getResp.Body.Close()
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read downloaded data")
+
+ assert.Equal(t, testData, downloadedData, "SSE-S3 multipart upload data should match")
+
+ // Test range requests on multipart SSE-S3 object
+ t.Run("Range_Request_On_Multipart", func(t *testing.T) {
+ start := int64(1024 * 1024) // 1MB offset
+ end := int64(2*1024*1024 - 1) // 2MB - 1
+ expectedLength := end - start + 1
+
+ rangeResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Range: aws.String(fmt.Sprintf("bytes=%d-%d", start, end)),
+ })
+ require.NoError(t, err, "Failed to get range from SSE-S3 multipart object")
+ defer rangeResp.Body.Close()
+
+ rangeData, err := io.ReadAll(rangeResp.Body)
+ require.NoError(t, err, "Failed to read range data")
+
+ assert.Equal(t, expectedLength, int64(len(rangeData)), "Range length should match")
+
+ // Verify range content matches original data
+ expectedRange := testData[start : end+1]
+ assert.Equal(t, expectedRange, rangeData, "Range content should match for SSE-S3 multipart object")
+ })
+ })
+
+ t.Run("Explicit_Multipart_Upload_API", func(t *testing.T) {
+ objectKey := "test-sse-s3-explicit-multipart.dat"
+ testData := generateTestData(15 * 1024 * 1024) // 15MB
+
+ // Create multipart upload with SSE-S3
+ createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to create SSE-S3 multipart upload")
+
+ uploadID := *createResp.UploadId
+ var parts []types.CompletedPart
+
+ // Upload parts (5MB each, except the last part)
+ partSize := 5 * 1024 * 1024
+ for i := 0; i < len(testData); i += partSize {
+ partNumber := int32(len(parts) + 1)
+ endIdx := i + partSize
+ if endIdx > len(testData) {
+ endIdx = len(testData)
+ }
+ partData := testData[i:endIdx]
+
+ uploadPartResp, err := client.UploadPart(ctx, &s3.UploadPartInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ PartNumber: aws.Int32(partNumber),
+ UploadId: aws.String(uploadID),
+ Body: bytes.NewReader(partData),
+ })
+ require.NoError(t, err, "Failed to upload part %d", partNumber)
+
+ parts = append(parts, types.CompletedPart{
+ ETag: uploadPartResp.ETag,
+ PartNumber: aws.Int32(partNumber),
+ })
+ }
+
+ // Complete multipart upload
+ _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &types.CompletedMultipartUpload{
+ Parts: parts,
+ },
+ })
+ require.NoError(t, err, "Failed to complete SSE-S3 multipart upload")
+
+ // Verify the completed object
+ headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to head completed multipart object")
+
+ assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3 encryption on completed multipart object")
+
+ // Download and verify content
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download completed SSE-S3 multipart object")
+ defer getResp.Body.Close()
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read downloaded data")
+
+ assert.Equal(t, testData, downloadedData, "Explicit SSE-S3 multipart upload data should match")
+ })
+}
+
+// TestCrossSSECopy tests copying objects between different SSE encryption types
+func TestCrossSSECopy(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-cross-copy-")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ // Test data
+ testData := []byte("Cross-SSE copy test data")
+
+ // Generate proper SSE-C key
+ sseKey := generateSSECKey()
+
+ t.Run("SSE-S3_to_Unencrypted", func(t *testing.T) {
+ sourceKey := "source-sse-s3-obj"
+ destKey := "dest-unencrypted-obj"
+
+ // Upload with SSE-S3
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(sourceKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "SSE-S3 upload failed")
+
+ // Copy to unencrypted
+ _, err = client.CopyObject(ctx, &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)),
+ })
+ require.NoError(t, err, "Copy SSE-S3 to unencrypted failed")
+
+ // Verify destination is unencrypted and content matches
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err, "GET failed")
+ defer getResp.Body.Close()
+
+ assert.Empty(t, getResp.ServerSideEncryption, "Should be unencrypted")
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Read failed")
+ assertDataEqual(t, testData, downloadedData)
+ })
+
+ t.Run("Unencrypted_to_SSE-S3", func(t *testing.T) {
+ sourceKey := "source-unencrypted-obj"
+ destKey := "dest-sse-s3-obj"
+
+ // Upload unencrypted
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(sourceKey),
+ Body: bytes.NewReader(testData),
+ })
+ require.NoError(t, err, "Unencrypted upload failed")
+
+ // Copy to SSE-S3
+ _, err = client.CopyObject(ctx, &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Copy unencrypted to SSE-S3 failed")
+
+ // Verify destination is SSE-S3 encrypted and content matches
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err, "GET failed")
+ defer getResp.Body.Close()
+
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Expected SSE-S3")
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Read failed")
+ assertDataEqual(t, testData, downloadedData)
+ })
+
+ t.Run("SSE-C_to_SSE-S3", func(t *testing.T) {
+ sourceKey := "source-sse-c-obj"
+ destKey := "dest-sse-s3-obj"
+
+ // Upload with SSE-C
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(sourceKey),
+ Body: bytes.NewReader(testData),
+ SSECustomerAlgorithm: aws.String("AES256"),
+ SSECustomerKey: aws.String(sseKey.KeyB64),
+ SSECustomerKeyMD5: aws.String(sseKey.KeyMD5),
+ })
+ require.NoError(t, err, "SSE-C upload failed")
+
+ // Copy to SSE-S3
+ _, err = client.CopyObject(ctx, &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)),
+ CopySourceSSECustomerAlgorithm: aws.String("AES256"),
+ CopySourceSSECustomerKey: aws.String(sseKey.KeyB64),
+ CopySourceSSECustomerKeyMD5: aws.String(sseKey.KeyMD5),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Copy SSE-C to SSE-S3 failed")
+
+ // Verify destination encryption and content
+ headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err, "HEAD failed")
+ assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3")
+
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err, "GET failed")
+ defer getResp.Body.Close()
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Read failed")
+ assertDataEqual(t, testData, downloadedData)
+ })
+
+ t.Run("SSE-S3_to_SSE-C", func(t *testing.T) {
+ sourceKey := "source-sse-s3-obj"
+ destKey := "dest-sse-c-obj"
+
+ // Upload with SSE-S3
+ _, err = client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(sourceKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload SSE-S3 source object")
+
+ // Copy to SSE-C
+ _, err = client.CopyObject(ctx, &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)),
+ SSECustomerAlgorithm: aws.String("AES256"),
+ SSECustomerKey: aws.String(sseKey.KeyB64),
+ SSECustomerKeyMD5: aws.String(sseKey.KeyMD5),
+ })
+ require.NoError(t, err, "Copy SSE-S3 to SSE-C failed")
+
+ // Verify destination encryption and content
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ SSECustomerAlgorithm: aws.String("AES256"),
+ SSECustomerKey: aws.String(sseKey.KeyB64),
+ SSECustomerKeyMD5: aws.String(sseKey.KeyMD5),
+ })
+ require.NoError(t, err, "GET with SSE-C failed")
+ defer getResp.Body.Close()
+
+ assert.Equal(t, "AES256", aws.ToString(getResp.SSECustomerAlgorithm), "Expected SSE-C")
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Read failed")
+ assertDataEqual(t, testData, downloadedData)
+ })
+}
+
+// REGRESSION TESTS FOR CRITICAL BUGS FIXED
+// These tests specifically target the IV storage bugs that were fixed
+
+// TestSSES3IVStorageRegression tests that IVs are properly stored for explicit SSE-S3 uploads
+// This test would have caught the critical bug where IVs were discarded in putToFiler
+func TestSSES3IVStorageRegression(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-iv-regression")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("Explicit SSE-S3 IV Storage and Retrieval", func(t *testing.T) {
+ testData := []byte("This tests the critical IV storage bug that was fixed - the IV must be stored on the key object for decryption to work.")
+ objectKey := "explicit-sse-s3-iv-test.txt"
+
+ // Upload with explicit SSE-S3 header (this used to discard the IV)
+ putResp, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload explicit SSE-S3 object")
+
+ // Verify PUT response has SSE-S3 headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "PUT response should indicate SSE-S3")
+
+ // Critical test: Download and decrypt the object
+ // This would have FAILED with the original bug because IV was discarded
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download explicit SSE-S3 object")
+
+ // Verify GET response has SSE-S3 headers
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET response should indicate SSE-S3")
+
+ // This is the critical test - verify data can be decrypted correctly
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read decrypted data")
+ getResp.Body.Close()
+
+ // This assertion would have FAILED with the original bug
+ assertDataEqual(t, testData, downloadedData, "CRITICAL: Decryption failed - IV was not stored properly")
+ })
+
+ t.Run("Multiple Explicit SSE-S3 Objects", func(t *testing.T) {
+ // Test multiple objects to ensure each gets its own unique IV
+ numObjects := 5
+ testDataSet := make([][]byte, numObjects)
+ objectKeys := make([]string, numObjects)
+
+ // Upload multiple objects with explicit SSE-S3
+ for i := 0; i < numObjects; i++ {
+ testDataSet[i] = []byte(fmt.Sprintf("Test data for object %d - verifying unique IV storage", i))
+ objectKeys[i] = fmt.Sprintf("explicit-sse-s3-multi-%d.txt", i)
+
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKeys[i]),
+ Body: bytes.NewReader(testDataSet[i]),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload explicit SSE-S3 object %d", i)
+ }
+
+ // Download and verify each object decrypts correctly
+ for i := 0; i < numObjects; i++ {
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKeys[i]),
+ })
+ require.NoError(t, err, "Failed to download explicit SSE-S3 object %d", i)
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read decrypted data for object %d", i)
+ getResp.Body.Close()
+
+ assertDataEqual(t, testDataSet[i], downloadedData, "Decryption failed for object %d - IV not unique/stored", i)
+ }
+ })
+}
+
+// TestSSES3BucketDefaultIVStorageRegression tests bucket default SSE-S3 IV storage
+// This test would have caught the critical bug where IVs were not stored on key objects in bucket defaults
+func TestSSES3BucketDefaultIVStorageRegression(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-default-iv-regression")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ // Set bucket default encryption to SSE-S3
+ _, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
+ Rules: []types.ServerSideEncryptionRule{
+ {
+ ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+ SSEAlgorithm: types.ServerSideEncryptionAes256,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err, "Failed to set bucket default SSE-S3 encryption")
+
+ t.Run("Bucket Default SSE-S3 IV Storage", func(t *testing.T) {
+ testData := []byte("This tests the bucket default SSE-S3 IV storage bug - IV must be stored on key object for decryption.")
+ objectKey := "bucket-default-sse-s3-iv-test.txt"
+
+ // Upload WITHOUT encryption headers - should use bucket default SSE-S3
+ // This used to fail because applySSES3DefaultEncryption didn't store IV on key
+ putResp, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ // No ServerSideEncryption specified - should use bucket default
+ })
+ require.NoError(t, err, "Failed to upload object for bucket default SSE-S3")
+
+ // Verify bucket default encryption was applied
+ assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "PUT response should show bucket default SSE-S3")
+
+ // Critical test: Download and decrypt the object
+ // This would have FAILED with the original bug because IV wasn't stored on key object
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download bucket default SSE-S3 object")
+
+ // Verify GET response shows SSE-S3 was applied
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET response should show SSE-S3")
+
+ // This is the critical test - verify decryption works
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read decrypted data")
+ getResp.Body.Close()
+
+ // This assertion would have FAILED with the original bucket default bug
+ assertDataEqual(t, testData, downloadedData, "CRITICAL: Bucket default SSE-S3 decryption failed - IV not stored on key object")
+ })
+
+ t.Run("Multiple Bucket Default Objects", func(t *testing.T) {
+ // Test multiple objects with bucket default encryption
+ numObjects := 3
+ testDataSet := make([][]byte, numObjects)
+ objectKeys := make([]string, numObjects)
+
+ // Upload multiple objects without encryption headers
+ for i := 0; i < numObjects; i++ {
+ testDataSet[i] = []byte(fmt.Sprintf("Bucket default test data %d - verifying IV storage works", i))
+ objectKeys[i] = fmt.Sprintf("bucket-default-multi-%d.txt", i)
+
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKeys[i]),
+ Body: bytes.NewReader(testDataSet[i]),
+ // No encryption headers - bucket default should apply
+ })
+ require.NoError(t, err, "Failed to upload bucket default object %d", i)
+ }
+
+ // Verify each object was encrypted and can be decrypted
+ for i := 0; i < numObjects; i++ {
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKeys[i]),
+ })
+ require.NoError(t, err, "Failed to download bucket default object %d", i)
+
+ // Verify SSE-S3 was applied by bucket default
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Object %d should be SSE-S3 encrypted", i)
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read decrypted data for object %d", i)
+ getResp.Body.Close()
+
+ assertDataEqual(t, testDataSet[i], downloadedData, "Bucket default SSE-S3 decryption failed for object %d", i)
+ }
+ })
+}
+
+// TestSSES3EdgeCaseRegression tests edge cases that could cause IV storage issues
+func TestSSES3EdgeCaseRegression(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-edge-regression")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("Empty Object SSE-S3", func(t *testing.T) {
+ // Test edge case: empty objects with SSE-S3 (IV storage still required)
+ objectKey := "empty-sse-s3-object"
+
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader([]byte{}),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload empty SSE-S3 object")
+
+ // Verify empty object can be retrieved (IV must be stored even for empty objects)
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download empty SSE-S3 object")
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read empty decrypted data")
+ getResp.Body.Close()
+
+ assert.Equal(t, []byte{}, downloadedData, "Empty object content mismatch")
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Empty object should be SSE-S3 encrypted")
+ })
+
+ t.Run("Large Object SSE-S3", func(t *testing.T) {
+ // Test large objects to ensure IV storage works for chunked uploads
+ largeData := generateTestData(1024 * 1024) // 1MB
+ objectKey := "large-sse-s3-object"
+
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(largeData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ })
+ require.NoError(t, err, "Failed to upload large SSE-S3 object")
+
+ // Verify large object can be decrypted (IV must be stored properly)
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download large SSE-S3 object")
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read large decrypted data")
+ getResp.Body.Close()
+
+ assertDataEqual(t, largeData, downloadedData, "Large object decryption failed - IV storage issue")
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Large object should be SSE-S3 encrypted")
+ })
+}
+
+// TestSSES3ErrorHandlingRegression tests error handling improvements that were added
+func TestSSES3ErrorHandlingRegression(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-error-regression")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("SSE-S3 With Other Valid Operations", func(t *testing.T) {
+ // Ensure SSE-S3 works with other S3 operations (metadata, tagging, etc.)
+ testData := []byte("Testing SSE-S3 with metadata and other operations")
+ objectKey := "sse-s3-with-metadata"
+
+ // Upload with SSE-S3 and metadata
+ _, err := client.PutObject(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ ServerSideEncryption: types.ServerSideEncryptionAes256,
+ Metadata: map[string]string{
+ "test-key": "test-value",
+ "purpose": "regression-test",
+ },
+ })
+ require.NoError(t, err, "Failed to upload SSE-S3 object with metadata")
+
+ // HEAD request to verify metadata and encryption
+ headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to HEAD SSE-S3 object")
+
+ assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "HEAD should show SSE-S3")
+ assert.Equal(t, "test-value", headResp.Metadata["test-key"], "Metadata should be preserved")
+ assert.Equal(t, "regression-test", headResp.Metadata["purpose"], "Metadata should be preserved")
+
+ // GET to verify decryption still works with metadata
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to GET SSE-S3 object")
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read decrypted data")
+ getResp.Body.Close()
+
+ assertDataEqual(t, testData, downloadedData, "SSE-S3 with metadata decryption failed")
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET should show SSE-S3")
+ assert.Equal(t, "test-value", getResp.Metadata["test-key"], "GET metadata should be preserved")
+ })
+}
+
+// TestSSES3FunctionalityCompletion tests that SSE-S3 feature is now fully functional
+func TestSSES3FunctionalityCompletion(t *testing.T) {
+ ctx := context.Background()
+ client, err := createS3Client(ctx, defaultConfig)
+ require.NoError(t, err, "Failed to create S3 client")
+
+ bucketName, err := createTestBucket(ctx, client, "sse-s3-completion")
+ require.NoError(t, err, "Failed to create test bucket")
+ defer cleanupTestBucket(ctx, client, bucketName)
+
+ t.Run("All SSE-S3 Scenarios Work", func(t *testing.T) {
+ scenarios := []struct {
+ name string
+ setupBucket func() error
+ encryption *types.ServerSideEncryption
+ expectSSES3 bool
+ }{
+ {
+ name: "Explicit SSE-S3 Header",
+ setupBucket: func() error { return nil },
+ encryption: &[]types.ServerSideEncryption{types.ServerSideEncryptionAes256}[0],
+ expectSSES3: true,
+ },
+ {
+ name: "Bucket Default SSE-S3",
+ setupBucket: func() error {
+ _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
+ Rules: []types.ServerSideEncryptionRule{
+ {
+ ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+ SSEAlgorithm: types.ServerSideEncryptionAes256,
+ },
+ },
+ },
+ },
+ })
+ return err
+ },
+ encryption: nil,
+ expectSSES3: true,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ t.Run(scenario.name, func(t *testing.T) {
+ // Setup bucket if needed
+ err := scenario.setupBucket()
+ require.NoError(t, err, "Failed to setup bucket for scenario %s", scenario.name)
+
+ testData := []byte(fmt.Sprintf("Test data for scenario: %s", scenario.name))
+ objectKey := fmt.Sprintf("completion-test-%d", i)
+
+ // Upload object
+ putInput := &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ Body: bytes.NewReader(testData),
+ }
+ if scenario.encryption != nil {
+ putInput.ServerSideEncryption = *scenario.encryption
+ }
+
+ putResp, err := client.PutObject(ctx, putInput)
+ require.NoError(t, err, "Failed to upload object for scenario %s", scenario.name)
+
+ if scenario.expectSSES3 {
+ assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "Should use SSE-S3 for %s", scenario.name)
+ }
+
+ // Download and verify
+ getResp, err := client.GetObject(ctx, &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(objectKey),
+ })
+ require.NoError(t, err, "Failed to download object for scenario %s", scenario.name)
+
+ if scenario.expectSSES3 {
+ assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Should return SSE-S3 for %s", scenario.name)
+ }
+
+ downloadedData, err := io.ReadAll(getResp.Body)
+ require.NoError(t, err, "Failed to read data for scenario %s", scenario.name)
+ getResp.Body.Close()
+
+ // This is the ultimate test - decryption must work
+ assertDataEqual(t, testData, downloadedData, "Decryption failed for scenario %s", scenario.name)
+
+ // Clean up bucket encryption for next scenario
+ client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
+ Bucket: aws.String(bucketName),
+ })
+ })
+ }
+ })
+}
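
Note: the tests above lean on package-level helpers (generateTestData, assertDataEqual, createTestBucket, cleanupTestBucket, generateSSECKey) defined elsewhere in this test package. A minimal sketch of the two data helpers, assuming a deterministic byte pattern is acceptable for test payloads (the real definitions may differ):

    // Hypothetical sketches of helpers the tests above assume.
    func generateTestData(size int) []byte {
        data := make([]byte, size)
        for i := range data {
            data[i] = byte(i % 256) // deterministic pattern simplifies mismatch debugging
        }
        return data
    }

    func assertDataEqual(t *testing.T, expected, actual []byte, msgAndArgs ...interface{}) {
        t.Helper()
        assert.Equal(t, expected, actual, msgAndArgs...)
    }
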
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index c46b82cae..f469b2273 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -67,7 +67,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsN
}
// ToPbFileChunkWithSSE creates a FileChunk with SSE metadata
-func (uploadResult *UploadResult) ToPbFileChunkWithSSE(fileId string, offset int64, tsNs int64, sseType filer_pb.SSEType, sseKmsMetadata []byte) *filer_pb.FileChunk {
+func (uploadResult *UploadResult) ToPbFileChunkWithSSE(fileId string, offset int64, tsNs int64, sseType filer_pb.SSEType, sseMetadata []byte) *filer_pb.FileChunk {
fid, _ := filer_pb.ToFileIdObject(fileId)
chunk := &filer_pb.FileChunk{
FileId: fileId,
@@ -82,8 +82,8 @@ func (uploadResult *UploadResult) ToPbFileChunkWithSSE(fileId string, offset int
// Add SSE metadata if provided
chunk.SseType = sseType
- if len(sseKmsMetadata) > 0 {
- chunk.SseKmsMetadata = sseKmsMetadata
+ if len(sseMetadata) > 0 {
+ chunk.SseMetadata = sseMetadata
}
return chunk
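
Note: the rename from sseKmsMetadata to sseMetadata reflects that the per-chunk field now carries serialized metadata for any SSE flavor, not just KMS. A sketch of a caller tagging a chunk with SSE-S3 metadata, where fileId, offset, and serializedKey are assumed inputs (serializedKey presumably from SerializeSSES3Metadata):

    // Sketch: attaching per-chunk SSE-S3 metadata after an upload.
    chunk := uploadResult.ToPbFileChunkWithSSE(
        fileId,
        offset,
        time.Now().UnixNano(),
        filer_pb.SSEType_SSE_S3, // enum value added in this change
        serializedKey,           // lands in the renamed sse_metadata field
    )
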
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index 66ba15183..3eb3d3a14 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -146,6 +146,7 @@ enum SSEType {
NONE = 0; // No server-side encryption
SSE_C = 1; // Server-Side Encryption with Customer-Provided Keys
SSE_KMS = 2; // Server-Side Encryption with KMS-Managed Keys
+ SSE_S3 = 3; // Server-Side Encryption with S3-Managed Keys
}
message FileChunk {
@@ -161,7 +162,7 @@ message FileChunk {
bool is_compressed = 10;
bool is_chunk_manifest = 11; // content is a list of FileChunks
SSEType sse_type = 12; // Server-side encryption type
- bytes sse_kms_metadata = 13; // Serialized SSE-KMS metadata for this chunk
+ bytes sse_metadata = 13; // Serialized SSE metadata for this chunk (SSE-C, SSE-KMS, or SSE-S3)
}
message FileChunkManifest {
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 494692043..c8fbe4a43 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -27,6 +27,7 @@ const (
SSEType_NONE SSEType = 0 // No server-side encryption
SSEType_SSE_C SSEType = 1 // Server-Side Encryption with Customer-Provided Keys
SSEType_SSE_KMS SSEType = 2 // Server-Side Encryption with KMS-Managed Keys
+ SSEType_SSE_S3 SSEType = 3 // Server-Side Encryption with S3-Managed Keys
)
// Enum value maps for SSEType.
@@ -35,11 +36,13 @@ var (
0: "NONE",
1: "SSE_C",
2: "SSE_KMS",
+ 3: "SSE_S3",
}
SSEType_value = map[string]int32{
"NONE": 0,
"SSE_C": 1,
"SSE_KMS": 2,
+ "SSE_S3": 3,
}
)
@@ -636,7 +639,7 @@ type FileChunk struct {
IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"`
IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks
SseType SSEType `protobuf:"varint,12,opt,name=sse_type,json=sseType,proto3,enum=filer_pb.SSEType" json:"sse_type,omitempty"` // Server-side encryption type
- SseKmsMetadata []byte `protobuf:"bytes,13,opt,name=sse_kms_metadata,json=sseKmsMetadata,proto3" json:"sse_kms_metadata,omitempty"` // Serialized SSE-KMS metadata for this chunk
+ SseMetadata []byte `protobuf:"bytes,13,opt,name=sse_metadata,json=sseMetadata,proto3" json:"sse_metadata,omitempty"` // Serialized SSE metadata for this chunk (SSE-C, SSE-KMS, or SSE-S3)
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -755,9 +758,9 @@ func (x *FileChunk) GetSseType() SSEType {
return SSEType_NONE
}
-func (x *FileChunk) GetSseKmsMetadata() []byte {
+func (x *FileChunk) GetSseMetadata() []byte {
if x != nil {
- return x.SseKmsMetadata
+ return x.SseMetadata
}
return nil
}
@@ -4437,7 +4440,7 @@ const file_filer_proto_rawDesc = "" +
"\x15is_from_other_cluster\x18\x05 \x01(\bR\x12isFromOtherCluster\x12\x1e\n" +
"\n" +
"signatures\x18\x06 \x03(\x05R\n" +
- "signatures\"\xce\x03\n" +
+ "signatures\"\xc7\x03\n" +
"\tFileChunk\x12\x17\n" +
"\afile_id\x18\x01 \x01(\tR\x06fileId\x12\x16\n" +
"\x06offset\x18\x02 \x01(\x03R\x06offset\x12\x12\n" +
@@ -4453,8 +4456,8 @@ const file_filer_proto_rawDesc = "" +
"\ris_compressed\x18\n" +
" \x01(\bR\fisCompressed\x12*\n" +
"\x11is_chunk_manifest\x18\v \x01(\bR\x0fisChunkManifest\x12,\n" +
- "\bsse_type\x18\f \x01(\x0e2\x11.filer_pb.SSETypeR\asseType\x12(\n" +
- "\x10sse_kms_metadata\x18\r \x01(\fR\x0esseKmsMetadata\"@\n" +
+ "\bsse_type\x18\f \x01(\x0e2\x11.filer_pb.SSETypeR\asseType\x12!\n" +
+ "\fsse_metadata\x18\r \x01(\fR\vsseMetadata\"@\n" +
"\x11FileChunkManifest\x12+\n" +
"\x06chunks\x18\x01 \x03(\v2\x13.filer_pb.FileChunkR\x06chunks\"X\n" +
"\x06FileId\x12\x1b\n" +
@@ -4749,11 +4752,13 @@ const file_filer_proto_rawDesc = "" +
"\x05owner\x18\x04 \x01(\tR\x05owner\"<\n" +
"\x14TransferLocksRequest\x12$\n" +
"\x05locks\x18\x01 \x03(\v2\x0e.filer_pb.LockR\x05locks\"\x17\n" +
- "\x15TransferLocksResponse*+\n" +
+ "\x15TransferLocksResponse*7\n" +
"\aSSEType\x12\b\n" +
"\x04NONE\x10\x00\x12\t\n" +
"\x05SSE_C\x10\x01\x12\v\n" +
- "\aSSE_KMS\x10\x022\xf7\x10\n" +
+ "\aSSE_KMS\x10\x02\x12\n" +
+ "\n" +
+ "\x06SSE_S3\x10\x032\xf7\x10\n" +
"\fSeaweedFiler\x12g\n" +
"\x14LookupDirectoryEntry\x12%.filer_pb.LookupDirectoryEntryRequest\x1a&.filer_pb.LookupDirectoryEntryResponse\"\x00\x12N\n" +
"\vListEntries\x12\x1c.filer_pb.ListEntriesRequest\x1a\x1d.filer_pb.ListEntriesResponse\"\x000\x01\x12L\n" +
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index ab48a211b..c6de70738 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -48,6 +48,9 @@ func (s3a *S3ApiServer) createMultipartUpload(r *http.Request, input *s3.CreateM
uploadIdString = uploadIdString + "_" + strings.ReplaceAll(uuid.New().String(), "-", "")
+ // Prepare error handling outside callback scope
+ var encryptionError error
+
if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
@@ -67,36 +70,14 @@ func (s3a *S3ApiServer) createMultipartUpload(r *http.Request, input *s3.CreateM
entry.Attributes.Mime = *input.ContentType
}
- // Store SSE-KMS information from create-multipart-upload headers
- // This allows upload-part operations to inherit encryption settings
- if IsSSEKMSRequest(r) {
- keyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId)
- bucketKeyEnabled := strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true"
-
- // Store SSE-KMS configuration for parts to inherit
- entry.Extended[s3_constants.SeaweedFSSSEKMSKeyID] = []byte(keyID)
- if bucketKeyEnabled {
- entry.Extended[s3_constants.SeaweedFSSSEKMSBucketKeyEnabled] = []byte("true")
- }
-
- // Store encryption context if provided
- if contextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" {
- entry.Extended[s3_constants.SeaweedFSSSEKMSEncryptionContext] = []byte(contextHeader)
- }
-
- // Generate and store a base IV for this multipart upload
- // Chunks within each part will use this base IV with their within-part offset
- baseIV := make([]byte, 16)
- if _, err := rand.Read(baseIV); err != nil {
- glog.Errorf("Failed to generate base IV for multipart upload %s: %v", uploadIdString, err)
- } else {
- // Store base IV as base64 encoded string to avoid HTTP header issues
- entry.Extended[s3_constants.SeaweedFSSSEKMSBaseIV] = []byte(base64.StdEncoding.EncodeToString(baseIV))
- glog.V(4).Infof("Generated base IV %x for multipart upload %s", baseIV[:8], uploadIdString)
- }
-
- glog.V(3).Infof("createMultipartUpload: stored SSE-KMS settings for upload %s with keyID %s", uploadIdString, keyID)
+ // Prepare and apply encryption configuration within directory creation
+ // This ensures encryption resources are only allocated if directory creation succeeds
+ encryptionConfig, prepErr := s3a.prepareMultipartEncryptionConfig(r, uploadIdString)
+ if prepErr != nil {
+ encryptionError = prepErr
+ return // Exit callback; encryptionError is checked after mkdir returns
}
+ s3a.applyMultipartEncryptionConfig(entry, encryptionConfig)
// Extract and store object lock metadata from request headers
// This ensures object lock settings from create_multipart_upload are preserved
@@ -105,8 +86,14 @@ func (s3a *S3ApiServer) createMultipartUpload(r *http.Request, input *s3.CreateM
// Don't fail the upload - this matches AWS behavior for invalid metadata
}
}); err != nil {
- glog.Errorf("NewMultipartUpload error: %v", err)
- return nil, s3err.ErrInternalError
+ _, errorCode := handleMultipartInternalError("create multipart upload directory", err)
+ return nil, errorCode
+ }
+
+ // Check for encryption configuration errors that occurred within the callback
+ if encryptionError != nil {
+ _, errorCode := handleMultipartInternalError("prepare encryption configuration", encryptionError)
+ return nil, errorCode
}
output = &InitiateMultipartUploadResult{
@@ -266,11 +253,11 @@ func (s3a *S3ApiServer) completeMultipartUpload(r *http.Request, input *s3.Compl
for _, chunk := range entry.GetChunks() {
// Update SSE metadata with correct within-part offset (unified approach for KMS and SSE-C)
- sseKmsMetadata := chunk.SseKmsMetadata
+ sseKmsMetadata := chunk.SseMetadata
- if chunk.SseType == filer_pb.SSEType_SSE_KMS && len(chunk.SseKmsMetadata) > 0 {
+ if chunk.SseType == filer_pb.SSEType_SSE_KMS && len(chunk.SseMetadata) > 0 {
// Deserialize, update offset, and re-serialize SSE-KMS metadata
- if kmsKey, err := DeserializeSSEKMSMetadata(chunk.SseKmsMetadata); err == nil {
+ if kmsKey, err := DeserializeSSEKMSMetadata(chunk.SseMetadata); err == nil {
kmsKey.ChunkOffset = withinPartOffset
if updatedMetadata, serErr := SerializeSSEKMSMetadata(kmsKey); serErr == nil {
sseKmsMetadata = updatedMetadata
@@ -308,7 +295,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(r *http.Request, input *s3.Compl
IsCompressed: chunk.IsCompressed,
// Preserve SSE metadata with updated within-part offset
SseType: chunk.SseType,
- SseKmsMetadata: sseKmsMetadata,
+ SseMetadata: sseKmsMetadata,
}
finalParts = append(finalParts, p)
offset += int64(chunk.Size)
@@ -693,3 +680,100 @@ func maxInt(a, b int) int {
}
return b
}
+
+// MultipartEncryptionConfig holds pre-prepared encryption configuration to avoid error handling in callbacks
+type MultipartEncryptionConfig struct {
+ // SSE-KMS configuration
+ IsSSEKMS bool
+ KMSKeyID string
+ BucketKeyEnabled bool
+ EncryptionContext string
+ KMSBaseIVEncoded string
+
+ // SSE-S3 configuration
+ IsSSES3 bool
+ S3BaseIVEncoded string
+ S3KeyDataEncoded string
+}
+
+// prepareMultipartEncryptionConfig prepares encryption configuration with proper error handling
+// This eliminates the need for criticalError variable in callback functions
+func (s3a *S3ApiServer) prepareMultipartEncryptionConfig(r *http.Request, uploadIdString string) (*MultipartEncryptionConfig, error) {
+ config := &MultipartEncryptionConfig{}
+
+ // Prepare SSE-KMS configuration
+ if IsSSEKMSRequest(r) {
+ config.IsSSEKMS = true
+ config.KMSKeyID = r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId)
+ config.BucketKeyEnabled = strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true"
+ config.EncryptionContext = r.Header.Get(s3_constants.AmzServerSideEncryptionContext)
+
+ // Generate and encode base IV with proper error handling
+ baseIV := make([]byte, s3_constants.AESBlockSize)
+ n, err := rand.Read(baseIV)
+ if err != nil || n != len(baseIV) {
+ return nil, fmt.Errorf("failed to generate secure IV for SSE-KMS multipart upload: %v (read %d/%d bytes)", err, n, len(baseIV))
+ }
+ config.KMSBaseIVEncoded = base64.StdEncoding.EncodeToString(baseIV)
+ glog.V(4).Infof("Generated base IV %x for SSE-KMS multipart upload %s", baseIV[:8], uploadIdString)
+ }
+
+ // Prepare SSE-S3 configuration
+ if IsSSES3RequestInternal(r) {
+ config.IsSSES3 = true
+
+ // Generate and encode base IV with proper error handling
+ baseIV := make([]byte, s3_constants.AESBlockSize)
+ n, err := rand.Read(baseIV)
+ if err != nil || n != len(baseIV) {
+ return nil, fmt.Errorf("failed to generate secure IV for SSE-S3 multipart upload: %v (read %d/%d bytes)", err, n, len(baseIV))
+ }
+ config.S3BaseIVEncoded = base64.StdEncoding.EncodeToString(baseIV)
+ glog.V(4).Infof("Generated base IV %x for SSE-S3 multipart upload %s", baseIV[:8], uploadIdString)
+
+ // Generate and serialize SSE-S3 key with proper error handling
+ keyManager := GetSSES3KeyManager()
+ sseS3Key, err := keyManager.GetOrCreateKey("")
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate SSE-S3 key for multipart upload: %v", err)
+ }
+
+ keyData, serErr := SerializeSSES3Metadata(sseS3Key)
+ if serErr != nil {
+ return nil, fmt.Errorf("failed to serialize SSE-S3 metadata for multipart upload: %v", serErr)
+ }
+
+ config.S3KeyDataEncoded = base64.StdEncoding.EncodeToString(keyData)
+
+ // Store key in manager for later retrieval
+ keyManager.StoreKey(sseS3Key)
+ glog.V(4).Infof("Stored SSE-S3 key %s for multipart upload %s", sseS3Key.KeyID, uploadIdString)
+ }
+
+ return config, nil
+}
+
+// applyMultipartEncryptionConfig applies pre-prepared encryption configuration to filer entry
+// This function is guaranteed not to fail since all error-prone operations were done during preparation
+func (s3a *S3ApiServer) applyMultipartEncryptionConfig(entry *filer_pb.Entry, config *MultipartEncryptionConfig) {
+ // Apply SSE-KMS configuration
+ if config.IsSSEKMS {
+ entry.Extended[s3_constants.SeaweedFSSSEKMSKeyID] = []byte(config.KMSKeyID)
+ if config.BucketKeyEnabled {
+ entry.Extended[s3_constants.SeaweedFSSSEKMSBucketKeyEnabled] = []byte("true")
+ }
+ if config.EncryptionContext != "" {
+ entry.Extended[s3_constants.SeaweedFSSSEKMSEncryptionContext] = []byte(config.EncryptionContext)
+ }
+ entry.Extended[s3_constants.SeaweedFSSSEKMSBaseIV] = []byte(config.KMSBaseIVEncoded)
+ glog.V(3).Infof("applyMultipartEncryptionConfig: applied SSE-KMS settings with keyID %s", config.KMSKeyID)
+ }
+
+ // Apply SSE-S3 configuration
+ if config.IsSSES3 {
+ entry.Extended[s3_constants.SeaweedFSSSES3Encryption] = []byte(s3_constants.SSEAlgorithmAES256)
+ entry.Extended[s3_constants.SeaweedFSSSES3BaseIV] = []byte(config.S3BaseIVEncoded)
+ entry.Extended[s3_constants.SeaweedFSSSES3KeyData] = []byte(config.S3KeyDataEncoded)
+ glog.V(3).Infof("applyMultipartEncryptionConfig: applied SSE-S3 settings")
+ }
+}
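For context on how these two halves compose: the fallible preparation runs once, before the filer callback, and the infallible apply runs inside it. A minimal sketch of a call site under that assumption — the mkdir/genUploadsFolder call shape follows the existing createMultipartUpload code, but this exact wiring is not part of the hunk above:

    encCfg, err := s3a.prepareMultipartEncryptionConfig(r, uploadIdString)
    if err != nil {
        return nil, s3err.ErrInternalError // no criticalError plumbing needed in the callback below
    }
    err = s3a.mkdir(s3a.genUploadsFolder(bucket), uploadIdString, func(entry *filer_pb.Entry) {
        if entry.Extended == nil {
            entry.Extended = make(map[string][]byte)
        }
        s3a.applyMultipartEncryptionConfig(entry, encCfg) // cannot fail by construction
    })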
diff --git a/weed/s3api/policy_engine/types.go b/weed/s3api/policy_engine/types.go
index 953e89650..5f417afb4 100644
--- a/weed/s3api/policy_engine/types.go
+++ b/weed/s3api/policy_engine/types.go
@@ -407,10 +407,7 @@ func (cs *CompiledStatement) EvaluateStatement(args *PolicyEvaluationArgs) bool
return false
}
- // TODO: Add condition evaluation if needed
- // if !cs.evaluateConditions(args.Conditions) {
- // return false
- // }
+
return true
}
diff --git a/weed/s3api/s3_bucket_encryption.go b/weed/s3api/s3_bucket_encryption.go
index 6ca05749f..3166fb81f 100644
--- a/weed/s3api/s3_bucket_encryption.go
+++ b/weed/s3api/s3_bucket_encryption.go
@@ -81,8 +81,8 @@ const (
EncryptionTypeKMS = "aws:kms"
)
-// GetBucketEncryption handles GET bucket encryption requests
-func (s3a *S3ApiServer) GetBucketEncryption(w http.ResponseWriter, r *http.Request) {
+// GetBucketEncryptionHandler handles GET bucket encryption requests
+func (s3a *S3ApiServer) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
bucket, _ := s3_constants.GetBucketAndObject(r)
// Load bucket encryption configuration
@@ -111,8 +111,8 @@ func (s3a *S3ApiServer) GetBucketEncryption(w http.ResponseWriter, r *http.Reque
}
}
-// PutBucketEncryption handles PUT bucket encryption requests
-func (s3a *S3ApiServer) PutBucketEncryption(w http.ResponseWriter, r *http.Request) {
+// PutBucketEncryptionHandler handles PUT bucket encryption requests
+func (s3a *S3ApiServer) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
bucket, _ := s3_constants.GetBucketAndObject(r)
// Read and parse the request body
@@ -168,8 +168,8 @@ func (s3a *S3ApiServer) PutBucketEncryption(w http.ResponseWriter, r *http.Reque
w.WriteHeader(http.StatusOK)
}
-// DeleteBucketEncryption handles DELETE bucket encryption requests
-func (s3a *S3ApiServer) DeleteBucketEncryption(w http.ResponseWriter, r *http.Request) {
+// DeleteBucketEncryptionHandler handles DELETE bucket encryption requests
+func (s3a *S3ApiServer) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
bucket, _ := s3_constants.GetBucketAndObject(r)
errCode := s3a.removeEncryptionConfiguration(bucket)
diff --git a/weed/s3api/s3_constants/crypto.go b/weed/s3api/s3_constants/crypto.go
new file mode 100644
index 000000000..398e2b669
--- /dev/null
+++ b/weed/s3api/s3_constants/crypto.go
@@ -0,0 +1,32 @@
+package s3_constants
+
+// Cryptographic constants
+const (
+ // AES block and key sizes
+ AESBlockSize = 16 // 128 bits for AES block size (IV length)
+ AESKeySize = 32 // 256 bits for AES-256 keys
+
+ // SSE algorithm identifiers
+ SSEAlgorithmAES256 = "AES256"
+ SSEAlgorithmKMS = "aws:kms"
+
+ // SSE type identifiers for response headers and internal processing
+ SSETypeC = "SSE-C"
+ SSETypeKMS = "SSE-KMS"
+ SSETypeS3 = "SSE-S3"
+
+ // S3 multipart upload limits and offsets
+ S3MaxPartSize = 5 * 1024 * 1024 * 1024 // 5GB - AWS S3 maximum part size limit
+
+ // Multipart offset calculation for unique IV generation
+ // Using an 8GB offset between parts (larger than the maximum part size) to prevent IV collisions
+ // Critical for CTR mode encryption security in multipart uploads
+ PartOffsetMultiplier = int64(1) << 33 // 8GB per part offset
+
+ // KMS validation limits based on AWS KMS service constraints
+ MaxKMSEncryptionContextPairs = 10 // Maximum number of encryption context key-value pairs
+ MaxKMSKeyIDLength = 500 // Maximum length for KMS key identifiers
+
+ // S3 multipart upload limits based on AWS S3 service constraints
+ MaxS3MultipartParts = 10000 // Maximum number of parts in a multipart upload (1-10,000)
+)
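For intuition on the 8GB spacing: AES-CTR consumes one counter value per 16-byte block, so parts spaced PartOffsetMultiplier apart start 2^33/16 = 2^29 counter values apart, while a maximal 5GB part consumes only about 2^28.4 values — adjacent parts' counter ranges can never overlap. A small sketch of the arithmetic (partIVOffset is a hypothetical helper; whether the handlers index parts from 0 or 1 is not shown in this diff):

    // Hypothetical helper: byte offset fed into calculateIVWithOffset for a 1-based part number.
    func partIVOffset(partNumber int) int64 {
        return int64(partNumber-1) * s3_constants.PartOffsetMultiplier
    }
    // Part 1 covers counter blocks [0, 5GB/16); part 2 starts at block 2^29 (8GB/16),
    // so even a maximum-size part cannot reach into the next part's IV range.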
diff --git a/weed/s3api/s3_constants/header.go b/weed/s3api/s3_constants/header.go
index a2d79d83c..b4c91fa71 100644
--- a/weed/s3api/s3_constants/header.go
+++ b/weed/s3api/s3_constants/header.go
@@ -99,6 +99,11 @@ const (
SeaweedFSSSEKMSBucketKeyEnabled = "x-seaweedfs-sse-kms-bucket-key-enabled" // Bucket key setting for multipart upload SSE-KMS inheritance
SeaweedFSSSEKMSEncryptionContext = "x-seaweedfs-sse-kms-encryption-context" // Encryption context for multipart upload SSE-KMS inheritance
SeaweedFSSSEKMSBaseIV = "x-seaweedfs-sse-kms-base-iv" // Base IV for multipart upload SSE-KMS (for IV offset calculation)
+
+ // Multipart upload metadata keys for SSE-S3
+ SeaweedFSSSES3Encryption = "x-seaweedfs-sse-s3-encryption" // Encryption type for multipart upload SSE-S3 inheritance
+ SeaweedFSSSES3BaseIV = "x-seaweedfs-sse-s3-base-iv" // Base IV for multipart upload SSE-S3 (for IV offset calculation)
+ SeaweedFSSSES3KeyData = "x-seaweedfs-sse-s3-key-data" // Encrypted key data for multipart upload SSE-S3 inheritance
)
// SeaweedFS internal headers for filer communication
@@ -106,6 +111,8 @@ const (
SeaweedFSSSEKMSKeyHeader = "X-SeaweedFS-SSE-KMS-Key" // Header for passing SSE-KMS metadata to filer
SeaweedFSSSEIVHeader = "X-SeaweedFS-SSE-IV" // Header for passing SSE-C IV to filer (SSE-C only)
SeaweedFSSSEKMSBaseIVHeader = "X-SeaweedFS-SSE-KMS-Base-IV" // Header for passing base IV for multipart SSE-KMS
+ SeaweedFSSSES3BaseIVHeader = "X-SeaweedFS-SSE-S3-Base-IV" // Header for passing base IV for multipart SSE-S3
+ SeaweedFSSSES3KeyDataHeader = "X-SeaweedFS-SSE-S3-Key-Data" // Header for passing key data for multipart SSE-S3
)
// Non-Standard S3 HTTP request constants
diff --git a/weed/s3api/s3_error_utils.go b/weed/s3api/s3_error_utils.go
new file mode 100644
index 000000000..7afb241b5
--- /dev/null
+++ b/weed/s3api/s3_error_utils.go
@@ -0,0 +1,54 @@
+package s3api
+
+import (
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+)
+
+// This file provides common error-handling helpers for S3 API operations.
+
+// handlePutToFilerError logs an error and returns the standard putToFiler error format
+func handlePutToFilerError(operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) {
+ glog.Errorf("Failed to %s: %v", operation, err)
+ return "", errorCode, ""
+}
+
+// handlePutToFilerInternalError is a convenience wrapper for internal errors in putToFiler
+func handlePutToFilerInternalError(operation string, err error) (string, s3err.ErrorCode, string) {
+ return handlePutToFilerError(operation, err, s3err.ErrInternalError)
+}
+
+// handleMultipartError logs an error and returns the standard multipart error format
+func handleMultipartError(operation string, err error, errorCode s3err.ErrorCode) (interface{}, s3err.ErrorCode) {
+ glog.Errorf("Failed to %s: %v", operation, err)
+ return nil, errorCode
+}
+
+// handleMultipartInternalError is a convenience wrapper for internal errors in multipart operations
+func handleMultipartInternalError(operation string, err error) (interface{}, s3err.ErrorCode) {
+ return handleMultipartError(operation, err, s3err.ErrInternalError)
+}
+
+// logErrorAndReturn logs an error with operation context and returns the specified error code
+func logErrorAndReturn(operation string, err error, errorCode s3err.ErrorCode) s3err.ErrorCode {
+ glog.Errorf("Failed to %s: %v", operation, err)
+ return errorCode
+}
+
+// logInternalError is a convenience wrapper for internal error logging
+func logInternalError(operation string, err error) s3err.ErrorCode {
+ return logErrorAndReturn(operation, err, s3err.ErrInternalError)
+}
+
+// SSE-specific error handlers
+
+// handleSSEError handles common SSE-related errors with appropriate context
+func handleSSEError(sseType string, operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) {
+ glog.Errorf("Failed to %s for %s: %v", operation, sseType, err)
+ return "", errorCode, ""
+}
+
+// handleSSEInternalError is a convenience wrapper for SSE internal errors
+func handleSSEInternalError(sseType string, operation string, err error) (string, s3err.ErrorCode, string) {
+ return handleSSEError(sseType, operation, err, s3err.ErrInternalError)
+}
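A hedged sketch of the intended call shape, assuming package s3api: a fallible step inside a putToFiler-style function (which returns an etag, an error code, and an SSE response header value) collapses to a single line. demoPutToFilerErrorPath is illustrative only:

    func demoPutToFilerErrorPath(dataReader io.Reader, customerKey *SSECustomerKey) (string, s3err.ErrorCode, string) {
        encryptedReader, _, err := CreateSSECEncryptedReader(dataReader, customerKey)
        if err != nil {
            return handlePutToFilerInternalError("create SSE-C encrypted reader", err)
        }
        _ = encryptedReader // hand the reader off to the filer upload here
        return "", s3err.ErrNone, ""
    }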
diff --git a/weed/s3api/s3_sse_c.go b/weed/s3api/s3_sse_c.go
index 7eb5cf474..733ae764e 100644
--- a/weed/s3api/s3_sse_c.go
+++ b/weed/s3api/s3_sse_c.go
@@ -28,9 +28,8 @@ const (
const (
// SSE-C constants
- SSECustomerAlgorithmAES256 = "AES256"
+ SSECustomerAlgorithmAES256 = s3_constants.SSEAlgorithmAES256
SSECustomerKeySize = 32 // 256 bits
- AESBlockSize = 16 // AES block size in bytes
)
// SSE-C related errors
@@ -163,7 +162,7 @@ func CreateSSECEncryptedReader(r io.Reader, customerKey *SSECustomerKey) (io.Rea
}
// Generate random IV
- iv := make([]byte, AESBlockSize)
+ iv := make([]byte, s3_constants.AESBlockSize)
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
return nil, nil, fmt.Errorf("failed to generate IV: %v", err)
}
@@ -186,8 +185,8 @@ func CreateSSECDecryptedReader(r io.Reader, customerKey *SSECustomerKey, iv []by
}
// IV must be provided from metadata
- if len(iv) != AESBlockSize {
- return nil, fmt.Errorf("invalid IV length: expected %d bytes, got %d", AESBlockSize, len(iv))
+ if err := ValidateIV(iv, "IV"); err != nil {
+ return nil, fmt.Errorf("invalid IV from metadata: %w", err)
}
// Create AES cipher
diff --git a/weed/s3api/s3_sse_copy_test.go b/weed/s3api/s3_sse_copy_test.go
index 8fff2b7b0..35839a704 100644
--- a/weed/s3api/s3_sse_copy_test.go
+++ b/weed/s3api/s3_sse_copy_test.go
@@ -320,9 +320,9 @@ func TestSSECopyWithCorruptedSource(t *testing.T) {
// Corrupt the encrypted data
corruptedData := make([]byte, len(encryptedData))
copy(corruptedData, encryptedData)
- if len(corruptedData) > AESBlockSize {
+ if len(corruptedData) > s3_constants.AESBlockSize {
// Corrupt a byte after the IV
- corruptedData[AESBlockSize] ^= 0xFF
+ corruptedData[s3_constants.AESBlockSize] ^= 0xFF
}
// Try to decrypt corrupted data
diff --git a/weed/s3api/s3_sse_error_test.go b/weed/s3api/s3_sse_error_test.go
index 4b062faa6..a344e2ef7 100644
--- a/weed/s3api/s3_sse_error_test.go
+++ b/weed/s3api/s3_sse_error_test.go
@@ -275,7 +275,7 @@ func TestSSEEmptyDataHandling(t *testing.T) {
}
// Should have IV for empty data
- if len(iv) != AESBlockSize {
+ if len(iv) != s3_constants.AESBlockSize {
t.Error("IV should be present even for empty data")
}
diff --git a/weed/s3api/s3_sse_kms.go b/weed/s3api/s3_sse_kms.go
index 2abead3c6..11c3bf643 100644
--- a/weed/s3api/s3_sse_kms.go
+++ b/weed/s3api/s3_sse_kms.go
@@ -66,14 +66,6 @@ func CreateSSEKMSEncryptedReader(r io.Reader, keyID string, encryptionContext ma
// CreateSSEKMSEncryptedReaderWithBucketKey creates an encrypted reader with optional S3 Bucket Keys optimization
func CreateSSEKMSEncryptedReaderWithBucketKey(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool) (io.Reader, *SSEKMSKey, error) {
- kmsProvider := kms.GetGlobalKMS()
- if kmsProvider == nil {
- return nil, nil, fmt.Errorf("KMS is not configured")
- }
-
- var dataKeyResp *kms.GenerateDataKeyResponse
- var err error
-
if bucketKeyEnabled {
// Use S3 Bucket Keys optimization - try to get or create a bucket-level data key
// Note: This is a simplified implementation. In practice, this would need
@@ -83,29 +75,14 @@ func CreateSSEKMSEncryptedReaderWithBucketKey(r io.Reader, keyID string, encrypt
bucketKeyEnabled = false
}
- if !bucketKeyEnabled {
- // Generate a per-object data encryption key using KMS
- dataKeyReq := &kms.GenerateDataKeyRequest{
- KeyID: keyID,
- KeySpec: kms.KeySpecAES256,
- EncryptionContext: encryptionContext,
- }
-
- ctx := context.Background()
- dataKeyResp, err = kmsProvider.GenerateDataKey(ctx, dataKeyReq)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to generate data key: %v", err)
- }
+ // Generate data key using common utility
+ dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext)
+ if err != nil {
+ return nil, nil, err
}
// Ensure we clear the plaintext data key from memory when done
- defer kms.ClearSensitiveData(dataKeyResp.Plaintext)
-
- // Create AES cipher with the data key
- block, err := aes.NewCipher(dataKeyResp.Plaintext)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to create AES cipher: %v", err)
- }
+ defer clearKMSDataKey(dataKeyResult)
// Generate a random IV for CTR mode
// Note: AES-CTR is used for object data encryption (not AES-GCM) because:
@@ -113,21 +90,16 @@ func CreateSSEKMSEncryptedReaderWithBucketKey(r io.Reader, keyID string, encrypt
// 2. CTR mode supports range requests (seek to arbitrary positions)
// 3. This matches AWS S3 and other S3-compatible implementations
// The KMS data key encryption (separate layer) uses AES-GCM for authentication
- iv := make([]byte, 16) // AES block size
+ iv := make([]byte, s3_constants.AESBlockSize)
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
return nil, nil, fmt.Errorf("failed to generate IV: %v", err)
}
// Create CTR mode cipher stream
- stream := cipher.NewCTR(block, iv)
+ stream := cipher.NewCTR(dataKeyResult.Block, iv)
- // Create the SSE-KMS metadata
- sseKey := &SSEKMSKey{
- KeyID: dataKeyResp.KeyID,
- EncryptedDataKey: dataKeyResp.CiphertextBlob,
- EncryptionContext: encryptionContext,
- BucketKeyEnabled: bucketKeyEnabled,
- }
+ // Create the SSE-KMS metadata using utility function
+ sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, 0)
// The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV
// This ensures correct Content-Length for clients
@@ -142,51 +114,28 @@ func CreateSSEKMSEncryptedReaderWithBucketKey(r io.Reader, keyID string, encrypt
// CreateSSEKMSEncryptedReaderWithBaseIV creates an SSE-KMS encrypted reader using a provided base IV
// This is used for multipart uploads where all chunks need to use the same base IV
func CreateSSEKMSEncryptedReaderWithBaseIV(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool, baseIV []byte) (io.Reader, *SSEKMSKey, error) {
- if len(baseIV) != 16 {
- return nil, nil, fmt.Errorf("base IV must be exactly 16 bytes, got %d", len(baseIV))
+ if err := ValidateIV(baseIV, "base IV"); err != nil {
+ return nil, nil, err
}
- kmsProvider := kms.GetGlobalKMS()
- if kmsProvider == nil {
- return nil, nil, fmt.Errorf("KMS is not configured")
- }
-
- // Create a new data key for the object
- generateDataKeyReq := &kms.GenerateDataKeyRequest{
- KeyID: keyID,
- KeySpec: kms.KeySpecAES256,
- EncryptionContext: encryptionContext,
- }
-
- ctx := context.Background()
- dataKeyResp, err := kmsProvider.GenerateDataKey(ctx, generateDataKeyReq)
+ // Generate data key using common utility
+ dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext)
if err != nil {
- return nil, nil, fmt.Errorf("failed to generate data key: %v", err)
+ return nil, nil, err
}
// Ensure we clear the plaintext data key from memory when done
- defer kms.ClearSensitiveData(dataKeyResp.Plaintext)
-
- // Create AES cipher with the plaintext data key
- block, err := aes.NewCipher(dataKeyResp.Plaintext)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to create AES cipher: %v", err)
- }
+ defer clearKMSDataKey(dataKeyResult)
// Use the provided base IV instead of generating a new one
- iv := make([]byte, 16)
+ iv := make([]byte, s3_constants.AESBlockSize)
copy(iv, baseIV)
// Create CTR mode cipher stream
- stream := cipher.NewCTR(block, iv)
+ stream := cipher.NewCTR(dataKeyResult.Block, iv)
- // Create the SSE-KMS metadata with the provided base IV
- sseKey := &SSEKMSKey{
- KeyID: dataKeyResp.KeyID,
- EncryptedDataKey: dataKeyResp.CiphertextBlob,
- EncryptionContext: encryptionContext,
- BucketKeyEnabled: bucketKeyEnabled,
- }
+ // Create the SSE-KMS metadata using utility function
+ sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, 0)
// The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV
// This ensures correct Content-Length for clients
@@ -198,6 +147,38 @@ func CreateSSEKMSEncryptedReaderWithBaseIV(r io.Reader, keyID string, encryption
return encryptedReader, sseKey, nil
}
+// CreateSSEKMSEncryptedReaderWithBaseIVAndOffset creates an SSE-KMS encrypted reader using a provided base IV and offset
+// This is used for multipart uploads where all chunks need unique IVs to prevent IV reuse vulnerabilities
+func CreateSSEKMSEncryptedReaderWithBaseIVAndOffset(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool, baseIV []byte, offset int64) (io.Reader, *SSEKMSKey, error) {
+ if err := ValidateIV(baseIV, "base IV"); err != nil {
+ return nil, nil, err
+ }
+
+ // Generate data key using common utility
+ dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Ensure we clear the plaintext data key from memory when done
+ defer clearKMSDataKey(dataKeyResult)
+
+ // Calculate unique IV using base IV and offset to prevent IV reuse in multipart uploads
+ iv := calculateIVWithOffset(baseIV, offset)
+
+ // Create CTR mode cipher stream
+ stream := cipher.NewCTR(dataKeyResult.Block, iv)
+
+ // Create the SSE-KMS metadata using utility function
+ sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, offset)
+
+ // The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV
+ // This ensures correct Content-Length for clients
+ encryptedReader := &cipher.StreamReader{S: stream, R: r}
+
+ return encryptedReader, sseKey, nil
+}
+
// hashEncryptionContext creates a deterministic hash of the encryption context
func hashEncryptionContext(encryptionContext map[string]string) string {
if len(encryptionContext) == 0 {
@@ -434,8 +415,8 @@ func CreateSSEKMSDecryptedReader(r io.Reader, sseKey *SSEKMSKey) (io.Reader, err
}
// Use the IV from the SSE key metadata, calculating offset if this is a chunked part
- if len(sseKey.IV) != 16 {
- return nil, fmt.Errorf("invalid IV length in SSE key: expected 16 bytes, got %d", len(sseKey.IV))
+ if err := ValidateIV(sseKey.IV, "SSE key IV"); err != nil {
+ return nil, fmt.Errorf("invalid IV in SSE key: %w", err)
}
// Calculate the correct IV for this chunk's offset within the original part
@@ -445,7 +426,7 @@ func CreateSSEKMSDecryptedReader(r io.Reader, sseKey *SSEKMSKey) (io.Reader, err
glog.Infof("Using calculated IV with offset %d for chunk decryption", sseKey.ChunkOffset)
} else {
iv = sseKey.IV
- glog.Infof("Using base IV for chunk decryption (offset=0)")
+ // glog.Infof("Using base IV for chunk decryption (offset=0)")
}
// Create AES cipher with the decrypted data key
@@ -470,7 +451,7 @@ func ParseSSEKMSHeaders(r *http.Request) (*SSEKMSKey, error) {
if sseAlgorithm == "" {
return nil, nil // No SSE headers present
}
- if sseAlgorithm != "aws:kms" {
+ if sseAlgorithm != s3_constants.SSEAlgorithmKMS {
return nil, fmt.Errorf("invalid SSE algorithm: %s", sseAlgorithm)
}
@@ -501,8 +482,8 @@ func ParseSSEKMSHeaders(r *http.Request) (*SSEKMSKey, error) {
BucketKeyEnabled: bucketKeyEnabled,
}
- // Validate the parsed key
- if err := ValidateSSEKMSKey(sseKey); err != nil {
+ // Validate the parsed key including key ID format
+ if err := ValidateSSEKMSKeyInternal(sseKey); err != nil {
return nil, err
}
@@ -510,9 +491,9 @@ func ParseSSEKMSHeaders(r *http.Request) (*SSEKMSKey, error) {
}
// ValidateSSEKMSKey validates an SSE-KMS key configuration
-func ValidateSSEKMSKey(sseKey *SSEKMSKey) error {
- if sseKey == nil {
- return fmt.Errorf("SSE-KMS key is required")
+func ValidateSSEKMSKeyInternal(sseKey *SSEKMSKey) error {
+ if err := ValidateSSEKMSKey(sseKey); err != nil {
+ return err
}
// An empty key ID is valid and means the default KMS key should be used.
@@ -523,38 +504,6 @@ func ValidateSSEKMSKey(sseKey *SSEKMSKey) error {
return nil
}
-// isValidKMSKeyID performs basic validation of KMS key identifiers.
-// Following Minio's approach: be permissive and accept any reasonable key format.
-// Only reject keys with leading/trailing spaces or other obvious issues.
-func isValidKMSKeyID(keyID string) bool {
- // Reject empty keys
- if keyID == "" {
- return false
- }
-
- // Following Minio's validation: reject keys with leading/trailing spaces
- if strings.HasPrefix(keyID, " ") || strings.HasSuffix(keyID, " ") {
- return false
- }
-
- // Also reject keys with internal spaces (common sense validation)
- if strings.Contains(keyID, " ") {
- return false
- }
-
- // Reject keys with control characters or newlines
- if strings.ContainsAny(keyID, "\t\n\r\x00") {
- return false
- }
-
- // Accept any reasonable length key (be permissive for various KMS providers)
- if len(keyID) > 0 && len(keyID) <= 500 {
- return true
- }
-
- return false
-}
-
// BuildEncryptionContext creates the encryption context for S3 objects
func BuildEncryptionContext(bucketName, objectKey string, useBucketKey bool) map[string]string {
return kms.BuildS3EncryptionContext(bucketName, objectKey, useBucketKey)
@@ -594,12 +543,12 @@ func parseEncryptionContext(contextHeader string) (map[string]string, error) {
// SerializeSSEKMSMetadata serializes SSE-KMS metadata for storage in object metadata
func SerializeSSEKMSMetadata(sseKey *SSEKMSKey) ([]byte, error) {
- if sseKey == nil {
- return nil, fmt.Errorf("SSE-KMS key cannot be nil")
+ if err := ValidateSSEKMSKey(sseKey); err != nil {
+ return nil, err
}
metadata := &SSEKMSMetadata{
- Algorithm: "aws:kms",
+ Algorithm: s3_constants.SSEAlgorithmKMS,
KeyID: sseKey.KeyID,
EncryptedDataKey: base64.StdEncoding.EncodeToString(sseKey.EncryptedDataKey),
EncryptionContext: sseKey.EncryptionContext,
@@ -629,13 +578,13 @@ func DeserializeSSEKMSMetadata(data []byte) (*SSEKMSKey, error) {
}
// Validate algorithm - be lenient with missing/empty algorithm for backward compatibility
- if metadata.Algorithm != "" && metadata.Algorithm != "aws:kms" {
+ if metadata.Algorithm != "" && metadata.Algorithm != s3_constants.SSEAlgorithmKMS {
return nil, fmt.Errorf("invalid SSE-KMS algorithm: %s", metadata.Algorithm)
}
// Set default algorithm if empty
if metadata.Algorithm == "" {
- metadata.Algorithm = "aws:kms"
+ metadata.Algorithm = s3_constants.SSEAlgorithmKMS
}
// Decode the encrypted data key
@@ -666,48 +615,6 @@ func DeserializeSSEKMSMetadata(data []byte) (*SSEKMSKey, error) {
return sseKey, nil
}
-// calculateIVWithOffset calculates the correct IV for a chunk at a given offset within the original data stream
-// This is necessary for AES-CTR mode when data is split into multiple chunks
-func calculateIVWithOffset(baseIV []byte, offset int64) []byte {
- if len(baseIV) != 16 {
- glog.Errorf("Invalid base IV length: expected 16, got %d", len(baseIV))
- return baseIV // Return original IV as fallback
- }
-
- // Create a copy of the base IV to avoid modifying the original
- iv := make([]byte, 16)
- copy(iv, baseIV)
-
- // Calculate the block offset (AES block size is 16 bytes)
- blockOffset := offset / 16
- glog.Infof("calculateIVWithOffset DEBUG: offset=%d, blockOffset=%d (0x%x)",
- offset, blockOffset, blockOffset)
-
- // Add the block offset to the IV counter (last 8 bytes, big-endian)
- // This matches how AES-CTR mode increments the counter
- // Process from least significant byte (index 15) to most significant byte (index 8)
- originalBlockOffset := blockOffset
- carry := uint64(0)
- for i := 15; i >= 8; i-- {
- sum := uint64(iv[i]) + uint64(blockOffset&0xFF) + carry
- oldByte := iv[i]
- iv[i] = byte(sum & 0xFF)
- carry = sum >> 8
- blockOffset = blockOffset >> 8
- glog.Infof("calculateIVWithOffset DEBUG: i=%d, oldByte=0x%02x, newByte=0x%02x, carry=%d, blockOffset=0x%x",
- i, oldByte, iv[i], carry, blockOffset)
-
- // If no more blockOffset bits and no carry, we can stop early
- if blockOffset == 0 && carry == 0 {
- break
- }
- }
-
- glog.Infof("calculateIVWithOffset: baseIV=%x, offset=%d, blockOffset=%d, calculatedIV=%x",
- baseIV, offset, originalBlockOffset, iv)
- return iv
-}
-
// SSECMetadata represents SSE-C metadata for per-chunk storage (unified with SSE-KMS approach)
type SSECMetadata struct {
Algorithm string `json:"algorithm"` // SSE-C algorithm (always "AES256")
@@ -718,12 +625,12 @@ type SSECMetadata struct {
// SerializeSSECMetadata serializes SSE-C metadata for storage in chunk metadata
func SerializeSSECMetadata(iv []byte, keyMD5 string, partOffset int64) ([]byte, error) {
- if len(iv) != 16 {
- return nil, fmt.Errorf("invalid IV length: expected 16, got %d", len(iv))
+ if err := ValidateIV(iv, "IV"); err != nil {
+ return nil, err
}
metadata := &SSECMetadata{
- Algorithm: "AES256",
+ Algorithm: s3_constants.SSEAlgorithmAES256,
IV: base64.StdEncoding.EncodeToString(iv),
KeyMD5: keyMD5,
PartOffset: partOffset,
@@ -750,7 +657,7 @@ func DeserializeSSECMetadata(data []byte) (*SSECMetadata, error) {
}
// Validate algorithm
- if metadata.Algorithm != "AES256" {
+ if metadata.Algorithm != s3_constants.SSEAlgorithmAES256 {
return nil, fmt.Errorf("invalid SSE-C algorithm: %s", metadata.Algorithm)
}
@@ -769,7 +676,7 @@ func DeserializeSSECMetadata(data []byte) (*SSECMetadata, error) {
// AddSSEKMSResponseHeaders adds SSE-KMS response headers to an HTTP response
func AddSSEKMSResponseHeaders(w http.ResponseWriter, sseKey *SSEKMSKey) {
- w.Header().Set(s3_constants.AmzServerSideEncryption, "aws:kms")
+ w.Header().Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmKMS)
w.Header().Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, sseKey.KeyID)
if len(sseKey.EncryptionContext) > 0 {
@@ -798,7 +705,7 @@ func IsSSEKMSRequest(r *http.Request) bool {
// According to AWS S3 specification, SSE-KMS is only valid when the encryption header
// is explicitly set to "aws:kms". The KMS key ID header alone is not sufficient.
sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryption)
- return sseAlgorithm == "aws:kms"
+ return sseAlgorithm == s3_constants.SSEAlgorithmKMS
}
// IsSSEKMSEncrypted checks if the metadata indicates SSE-KMS encryption
@@ -809,7 +716,7 @@ func IsSSEKMSEncrypted(metadata map[string][]byte) bool {
// The canonical way to identify an SSE-KMS encrypted object is by this header.
if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists {
- return string(sseAlgorithm) == "aws:kms"
+ return string(sseAlgorithm) == s3_constants.SSEAlgorithmKMS
}
return false
@@ -831,7 +738,7 @@ func IsAnySSEEncrypted(metadata map[string][]byte) bool {
// Check for SSE-S3
if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists {
- return string(sseAlgorithm) == "AES256"
+ return string(sseAlgorithm) == s3_constants.SSEAlgorithmAES256
}
return false
@@ -890,7 +797,7 @@ func (s SSEKMSCopyStrategy) String() string {
// GetSourceSSEKMSInfo extracts SSE-KMS information from source object metadata
func GetSourceSSEKMSInfo(metadata map[string][]byte) (keyID string, isEncrypted bool) {
- if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists && string(sseAlgorithm) == "aws:kms" {
+ if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists && string(sseAlgorithm) == s3_constants.SSEAlgorithmKMS {
if kmsKeyID, exists := metadata[s3_constants.AmzServerSideEncryptionAwsKmsKeyId]; exists {
return string(kmsKeyID), true
}
diff --git a/weed/s3api/s3_sse_kms_utils.go b/weed/s3api/s3_sse_kms_utils.go
new file mode 100644
index 000000000..be6d72626
--- /dev/null
+++ b/weed/s3api/s3_sse_kms_utils.go
@@ -0,0 +1,99 @@
+package s3api
+
+import (
+ "context"
+ "crypto/aes"
+ "crypto/cipher"
+ "fmt"
+ "strings"
+
+ "github.com/seaweedfs/seaweedfs/weed/kms"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+)
+
+// KMSDataKeyResult holds the result of data key generation
+type KMSDataKeyResult struct {
+ Response *kms.GenerateDataKeyResponse
+ Block cipher.Block
+}
+
+// generateKMSDataKey generates a new data encryption key using KMS
+// This function encapsulates the common pattern used across all SSE-KMS functions
+func generateKMSDataKey(keyID string, encryptionContext map[string]string) (*KMSDataKeyResult, error) {
+ // Validate keyID to prevent injection attacks and malformed requests to KMS service
+ if !isValidKMSKeyID(keyID) {
+ return nil, fmt.Errorf("invalid KMS key ID format: key ID must be non-empty, without spaces or control characters")
+ }
+
+ // Validate encryption context to prevent malformed requests to KMS service
+ if encryptionContext != nil {
+ for key, value := range encryptionContext {
+ // Validate context keys and values for basic security
+ if strings.TrimSpace(key) == "" {
+ return nil, fmt.Errorf("invalid encryption context: keys cannot be empty or whitespace-only")
+ }
+ if strings.ContainsAny(key, "\x00\n\r\t") || strings.ContainsAny(value, "\x00\n\r\t") {
+ return nil, fmt.Errorf("invalid encryption context: keys and values cannot contain control characters")
+ }
+ // AWS KMS has limits on key/value lengths
+ if len(key) > 2048 || len(value) > 2048 {
+ return nil, fmt.Errorf("invalid encryption context: keys and values must be ≤ 2048 characters (key=%d, value=%d)", len(key), len(value))
+ }
+ }
+ // AWS KMS has a limit on the total number of context pairs
+ if len(encryptionContext) > s3_constants.MaxKMSEncryptionContextPairs {
+ return nil, fmt.Errorf("invalid encryption context: cannot exceed %d key-value pairs, got %d", s3_constants.MaxKMSEncryptionContextPairs, len(encryptionContext))
+ }
+ }
+
+ // Get KMS provider
+ kmsProvider := kms.GetGlobalKMS()
+ if kmsProvider == nil {
+ return nil, fmt.Errorf("KMS is not configured")
+ }
+
+ // Create data key request
+ generateDataKeyReq := &kms.GenerateDataKeyRequest{
+ KeyID: keyID,
+ KeySpec: kms.KeySpecAES256,
+ EncryptionContext: encryptionContext,
+ }
+
+ // Generate the data key
+ dataKeyResp, err := kmsProvider.GenerateDataKey(context.Background(), generateDataKeyReq)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate KMS data key: %v", err)
+ }
+
+ // Create AES cipher with the plaintext data key
+ block, err := aes.NewCipher(dataKeyResp.Plaintext)
+ if err != nil {
+ // Clear sensitive data before returning error
+ kms.ClearSensitiveData(dataKeyResp.Plaintext)
+ return nil, fmt.Errorf("failed to create AES cipher: %v", err)
+ }
+
+ return &KMSDataKeyResult{
+ Response: dataKeyResp,
+ Block: block,
+ }, nil
+}
+
+// clearKMSDataKey safely clears sensitive data from a KMSDataKeyResult
+func clearKMSDataKey(result *KMSDataKeyResult) {
+ if result != nil && result.Response != nil {
+ kms.ClearSensitiveData(result.Response.Plaintext)
+ }
+}
+
+// createSSEKMSKey creates an SSEKMSKey struct from data key result and parameters
+func createSSEKMSKey(result *KMSDataKeyResult, encryptionContext map[string]string, bucketKeyEnabled bool, iv []byte, chunkOffset int64) *SSEKMSKey {
+ return &SSEKMSKey{
+ KeyID: result.Response.KeyID,
+ EncryptedDataKey: result.Response.CiphertextBlob,
+ EncryptionContext: encryptionContext,
+ BucketKeyEnabled: bucketKeyEnabled,
+ IV: iv,
+ ChunkOffset: chunkOffset,
+ }
+}
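Taken together, the three helpers define the pattern every SSE-KMS reader constructor above now follows: generate, defer-clear, wrap in CTR, build metadata. A minimal sketch, assuming package s3api with the same imports as s3_sse_kms.go (demoKMSEncrypt is illustrative, not part of the patch):

    func demoKMSEncrypt(r io.Reader, keyID string, encryptionContext map[string]string) (io.Reader, *SSEKMSKey, error) {
        dataKey, err := generateKMSDataKey(keyID, encryptionContext)
        if err != nil {
            return nil, nil, err
        }
        defer clearKMSDataKey(dataKey) // plaintext key never outlives this call

        iv := make([]byte, s3_constants.AESBlockSize)
        if _, err := io.ReadFull(rand.Reader, iv); err != nil {
            return nil, nil, fmt.Errorf("failed to generate IV: %w", err)
        }
        stream := cipher.NewCTR(dataKey.Block, iv)
        return &cipher.StreamReader{S: stream, R: r}, createSSEKMSKey(dataKey, encryptionContext, false, iv, 0), nil
    }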
diff --git a/weed/s3api/s3_sse_multipart_test.go b/weed/s3api/s3_sse_multipart_test.go
index fa575e411..804e4ab4a 100644
--- a/weed/s3api/s3_sse_multipart_test.go
+++ b/weed/s3api/s3_sse_multipart_test.go
@@ -6,6 +6,8 @@ import (
"io"
"strings"
"testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// TestSSECMultipartUpload tests SSE-C with multipart uploads
@@ -306,8 +308,8 @@ func TestMultipartSSEMixedScenarios(t *testing.T) {
if len(encryptedData) != 0 {
t.Errorf("Expected empty encrypted data for empty part, got %d bytes", len(encryptedData))
}
- if len(iv) != AESBlockSize {
- t.Errorf("Expected IV of size %d, got %d", AESBlockSize, len(iv))
+ if len(iv) != s3_constants.AESBlockSize {
+ t.Errorf("Expected IV of size %d, got %d", s3_constants.AESBlockSize, len(iv))
}
// Decrypt and verify
diff --git a/weed/s3api/s3_sse_s3.go b/weed/s3api/s3_sse_s3.go
index fc95b73bd..6471e04fd 100644
--- a/weed/s3api/s3_sse_s3.go
+++ b/weed/s3api/s3_sse_s3.go
@@ -4,18 +4,20 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
+ "encoding/base64"
"encoding/json"
"fmt"
"io"
mathrand "math/rand"
"net/http"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// SSE-S3 uses AES-256 encryption with server-managed keys
const (
- SSES3Algorithm = "AES256"
+ SSES3Algorithm = s3_constants.SSEAlgorithmAES256
SSES3KeySize = 32 // 256 bits
)
@@ -24,11 +26,20 @@ type SSES3Key struct {
Key []byte
KeyID string
Algorithm string
+ IV []byte // Initialization Vector for this key
}
// IsSSES3RequestInternal checks if the request specifies SSE-S3 encryption
func IsSSES3RequestInternal(r *http.Request) bool {
- return r.Header.Get(s3_constants.AmzServerSideEncryption) == SSES3Algorithm
+ sseHeader := r.Header.Get(s3_constants.AmzServerSideEncryption)
+ result := sseHeader == SSES3Algorithm
+
+ // Debug: log header detection for SSE-S3 requests
+ if result {
+ glog.V(4).Infof("SSE-S3 detection: method=%s, header=%q, expected=%q, result=%t, copySource=%q", r.Method, sseHeader, SSES3Algorithm, result, r.Header.Get("X-Amz-Copy-Source"))
+ }
+
+ return result
}
// IsSSES3EncryptedInternal checks if the object metadata indicates SSE-S3 encryption
@@ -103,6 +114,10 @@ func GetSSES3Headers() map[string]string {
// SerializeSSES3Metadata serializes SSE-S3 metadata for storage
func SerializeSSES3Metadata(key *SSES3Key) ([]byte, error) {
+ if err := ValidateSSES3Key(key); err != nil {
+ return nil, err
+ }
+
// For SSE-S3, we typically don't store the actual key in metadata
// Instead, we store a key ID or reference that can be used to retrieve the key
// from a secure key management system
@@ -112,12 +127,18 @@ func SerializeSSES3Metadata(key *SSES3Key) ([]byte, error) {
"keyId": key.KeyID,
}
- // In a production system, this would be more sophisticated
- // For now, we'll use a simple JSON-like format
- serialized := fmt.Sprintf(`{"algorithm":"%s","keyId":"%s"}`,
- metadata["algorithm"], metadata["keyId"])
+ // Include IV if present (needed for chunk-level decryption)
+ if key.IV != nil {
+ metadata["iv"] = base64.StdEncoding.EncodeToString(key.IV)
+ }
+
+ // Use JSON for proper serialization
+ data, err := json.Marshal(metadata)
+ if err != nil {
+ return nil, fmt.Errorf("marshal SSE-S3 metadata: %w", err)
+ }
- return []byte(serialized), nil
+ return data, nil
}
// DeserializeSSES3Metadata deserializes SSE-S3 metadata from storage and retrieves the actual key
@@ -139,7 +160,7 @@ func DeserializeSSES3Metadata(data []byte, keyManager *SSES3KeyManager) (*SSES3K
algorithm, exists := metadata["algorithm"]
if !exists {
- algorithm = "AES256" // Default algorithm
+ algorithm = s3_constants.SSEAlgorithmAES256 // Default algorithm
}
// Retrieve the actual key using the keyId
@@ -157,6 +178,15 @@ func DeserializeSSES3Metadata(data []byte, keyManager *SSES3KeyManager) (*SSES3K
return nil, fmt.Errorf("algorithm mismatch: expected %s, got %s", algorithm, key.Algorithm)
}
+ // Restore IV if present in metadata (for chunk-level decryption)
+ if ivStr, exists := metadata["iv"]; exists {
+ iv, err := base64.StdEncoding.DecodeString(ivStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode IV: %w", err)
+ }
+ key.IV = iv
+ }
+
return key, nil
}
@@ -241,7 +271,7 @@ func ProcessSSES3Request(r *http.Request) (map[string][]byte, error) {
// Return metadata
metadata := map[string][]byte{
s3_constants.AmzServerSideEncryption: []byte(SSES3Algorithm),
- "sse-s3-key": keyData,
+ s3_constants.SeaweedFSSSES3Key: keyData,
}
return metadata, nil
@@ -249,10 +279,38 @@ func ProcessSSES3Request(r *http.Request) (map[string][]byte, error) {
// GetSSES3KeyFromMetadata extracts SSE-S3 key from object metadata
func GetSSES3KeyFromMetadata(metadata map[string][]byte, keyManager *SSES3KeyManager) (*SSES3Key, error) {
- keyData, exists := metadata["sse-s3-key"]
+ keyData, exists := metadata[s3_constants.SeaweedFSSSES3Key]
if !exists {
return nil, fmt.Errorf("SSE-S3 key not found in metadata")
}
return DeserializeSSES3Metadata(keyData, keyManager)
}
+
+// CreateSSES3EncryptedReaderWithBaseIV creates an encrypted reader using a base IV for multipart upload consistency.
+// The returned IV is the offset-derived IV, calculated from the input baseIV and offset.
+func CreateSSES3EncryptedReaderWithBaseIV(reader io.Reader, key *SSES3Key, baseIV []byte, offset int64) (io.Reader, []byte /* derivedIV */, error) {
+ // Validate the key and base IV up front to prevent panics and silent IV reuse
+ if err := ValidateSSES3Key(key); err != nil {
+ return nil, nil, err
+ }
+ if len(key.Key) != SSES3KeySize {
+ return nil, nil, fmt.Errorf("invalid SSES3Key: must be %d bytes, got %d", SSES3KeySize, len(key.Key))
+ }
+ if err := ValidateIV(baseIV, "base IV"); err != nil {
+ return nil, nil, err
+ }
+
+ block, err := aes.NewCipher(key.Key)
+ if err != nil {
+ return nil, nil, fmt.Errorf("create AES cipher: %w", err)
+ }
+
+ // Calculate the proper IV with offset to ensure unique IV per chunk/part
+ // This prevents the severe security vulnerability of IV reuse in CTR mode
+ iv := calculateIVWithOffset(baseIV, offset)
+
+ stream := cipher.NewCTR(block, iv)
+ encryptedReader := &cipher.StreamReader{S: stream, R: reader}
+ return encryptedReader, iv, nil
+}
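As with the KMS path, callers are expected to derive each part's offset from the shared base IV using the PartOffsetMultiplier spacing. A hedged sketch (demoEncryptSSES3Part and the 1-based indexing are assumptions, not shown in this hunk):

    func demoEncryptSSES3Part(part io.Reader, key *SSES3Key, baseIV []byte, partNumber int) (io.Reader, []byte, error) {
        offset := int64(partNumber-1) * s3_constants.PartOffsetMultiplier // unique counter range per part
        return CreateSSES3EncryptedReaderWithBaseIV(part, key, baseIV, offset)
    }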
diff --git a/weed/s3api/s3_sse_utils.go b/weed/s3api/s3_sse_utils.go
new file mode 100644
index 000000000..848bc61ea
--- /dev/null
+++ b/weed/s3api/s3_sse_utils.go
@@ -0,0 +1,42 @@
+package s3api
+
+import "github.com/seaweedfs/seaweedfs/weed/glog"
+
+// calculateIVWithOffset calculates a unique IV by combining a base IV with an offset.
+// This ensures each chunk/part uses a unique IV, preventing CTR mode IV reuse vulnerabilities.
+// This function is shared between SSE-KMS and SSE-S3 implementations for consistency.
+func calculateIVWithOffset(baseIV []byte, offset int64) []byte {
+ if len(baseIV) != 16 {
+ glog.Errorf("Invalid base IV length: expected 16, got %d", len(baseIV))
+ return baseIV // Return the original IV as a fallback; callers should validate the IV length up front
+ }
+
+ // Create a copy of the base IV to avoid modifying the original
+ iv := make([]byte, 16)
+ copy(iv, baseIV)
+
+ // Calculate the block offset (AES block size is 16 bytes)
+ blockOffset := offset / 16
+ originalBlockOffset := blockOffset
+
+ // Add the block offset to the IV counter (last 8 bytes, big-endian)
+ // This matches how AES-CTR mode increments the counter
+ // Process from least significant byte (index 15) to most significant byte (index 8)
+ carry := uint64(0)
+ for i := 15; i >= 8; i-- {
+ sum := uint64(iv[i]) + uint64(blockOffset&0xFF) + carry
+ iv[i] = byte(sum & 0xFF)
+ carry = sum >> 8
+ blockOffset = blockOffset >> 8
+
+ // If no more blockOffset bits and no carry, we can stop early
+ if blockOffset == 0 && carry == 0 {
+ break
+ }
+ }
+
+ // Single consolidated debug log to avoid performance impact in high-throughput scenarios
+ glog.V(4).Infof("calculateIVWithOffset: baseIV=%x, offset=%d, blockOffset=%d, derivedIV=%x",
+ baseIV, offset, originalBlockOffset, iv)
+ return iv
+}
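The counter math is easy to sanity-check. A minimal test-style sketch, assuming it sits beside this file in package s3api with the usual testing import:

    func TestCalculateIVWithOffsetSketch(t *testing.T) {
        baseIV := make([]byte, 16) // all-zero base IV for readability
        iv := calculateIVWithOffset(baseIV, 32) // 32 bytes = 2 AES blocks
        if iv[15] != 2 {
            t.Fatalf("expected last counter byte to be 2, got %d", iv[15])
        }
        iv = calculateIVWithOffset(baseIV, 256*16) // 256 blocks: carry into byte 14
        if iv[14] != 1 || iv[15] != 0 {
            t.Fatalf("expected carry into byte 14, got %x", iv[8:])
        }
    }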
diff --git a/weed/s3api/s3_validation_utils.go b/weed/s3api/s3_validation_utils.go
new file mode 100644
index 000000000..da53342b1
--- /dev/null
+++ b/weed/s3api/s3_validation_utils.go
@@ -0,0 +1,75 @@
+package s3api
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+)
+
+// isValidKMSKeyID performs basic validation of KMS key identifiers.
+// Following Minio's approach: be permissive and accept any reasonable key format.
+// Only reject keys with leading/trailing spaces or other obvious issues.
+//
+// This function is used across multiple S3 API handlers to ensure consistent
+// validation of KMS key IDs in various contexts (bucket encryption, object operations, etc.).
+func isValidKMSKeyID(keyID string) bool {
+ // Reject empty keys
+ if keyID == "" {
+ return false
+ }
+
+ // Following Minio's validation: reject keys with leading/trailing spaces
+ if strings.HasPrefix(keyID, " ") || strings.HasSuffix(keyID, " ") {
+ return false
+ }
+
+ // Also reject keys with internal spaces (common sense validation)
+ if strings.Contains(keyID, " ") {
+ return false
+ }
+
+ // Reject keys with control characters or newlines
+ if strings.ContainsAny(keyID, "\t\n\r\x00") {
+ return false
+ }
+
+ // Accept any reasonable length key (be permissive for various KMS providers)
+ if len(keyID) > 0 && len(keyID) <= s3_constants.MaxKMSKeyIDLength {
+ return true
+ }
+
+ return false
+}
+
+// ValidateIV validates that an initialization vector has the correct length for AES encryption
+func ValidateIV(iv []byte, name string) error {
+ if len(iv) != s3_constants.AESBlockSize {
+ return fmt.Errorf("invalid %s length: expected %d bytes, got %d", name, s3_constants.AESBlockSize, len(iv))
+ }
+ return nil
+}
+
+// ValidateSSEKMSKey validates that an SSE-KMS key is not nil and has required fields
+func ValidateSSEKMSKey(sseKey *SSEKMSKey) error {
+ if sseKey == nil {
+ return fmt.Errorf("SSE-KMS key cannot be nil")
+ }
+ return nil
+}
+
+// ValidateSSECKey validates that an SSE-C key is not nil
+func ValidateSSECKey(customerKey *SSECustomerKey) error {
+ if customerKey == nil {
+ return fmt.Errorf("SSE-C customer key cannot be nil")
+ }
+ return nil
+}
+
+// ValidateSSES3Key validates that an SSE-S3 key is not nil
+func ValidateSSES3Key(sseKey *SSES3Key) error {
+ if sseKey == nil {
+ return fmt.Errorf("SSE-S3 key cannot be nil")
+ }
+ return nil
+}
diff --git a/weed/s3api/s3api_bucket_skip_handlers.go b/weed/s3api/s3api_bucket_skip_handlers.go
index fbc93883b..8dc4cb460 100644
--- a/weed/s3api/s3api_bucket_skip_handlers.go
+++ b/weed/s3api/s3api_bucket_skip_handlers.go
@@ -3,8 +3,6 @@ package s3api
import (
"net/http"
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
@@ -27,26 +25,8 @@ func (s3a *S3ApiServer) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http
}
// GetBucketEncryptionHandler Returns the default encryption configuration
-// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
-func (s3a *S3ApiServer) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
- bucket, _ := s3_constants.GetBucketAndObject(r)
- glog.V(3).Infof("GetBucketEncryption %s", bucket)
-
- if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
- s3err.WriteErrorResponse(w, r, err)
- return
- }
-
- s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
-}
-
-func (s3a *S3ApiServer) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
- s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
-}
-
-func (s3a *S3ApiServer) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
- s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
-}
+// GetBucketEncryption, PutBucketEncryption, DeleteBucketEncryption
+// These handlers are now implemented in s3_bucket_encryption.go
// GetPublicAccessBlockHandler Retrieves the PublicAccessBlock configuration for an S3 bucket
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
diff --git a/weed/s3api/s3api_copy_size_calculation.go b/weed/s3api/s3api_copy_size_calculation.go
index 74a05f6c1..a11c46cdf 100644
--- a/weed/s3api/s3api_copy_size_calculation.go
+++ b/weed/s3api/s3api_copy_size_calculation.go
@@ -4,6 +4,7 @@ import (
"net/http"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// CopySizeCalculator handles size calculations for different copy scenarios
@@ -174,11 +175,11 @@ func (e EncryptionType) String() string {
case EncryptionTypeNone:
return "None"
case EncryptionTypeSSEC:
- return "SSE-C"
+ return s3_constants.SSETypeC
case EncryptionTypeSSEKMS:
- return "SSE-KMS"
+ return s3_constants.SSETypeKMS
case EncryptionTypeSSES3:
- return "SSE-S3"
+ return s3_constants.SSETypeS3
default:
return "Unknown"
}
diff --git a/weed/s3api/s3api_key_rotation.go b/weed/s3api/s3api_key_rotation.go
index 682f47807..e8d29ff7a 100644
--- a/weed/s3api/s3api_key_rotation.go
+++ b/weed/s3api/s3api_key_rotation.go
@@ -116,7 +116,7 @@ func (s3a *S3ApiServer) rotateSSECChunks(entry *filer_pb.Entry, sourceKey, destK
}
// Generate new IV for the destination and store it in entry metadata
- newIV := make([]byte, AESBlockSize)
+ newIV := make([]byte, s3_constants.AESBlockSize)
if _, err := io.ReadFull(rand.Reader, newIV); err != nil {
return nil, fmt.Errorf("generate new IV: %w", err)
}
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 140ee7a42..25647538b 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -340,7 +340,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
if objectEntry, err := s3a.getEntry("", objectPath); err == nil {
primarySSEType := s3a.detectPrimarySSEType(objectEntry)
- if primarySSEType == "SSE-C" || primarySSEType == "SSE-KMS" {
+ if primarySSEType == s3_constants.SSETypeC || primarySSEType == s3_constants.SSETypeKMS {
sseObject = true
// Temporarily remove Range header to get full encrypted data from filer
r.Header.Del("Range")
@@ -810,20 +810,20 @@ func (s3a *S3ApiServer) handleSSEResponse(r *http.Request, proxyResponse *http.R
}
// Route based on ACTUAL object type (from chunks) rather than conflicting headers
- if actualObjectType == "SSE-C" && clientExpectsSSEC {
+ if actualObjectType == s3_constants.SSETypeC && clientExpectsSSEC {
// Object is SSE-C and client expects SSE-C → SSE-C handler
return s3a.handleSSECResponse(r, proxyResponse, w)
- } else if actualObjectType == "SSE-KMS" && !clientExpectsSSEC {
+ } else if actualObjectType == s3_constants.SSETypeKMS && !clientExpectsSSEC {
// Object is SSE-KMS and client doesn't expect SSE-C → SSE-KMS handler
return s3a.handleSSEKMSResponse(r, proxyResponse, w, kmsMetadataHeader)
} else if actualObjectType == "None" && !clientExpectsSSEC {
// Object is unencrypted and client doesn't expect SSE-C → pass through
return passThroughResponse(proxyResponse, w)
- } else if actualObjectType == "SSE-C" && !clientExpectsSSEC {
+ } else if actualObjectType == s3_constants.SSETypeC && !clientExpectsSSEC {
// Object is SSE-C but client doesn't provide SSE-C headers → Error
s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing)
return http.StatusBadRequest, 0
- } else if actualObjectType == "SSE-KMS" && clientExpectsSSEC {
+ } else if actualObjectType == s3_constants.SSETypeKMS && clientExpectsSSEC {
// Object is SSE-KMS but client provides SSE-C headers → Error
s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing)
return http.StatusBadRequest, 0
@@ -888,7 +888,7 @@ func (s3a *S3ApiServer) handleSSEKMSResponse(r *http.Request, proxyResponse *htt
// Check for multipart SSE-KMS
sseKMSChunks := 0
for _, chunk := range entry.GetChunks() {
- if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseKmsMetadata()) > 0 {
+ if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseMetadata()) > 0 {
sseKMSChunks++
}
}
@@ -999,7 +999,7 @@ func (s3a *S3ApiServer) addSSEHeadersToResponse(proxyResponse *http.Response, en
// Only set headers for the PRIMARY encryption type
switch primarySSEType {
- case "SSE-C":
+ case s3_constants.SSETypeC:
// Add only SSE-C headers
if algorithmBytes, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists && len(algorithmBytes) > 0 {
proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, string(algorithmBytes))
@@ -1014,7 +1014,7 @@ func (s3a *S3ApiServer) addSSEHeadersToResponse(proxyResponse *http.Response, en
proxyResponse.Header.Set(s3_constants.SeaweedFSSSEIVHeader, ivBase64)
}
- case "SSE-KMS":
+ case s3_constants.SSETypeKMS:
// Add only SSE-KMS headers
if sseAlgorithm, exists := entry.Extended[s3_constants.AmzServerSideEncryption]; exists && len(sseAlgorithm) > 0 {
proxyResponse.Header.Set(s3_constants.AmzServerSideEncryption, string(sseAlgorithm))
@@ -1039,18 +1039,18 @@ func (s3a *S3ApiServer) detectPrimarySSEType(entry *filer_pb.Entry) string {
hasSSEKMS := entry.Extended[s3_constants.AmzServerSideEncryption] != nil
if hasSSEC && !hasSSEKMS {
- return "SSE-C"
+ return s3_constants.SSETypeC
} else if hasSSEKMS && !hasSSEC {
- return "SSE-KMS"
+ return s3_constants.SSETypeKMS
} else if hasSSEC && hasSSEKMS {
// Both present - this should only happen during cross-encryption copies
// Use content to determine actual encryption state
if len(entry.Content) > 0 {
// smallContent - check if it's encrypted (heuristic: random-looking data)
- return "SSE-C" // Default to SSE-C for mixed case
+ return s3_constants.SSETypeC // Default to SSE-C for mixed case
} else {
// No content, both headers - default to SSE-C
- return "SSE-C"
+ return s3_constants.SSETypeC
}
}
return "None"
@@ -1071,12 +1071,12 @@ func (s3a *S3ApiServer) detectPrimarySSEType(entry *filer_pb.Entry) string {
// Primary type is the one with more chunks
if ssecChunks > ssekmsChunks {
- return "SSE-C"
+ return s3_constants.SSETypeC
} else if ssekmsChunks > ssecChunks {
- return "SSE-KMS"
+ return s3_constants.SSETypeKMS
} else if ssecChunks > 0 {
// Equal number, prefer SSE-C (shouldn't happen in practice)
- return "SSE-C"
+ return s3_constants.SSETypeC
}
return "None"
@@ -1117,9 +1117,9 @@ func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, pr
var chunkSSEKMSKey *SSEKMSKey
// Check if this chunk has per-chunk SSE-KMS metadata (new architecture)
- if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseKmsMetadata()) > 0 {
+ if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseMetadata()) > 0 {
// Use the per-chunk SSE-KMS metadata
- kmsKey, err := DeserializeSSEKMSMetadata(chunk.GetSseKmsMetadata())
+ kmsKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata())
if err != nil {
glog.Errorf("Failed to deserialize per-chunk SSE-KMS metadata for chunk %s: %v", chunk.GetFileIdString(), err)
} else {
@@ -1356,9 +1356,9 @@ func (s3a *S3ApiServer) createMultipartSSECDecryptedReader(r *http.Request, prox
if chunk.GetSseType() == filer_pb.SSEType_SSE_C {
// For SSE-C chunks, extract the IV from the stored per-chunk metadata (unified approach)
- if len(chunk.GetSseKmsMetadata()) > 0 {
+ if len(chunk.GetSseMetadata()) > 0 {
// Deserialize the SSE-C metadata stored in the unified metadata field
- ssecMetadata, decErr := DeserializeSSECMetadata(chunk.GetSseKmsMetadata())
+ ssecMetadata, decErr := DeserializeSSECMetadata(chunk.GetSseMetadata())
if decErr != nil {
return nil, fmt.Errorf("failed to deserialize SSE-C metadata for chunk %s: %v", chunk.GetFileIdString(), decErr)
}
diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go
index 3876ed261..9c044bad9 100644
--- a/weed/s3api/s3api_object_handlers_copy.go
+++ b/weed/s3api/s3api_object_handlers_copy.go
@@ -172,51 +172,18 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
// If we're doing cross-encryption, skip conflicting headers
if len(entry.GetChunks()) > 0 {
- // Detect if this is a cross-encryption copy by checking request headers
+ // Detect source and destination encryption types
srcHasSSEC := IsSSECEncrypted(entry.Extended)
srcHasSSEKMS := IsSSEKMSEncrypted(entry.Extended)
+ srcHasSSES3 := IsSSES3EncryptedInternal(entry.Extended)
dstWantsSSEC := IsSSECRequest(r)
dstWantsSSEKMS := IsSSEKMSRequest(r)
+ dstWantsSSES3 := IsSSES3RequestInternal(r)
- // SSE-KMS → SSE-C: skip ALL SSE-KMS headers
- if srcHasSSEKMS && dstWantsSSEC {
- if k == s3_constants.AmzServerSideEncryption ||
- k == s3_constants.AmzServerSideEncryptionAwsKmsKeyId ||
- k == s3_constants.SeaweedFSSSEKMSKey ||
- k == s3_constants.SeaweedFSSSEKMSKeyID ||
- k == s3_constants.SeaweedFSSSEKMSEncryption ||
- k == s3_constants.SeaweedFSSSEKMSBucketKeyEnabled ||
- k == s3_constants.SeaweedFSSSEKMSEncryptionContext ||
- k == s3_constants.SeaweedFSSSEKMSBaseIV {
- skipHeader = true
- }
- }
-
- // SSE-C → SSE-KMS: skip ALL SSE-C headers
- if srcHasSSEC && dstWantsSSEKMS {
- if k == s3_constants.AmzServerSideEncryptionCustomerAlgorithm ||
- k == s3_constants.AmzServerSideEncryptionCustomerKeyMD5 ||
- k == s3_constants.SeaweedFSSSEIV {
- skipHeader = true
- }
- }
-
- // Encrypted → Unencrypted: skip ALL encryption headers
- if (srcHasSSEKMS || srcHasSSEC) && !dstWantsSSEC && !dstWantsSSEKMS {
- if k == s3_constants.AmzServerSideEncryption ||
- k == s3_constants.AmzServerSideEncryptionAwsKmsKeyId ||
- k == s3_constants.AmzServerSideEncryptionCustomerAlgorithm ||
- k == s3_constants.AmzServerSideEncryptionCustomerKeyMD5 ||
- k == s3_constants.SeaweedFSSSEKMSKey ||
- k == s3_constants.SeaweedFSSSEKMSKeyID ||
- k == s3_constants.SeaweedFSSSEKMSEncryption ||
- k == s3_constants.SeaweedFSSSEKMSBucketKeyEnabled ||
- k == s3_constants.SeaweedFSSSEKMSEncryptionContext ||
- k == s3_constants.SeaweedFSSSEKMSBaseIV ||
- k == s3_constants.SeaweedFSSSEIV {
- skipHeader = true
- }
- }
+ // Use helper function to determine if header should be skipped
+ skipHeader = shouldSkipEncryptionHeader(k,
+ srcHasSSEC, srcHasSSEKMS, srcHasSSES3,
+ dstWantsSSEC, dstWantsSSEKMS, dstWantsSSES3)
}
if !skipHeader {
@@ -435,8 +402,8 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d upload %s", srcBucket, srcObject, dstBucket, partID, uploadID)
// check partID with maximum part ID for multipart objects
- if partID > globalMaxPartID {
- s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
+ if partID > s3_constants.MaxS3MultipartParts {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
return
}
@@ -1284,12 +1251,12 @@ func (s3a *S3ApiServer) copyMultipartSSEKMSChunk(chunk *filer_pb.FileChunk, dest
var finalData []byte
// Decrypt source data using stored SSE-KMS metadata (same pattern as SSE-C)
- if len(chunk.GetSseKmsMetadata()) == 0 {
+ if len(chunk.GetSseMetadata()) == 0 {
return nil, fmt.Errorf("SSE-KMS chunk missing per-chunk metadata")
}
// Deserialize the SSE-KMS metadata (reusing unified metadata structure)
- sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseKmsMetadata())
+ sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata())
if err != nil {
return nil, fmt.Errorf("failed to deserialize SSE-KMS metadata: %w", err)
}
@@ -1337,7 +1304,7 @@ func (s3a *S3ApiServer) copyMultipartSSEKMSChunk(chunk *filer_pb.FileChunk, dest
// Set the SSE type and metadata on destination chunk (unified approach)
dstChunk.SseType = filer_pb.SSEType_SSE_KMS
- dstChunk.SseKmsMetadata = kmsMetadata
+ dstChunk.SseMetadata = kmsMetadata
glog.V(4).Infof("Re-encrypted multipart SSE-KMS chunk: %d bytes → %d bytes", len(finalData)-len(reencryptedData)+len(finalData), len(finalData))
}
@@ -1384,12 +1351,12 @@ func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySo
// Decrypt if source is encrypted
if copySourceKey != nil {
// Get the per-chunk SSE-C metadata
- if len(chunk.GetSseKmsMetadata()) == 0 {
+ if len(chunk.GetSseMetadata()) == 0 {
return nil, nil, fmt.Errorf("SSE-C chunk missing per-chunk metadata")
}
// Deserialize the SSE-C metadata
- ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseKmsMetadata())
+ ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseMetadata())
if err != nil {
return nil, nil, fmt.Errorf("failed to deserialize SSE-C metadata: %w", err)
}
@@ -1428,7 +1395,7 @@ func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySo
// Re-encrypt if destination should be encrypted
if destKey != nil {
// Generate new IV for this chunk
- newIV := make([]byte, AESBlockSize)
+ newIV := make([]byte, s3_constants.AESBlockSize)
if _, err := rand.Read(newIV); err != nil {
return nil, nil, fmt.Errorf("generate IV: %w", err)
}
@@ -1455,7 +1422,7 @@ func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySo
// Set the SSE type and metadata on destination chunk
dstChunk.SseType = filer_pb.SSEType_SSE_C
- dstChunk.SseKmsMetadata = ssecMetadata // Use unified metadata field
+ dstChunk.SseMetadata = ssecMetadata // Use unified metadata field
glog.V(4).Infof("Re-encrypted multipart SSE-C chunk: %d bytes → %d bytes", len(finalData)-len(reencryptedData)+len(finalData), len(finalData))
}
@@ -1556,8 +1523,8 @@ func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *h
if state.DstSSEC && destSSECKey != nil {
// For SSE-C destination, use first chunk's IV for compatibility
- if len(dstChunks) > 0 && dstChunks[0].GetSseType() == filer_pb.SSEType_SSE_C && len(dstChunks[0].GetSseKmsMetadata()) > 0 {
- if ssecMetadata, err := DeserializeSSECMetadata(dstChunks[0].GetSseKmsMetadata()); err == nil {
+ if len(dstChunks) > 0 && dstChunks[0].GetSseType() == filer_pb.SSEType_SSE_C && len(dstChunks[0].GetSseMetadata()) > 0 {
+ if ssecMetadata, err := DeserializeSSECMetadata(dstChunks[0].GetSseMetadata()); err == nil {
if iv, ivErr := base64.StdEncoding.DecodeString(ssecMetadata.IV); ivErr == nil {
StoreIVInMetadata(dstMetadata, iv)
dstMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256")
@@ -1615,11 +1582,11 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
// Step 1: Decrypt source data
if chunk.GetSseType() == filer_pb.SSEType_SSE_C {
// Decrypt SSE-C source
- if len(chunk.GetSseKmsMetadata()) == 0 {
+ if len(chunk.GetSseMetadata()) == 0 {
return nil, fmt.Errorf("SSE-C chunk missing per-chunk metadata")
}
- ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseKmsMetadata())
+ ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseMetadata())
if err != nil {
return nil, fmt.Errorf("failed to deserialize SSE-C metadata: %w", err)
}
@@ -1654,11 +1621,11 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
} else if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS {
// Decrypt SSE-KMS source
- if len(chunk.GetSseKmsMetadata()) == 0 {
+ if len(chunk.GetSseMetadata()) == 0 {
return nil, fmt.Errorf("SSE-KMS chunk missing per-chunk metadata")
}
- sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseKmsMetadata())
+ sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata())
if err != nil {
return nil, fmt.Errorf("failed to deserialize SSE-KMS metadata: %w", err)
}
@@ -1704,7 +1671,7 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
}
dstChunk.SseType = filer_pb.SSEType_SSE_C
- dstChunk.SseKmsMetadata = ssecMetadata
+ dstChunk.SseMetadata = ssecMetadata
previewLen := 16
if len(finalData) < previewLen {
@@ -1736,7 +1703,7 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
}
dstChunk.SseType = filer_pb.SSEType_SSE_KMS
- dstChunk.SseKmsMetadata = kmsMetadata
+ dstChunk.SseMetadata = kmsMetadata
glog.V(4).Infof("Re-encrypted chunk with SSE-KMS")
}
@@ -1759,11 +1726,11 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
// getEncryptionTypeString returns a string representation of encryption type for logging
func (s3a *S3ApiServer) getEncryptionTypeString(isSSEC, isSSEKMS, isSSES3 bool) string {
if isSSEC {
- return "SSE-C"
+ return s3_constants.SSETypeC
} else if isSSEKMS {
- return "SSE-KMS"
+ return s3_constants.SSETypeKMS
} else if isSSES3 {
- return "SSE-S3"
+ return s3_constants.SSETypeS3
}
return "Plain"
}
@@ -1790,7 +1757,7 @@ func (s3a *S3ApiServer) copyChunksWithSSEC(entry *filer_pb.Entry, r *http.Reques
isMultipartSSEC := false
sseCChunks := 0
for i, chunk := range entry.GetChunks() {
- glog.V(4).Infof("Chunk %d: sseType=%d, hasMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseKmsMetadata()) > 0)
+ glog.V(4).Infof("Chunk %d: sseType=%d, hasMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseMetadata()) > 0)
if chunk.GetSseType() == filer_pb.SSEType_SSE_C {
sseCChunks++
}
@@ -1859,7 +1826,7 @@ func (s3a *S3ApiServer) copyChunksWithReencryption(entry *filer_pb.Entry, copySo
// Generate a single IV for the destination object (if destination is encrypted)
var destIV []byte
if destKey != nil {
- destIV = make([]byte, AESBlockSize)
+ destIV = make([]byte, s3_constants.AESBlockSize)
if _, err := io.ReadFull(rand.Reader, destIV); err != nil {
return nil, nil, fmt.Errorf("failed to generate destination IV: %w", err)
}
@@ -1978,7 +1945,7 @@ func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Requ
isMultipartSSEKMS := false
sseKMSChunks := 0
for i, chunk := range entry.GetChunks() {
- glog.V(4).Infof("Chunk %d: sseType=%d, hasKMSMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseKmsMetadata()) > 0)
+ glog.V(4).Infof("Chunk %d: sseType=%d, hasKMSMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseMetadata()) > 0)
if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS {
sseKMSChunks++
}
@@ -2201,3 +2168,123 @@ func getKeyIDString(key *SSEKMSKey) string {
}
return key.KeyID
}
+
+// EncryptionHeaderContext holds encryption type information and header classifications
+type EncryptionHeaderContext struct {
+ SrcSSEC, SrcSSEKMS, SrcSSES3 bool
+ DstSSEC, DstSSEKMS, DstSSES3 bool
+ IsSSECHeader, IsSSEKMSHeader, IsSSES3Header bool
+}
+
+// newEncryptionHeaderContext creates a context for encryption header processing
+func newEncryptionHeaderContext(headerKey string, srcSSEC, srcSSEKMS, srcSSES3, dstSSEC, dstSSEKMS, dstSSES3 bool) *EncryptionHeaderContext {
+ return &EncryptionHeaderContext{
+ SrcSSEC: srcSSEC, SrcSSEKMS: srcSSEKMS, SrcSSES3: srcSSES3,
+ DstSSEC: dstSSEC, DstSSEKMS: dstSSEKMS, DstSSES3: dstSSES3,
+ IsSSECHeader: isSSECHeader(headerKey),
+ IsSSEKMSHeader: isSSEKMSHeader(headerKey, srcSSEKMS, dstSSEKMS),
+ IsSSES3Header: isSSES3Header(headerKey, srcSSES3, dstSSES3),
+ }
+}
+
+// isSSECHeader checks if the header is SSE-C specific
+func isSSECHeader(headerKey string) bool {
+ return headerKey == s3_constants.AmzServerSideEncryptionCustomerAlgorithm ||
+ headerKey == s3_constants.AmzServerSideEncryptionCustomerKeyMD5 ||
+ headerKey == s3_constants.SeaweedFSSSEIV
+}
+
+// isSSEKMSHeader checks if the header is SSE-KMS specific
+func isSSEKMSHeader(headerKey string, srcSSEKMS, dstSSEKMS bool) bool {
+ return (headerKey == s3_constants.AmzServerSideEncryption && (srcSSEKMS || dstSSEKMS)) ||
+ headerKey == s3_constants.AmzServerSideEncryptionAwsKmsKeyId ||
+ headerKey == s3_constants.SeaweedFSSSEKMSKey ||
+ headerKey == s3_constants.SeaweedFSSSEKMSKeyID ||
+ headerKey == s3_constants.SeaweedFSSSEKMSEncryption ||
+ headerKey == s3_constants.SeaweedFSSSEKMSBucketKeyEnabled ||
+ headerKey == s3_constants.SeaweedFSSSEKMSEncryptionContext ||
+ headerKey == s3_constants.SeaweedFSSSEKMSBaseIV
+}
+
+// isSSES3Header checks if the header is SSE-S3 specific
+func isSSES3Header(headerKey string, srcSSES3, dstSSES3 bool) bool {
+ return (headerKey == s3_constants.AmzServerSideEncryption && (srcSSES3 || dstSSES3)) ||
+ headerKey == s3_constants.SeaweedFSSSES3Key ||
+ headerKey == s3_constants.SeaweedFSSSES3Encryption ||
+ headerKey == s3_constants.SeaweedFSSSES3BaseIV ||
+ headerKey == s3_constants.SeaweedFSSSES3KeyData
+}
+
+// shouldSkipCrossEncryptionHeader handles cross-encryption copy scenarios
+func (ctx *EncryptionHeaderContext) shouldSkipCrossEncryptionHeader() bool {
+ // SSE-C to SSE-KMS: skip SSE-C headers
+ if ctx.SrcSSEC && ctx.DstSSEKMS && ctx.IsSSECHeader {
+ return true
+ }
+
+ // SSE-KMS to SSE-C: skip SSE-KMS headers
+ if ctx.SrcSSEKMS && ctx.DstSSEC && ctx.IsSSEKMSHeader {
+ return true
+ }
+
+ // SSE-C to SSE-S3: skip SSE-C headers
+ if ctx.SrcSSEC && ctx.DstSSES3 && ctx.IsSSECHeader {
+ return true
+ }
+
+ // SSE-S3 to SSE-C: skip SSE-S3 headers
+ if ctx.SrcSSES3 && ctx.DstSSEC && ctx.IsSSES3Header {
+ return true
+ }
+
+ // SSE-KMS to SSE-S3: skip SSE-KMS headers
+ if ctx.SrcSSEKMS && ctx.DstSSES3 && ctx.IsSSEKMSHeader {
+ return true
+ }
+
+ // SSE-S3 to SSE-KMS: skip SSE-S3 headers
+ if ctx.SrcSSES3 && ctx.DstSSEKMS && ctx.IsSSES3Header {
+ return true
+ }
+
+ return false
+}
+
+// shouldSkipEncryptedToUnencryptedHeader handles encrypted to unencrypted copy scenarios
+func (ctx *EncryptionHeaderContext) shouldSkipEncryptedToUnencryptedHeader() bool {
+ // Skip all encryption headers when copying from encrypted to unencrypted
+ hasSourceEncryption := ctx.SrcSSEC || ctx.SrcSSEKMS || ctx.SrcSSES3
+ hasDestinationEncryption := ctx.DstSSEC || ctx.DstSSEKMS || ctx.DstSSES3
+ isAnyEncryptionHeader := ctx.IsSSECHeader || ctx.IsSSEKMSHeader || ctx.IsSSES3Header
+
+ return hasSourceEncryption && !hasDestinationEncryption && isAnyEncryptionHeader
+}
+
+// shouldSkipEncryptionHeader determines if a header should be skipped when copying extended attributes
+// based on the source and destination encryption types. This consolidates the repetitive logic for
+// filtering encryption-related headers during copy operations.
+func shouldSkipEncryptionHeader(headerKey string,
+ srcSSEC, srcSSEKMS, srcSSES3 bool,
+ dstSSEC, dstSSEKMS, dstSSES3 bool) bool {
+
+ // Create context to reduce complexity and improve testability
+ ctx := newEncryptionHeaderContext(headerKey, srcSSEC, srcSSEKMS, srcSSES3, dstSSEC, dstSSEKMS, dstSSES3)
+
+ // If it's not an encryption header, don't skip it
+ if !ctx.IsSSECHeader && !ctx.IsSSEKMSHeader && !ctx.IsSSES3Header {
+ return false
+ }
+
+ // Handle cross-encryption scenarios (different encryption types)
+ if ctx.shouldSkipCrossEncryptionHeader() {
+ return true
+ }
+
+ // Handle encrypted to unencrypted scenarios
+ if ctx.shouldSkipEncryptedToUnencryptedHeader() {
+ return true
+ }
+
+ // Default: don't skip the header
+ return false
+}
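
The consolidated helper is easiest to sanity-check with a table of copy scenarios. A hedged, illustrative in-package test sketch (not part of this commit) that mirrors the cases enumerated in shouldSkipCrossEncryptionHeader and shouldSkipEncryptedToUnencryptedHeader:

package s3api

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)

func TestShouldSkipEncryptionHeaderSketch(t *testing.T) {
	cases := []struct {
		name                string
		header              string
		srcC, srcKMS, srcS3 bool
		dstC, dstKMS, dstS3 bool
		want                bool
	}{
		// SSE-C -> SSE-KMS: customer-key headers must not leak to the destination.
		{"ssec to kms drops IV", s3_constants.SeaweedFSSSEIV, true, false, false, false, true, false, true},
		// Encrypted -> plaintext: every encryption header is dropped.
		{"kms to plain drops key id", s3_constants.SeaweedFSSSEKMSKeyID, false, true, false, false, false, false, true},
		// Non-encryption metadata is never skipped.
		{"content type kept", "Content-Type", true, false, false, false, false, false, false},
	}
	for _, c := range cases {
		got := shouldSkipEncryptionHeader(c.header, c.srcC, c.srcKMS, c.srcS3, c.dstC, c.dstKMS, c.dstS3)
		if got != c.want {
			t.Errorf("%s: got %v, want %v", c.name, got, c.want)
		}
	}
}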
diff --git a/weed/s3api/s3api_object_handlers_multipart.go b/weed/s3api/s3api_object_handlers_multipart.go
index 0d6870f56..cee8f6785 100644
--- a/weed/s3api/s3api_object_handlers_multipart.go
+++ b/weed/s3api/s3api_object_handlers_multipart.go
@@ -29,7 +29,6 @@ const (
maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
- globalMaxPartID = 100000
)
// NewMultipartUploadHandler - New multipart upload.
@@ -290,8 +289,12 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
return
}
- if partID > globalMaxPartID {
- s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
+ if partID < 1 || partID > s3_constants.MaxS3MultipartParts {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
 return
 }
@@ -375,6 +378,13 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
r.Header.Set(s3_constants.SeaweedFSSSEKMSBaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
glog.Infof("PutObjectPartHandler: inherited SSE-KMS settings from upload %s, keyID %s - letting putToFiler handle encryption", uploadID, keyID)
+ } else {
+ // Check if this upload uses SSE-S3
+ if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil {
+ glog.Errorf("Failed to setup SSE-S3 multipart headers: %v", err)
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ return
+ }
}
}
} else {
@@ -389,7 +399,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
}
destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
- etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination, bucket)
+ etag, errCode, _ := s3a.putToFiler(r, uploadUrl, dataReader, destination, bucket, partID)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
@@ -480,3 +490,47 @@ type CompletedPart struct {
ETag string
PartNumber int
}
+
+// handleSSES3MultipartHeaders handles SSE-S3 multipart upload header setup to reduce nesting complexity
+func (s3a *S3ApiServer) handleSSES3MultipartHeaders(r *http.Request, uploadEntry *filer_pb.Entry, uploadID string) error {
+ glog.Infof("PutObjectPartHandler: checking for SSE-S3 settings in extended metadata")
+ if encryptionTypeBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3Encryption]; exists && string(encryptionTypeBytes) == s3_constants.SSEAlgorithmAES256 {
+ glog.Infof("PutObjectPartHandler: found SSE-S3 encryption type, setting up headers")
+
+ // Set SSE-S3 headers to indicate server-side encryption
+ r.Header.Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256)
+
+ // Retrieve and set base IV for consistent multipart encryption - REQUIRED for security
+ var baseIV []byte
+ if baseIVBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3BaseIV]; exists {
+ // Decode the base64 encoded base IV
+ decodedIV, decodeErr := base64.StdEncoding.DecodeString(string(baseIVBytes))
+ if decodeErr != nil {
+ return fmt.Errorf("failed to decode base IV for SSE-S3 multipart upload %s: %v", uploadID, decodeErr)
+ }
+ if len(decodedIV) != s3_constants.AESBlockSize {
+ return fmt.Errorf("invalid base IV length for SSE-S3 multipart upload %s: expected %d bytes, got %d", uploadID, s3_constants.AESBlockSize, len(decodedIV))
+ }
+ baseIV = decodedIV
+ glog.V(4).Infof("Using stored base IV %x for SSE-S3 multipart upload %s", baseIV[:8], uploadID)
+ } else {
+ return fmt.Errorf("no base IV found for SSE-S3 multipart upload %s - required for encryption consistency", uploadID)
+ }
+
+ // Retrieve and set key data for consistent multipart encryption - REQUIRED for decryption
+ if keyDataBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3KeyData]; exists {
+ // Key data is already base64 encoded, pass it directly
+ keyDataStr := string(keyDataBytes)
+ r.Header.Set(s3_constants.SeaweedFSSSES3KeyDataHeader, keyDataStr)
+ glog.V(4).Infof("Using stored key data for SSE-S3 multipart upload %s", uploadID)
+ } else {
+ return fmt.Errorf("no SSE-S3 key data found for multipart upload %s - required for encryption", uploadID)
+ }
+
+ // Pass the base IV to putToFiler via header for offset calculation
+ r.Header.Set(s3_constants.SeaweedFSSSES3BaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
+
+ glog.Infof("PutObjectPartHandler: inherited SSE-S3 settings from upload %s - letting putToFiler handle encryption", uploadID)
+ }
+ return nil
+}
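
The stored base IV is what makes each part's encryption reproducible: every part re-derives its own starting counter from the same base IV plus the part's unique byte offset, so no two parts consume the same CTR counter value. A hedged sketch of that derivation (the real logic lives in CreateSSES3EncryptedReaderWithBaseIV, outside this excerpt; the helper name here is illustrative):

// deriveCTRIVAtOffset advances a big-endian CTR counter by the number of
// 16-byte AES blocks preceding offset. Parts written at disjoint offsets
// therefore start at disjoint counter values.
func deriveCTRIVAtOffset(baseIV []byte, offset int64) []byte {
	iv := make([]byte, len(baseIV))
	copy(iv, baseIV)
	carry := uint64(offset) / 16 // one counter value per AES block
	for i := len(iv) - 1; i >= 0 && carry > 0; i-- {
		sum := uint64(iv[i]) + (carry & 0xff)
		iv[i] = byte(sum)
		carry = (carry >> 8) + (sum >> 8)
	}
	return iv
}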
diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go
index e77d734ac..da986cf87 100644
--- a/weed/s3api/s3api_object_handlers_postpolicy.go
+++ b/weed/s3api/s3api_object_handlers_postpolicy.go
@@ -136,7 +136,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
}
}
- etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "", bucket)
+ etag, errCode, _ := s3a.putToFiler(r, uploadUrl, fileBody, "", bucket, 1)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index 9652eda52..18cd08c37 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -15,6 +15,7 @@ import (
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/security"
@@ -45,6 +46,19 @@ var (
ErrDefaultRetentionYearsOutOfRange = errors.New("default retention years must be between 0 and 100")
)
+// hasExplicitEncryption reports whether any explicit encryption was provided in the request.
+// It keeps the bucket-default-encryption fallback in putToFiler easy to read.
+func hasExplicitEncryption(customerKey *SSECustomerKey, sseKMSKey *SSEKMSKey, sseS3Key *SSES3Key) bool {
+ return customerKey != nil || sseKMSKey != nil || sseS3Key != nil
+}
+
+// BucketDefaultEncryptionResult holds the result of bucket default encryption processing
+type BucketDefaultEncryptionResult struct {
+ DataReader io.Reader
+ SSES3Key *SSES3Key
+ SSEKMSKey *SSEKMSKey
+}
+
func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
@@ -172,7 +186,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
dataReader = mimeDetect(r, dataReader)
}
- etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)
+ etag, errCode, sseType := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket, 1)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
@@ -181,6 +195,11 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// No version ID header for never-configured versioning
setEtag(w, etag)
+
+ // Set SSE response headers based on encryption type used
+ if sseType == s3_constants.SSETypeS3 {
+ w.Header().Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256)
+ }
}
}
stats_collect.RecordBucketActiveTime(bucket)
@@ -189,82 +208,54 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
writeSuccessResponseEmpty(w, r)
}
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string) (etag string, code s3err.ErrorCode) {
-
- // Handle SSE-C encryption if requested
- customerKey, err := ParseSSECHeaders(r)
- if err != nil {
- glog.Errorf("SSE-C header validation failed: %v", err)
- // Use shared error mapping helper
- errCode := MapSSECErrorToS3Error(err)
- return "", errCode
- }
-
- // Apply SSE-C encryption if customer key is provided
- var sseIV []byte
- if customerKey != nil {
- encryptedReader, iv, encErr := CreateSSECEncryptedReader(dataReader, customerKey)
- if encErr != nil {
- glog.Errorf("Failed to create SSE-C encrypted reader: %v", encErr)
- return "", s3err.ErrInternalError
- }
- dataReader = encryptedReader
- sseIV = iv
- }
-
- // Handle SSE-KMS encryption if requested
- var sseKMSKey *SSEKMSKey
- glog.V(4).Infof("putToFiler: checking for SSE-KMS request. Headers: SSE=%s, KeyID=%s", r.Header.Get(s3_constants.AmzServerSideEncryption), r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId))
- if IsSSEKMSRequest(r) {
- glog.V(3).Infof("putToFiler: SSE-KMS request detected, processing encryption")
- // Parse SSE-KMS headers
- keyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId)
- bucketKeyEnabled := strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true"
-
- // Build encryption context
- bucket, object := s3_constants.GetBucketAndObject(r)
- encryptionContext := BuildEncryptionContext(bucket, object, bucketKeyEnabled)
-
- // Add any user-provided encryption context
- if contextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" {
- userContext, err := parseEncryptionContext(contextHeader)
- if err != nil {
- glog.Errorf("Failed to parse encryption context: %v", err)
- return "", s3err.ErrInvalidRequest
- }
- // Merge user context with default context
- for k, v := range userContext {
- encryptionContext[k] = v
- }
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string, partNumber int) (etag string, code s3err.ErrorCode, sseType string) {
+ // Calculate unique offset for each part to prevent IV reuse in multipart uploads
+ // This is critical for CTR mode encryption security
+ partOffset := calculatePartOffset(partNumber)
+
+ // Handle all SSE encryption types in a unified manner to eliminate repetitive dataReader assignments
+ sseResult, sseErrorCode := s3a.handleAllSSEEncryption(r, dataReader, partOffset)
+ if sseErrorCode != s3err.ErrNone {
+ return "", sseErrorCode, ""
+ }
+
+ // Extract results from unified SSE handling
+ dataReader = sseResult.DataReader
+ customerKey := sseResult.CustomerKey
+ sseIV := sseResult.SSEIV
+ sseKMSKey := sseResult.SSEKMSKey
+ sseKMSMetadata := sseResult.SSEKMSMetadata
+ sseS3Key := sseResult.SSES3Key
+ sseS3Metadata := sseResult.SSES3Metadata
+
+ // Apply bucket default encryption if no explicit encryption was provided
+ // This implements AWS S3 behavior where bucket default encryption automatically applies
+ if !hasExplicitEncryption(customerKey, sseKMSKey, sseS3Key) {
+ glog.V(4).Infof("putToFiler: no explicit encryption detected, checking for bucket default encryption")
+
+ // Apply bucket default encryption and get the result
+ encryptionResult, applyErr := s3a.applyBucketDefaultEncryption(bucket, r, dataReader)
+ if applyErr != nil {
+ glog.Errorf("Failed to apply bucket default encryption: %v", applyErr)
+ return "", s3err.ErrInternalError, ""
}
- // Check if a base IV is provided (for multipart uploads)
- var encryptedReader io.Reader
- var sseKey *SSEKMSKey
- var encErr error
-
- baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSBaseIVHeader)
- if baseIVHeader != "" {
- // Decode the base IV from the header
- baseIV, decodeErr := base64.StdEncoding.DecodeString(baseIVHeader)
- if decodeErr != nil || len(baseIV) != 16 {
- glog.Errorf("Invalid base IV in header: %v", decodeErr)
- return "", s3err.ErrInternalError
+ // Update variables based on the result
+ dataReader = encryptionResult.DataReader
+ sseS3Key = encryptionResult.SSES3Key
+ sseKMSKey = encryptionResult.SSEKMSKey
+
+ // If SSE-S3 was applied by bucket default, prepare metadata (if not already done)
+ if sseS3Key != nil && len(sseS3Metadata) == 0 {
+ var metaErr error
+ sseS3Metadata, metaErr = SerializeSSES3Metadata(sseS3Key)
+ if metaErr != nil {
+ glog.Errorf("Failed to serialize SSE-S3 metadata for bucket default encryption: %v", metaErr)
+ return "", s3err.ErrInternalError, ""
}
- // Use the provided base IV for multipart upload consistency
- encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBaseIV(dataReader, keyID, encryptionContext, bucketKeyEnabled, baseIV)
- glog.V(4).Infof("Using provided base IV %x for SSE-KMS encryption", baseIV[:8])
- } else {
- // Generate a new IV for single-part uploads
- encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBucketKey(dataReader, keyID, encryptionContext, bucketKeyEnabled)
}
-
- if encErr != nil {
- glog.Errorf("Failed to create SSE-KMS encrypted reader: %v", encErr)
- return "", s3err.ErrInternalError
- }
- dataReader = encryptedReader
- sseKMSKey = sseKey
+ } else {
+ glog.V(4).Infof("putToFiler: explicit encryption already applied, skipping bucket default encryption")
}
hash := md5.New()
@@ -274,7 +265,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
if err != nil {
glog.Errorf("NewRequest %s: %v", uploadUrl, err)
- return "", s3err.ErrInternalError
+ return "", s3err.ErrInternalError, ""
}
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
@@ -311,20 +302,22 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
// Set SSE-KMS metadata headers for the filer if KMS encryption was applied
if sseKMSKey != nil {
- // Serialize SSE-KMS metadata for storage
- kmsMetadata, err := SerializeSSEKMSMetadata(sseKMSKey)
- if err != nil {
- glog.Errorf("Failed to serialize SSE-KMS metadata: %v", err)
- return "", s3err.ErrInternalError
- }
+ // Use already-serialized SSE-KMS metadata from helper function
// Store serialized KMS metadata in a custom header that the filer can use
- proxyReq.Header.Set(s3_constants.SeaweedFSSSEKMSKeyHeader, base64.StdEncoding.EncodeToString(kmsMetadata))
+ proxyReq.Header.Set(s3_constants.SeaweedFSSSEKMSKeyHeader, base64.StdEncoding.EncodeToString(sseKMSMetadata))
glog.V(3).Infof("putToFiler: storing SSE-KMS metadata for object %s with keyID %s", uploadUrl, sseKMSKey.KeyID)
} else {
glog.V(4).Infof("putToFiler: no SSE-KMS encryption detected")
}
+ // Set SSE-S3 metadata headers for the filer if S3 encryption was applied
+ if sseS3Key != nil && len(sseS3Metadata) > 0 {
+ // Store serialized S3 metadata in a custom header that the filer can use
+ proxyReq.Header.Set(s3_constants.SeaweedFSSSES3Key, base64.StdEncoding.EncodeToString(sseS3Metadata))
+ glog.V(3).Infof("putToFiler: storing SSE-S3 metadata for object %s with keyID %s", uploadUrl, sseS3Key.KeyID)
+ }
+
// ensure that the Authorization header is overriding any previous
// Authorization header which might be already present in proxyReq
s3a.maybeAddFilerJwtAuthorization(proxyReq, true)
@@ -333,9 +326,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
if strings.Contains(postErr.Error(), s3err.ErrMsgPayloadChecksumMismatch) {
- return "", s3err.ErrInvalidDigest
+ return "", s3err.ErrInvalidDigest, ""
}
- return "", s3err.ErrInternalError
+ return "", s3err.ErrInternalError, ""
}
defer resp.Body.Close()
@@ -344,21 +337,23 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp_body, ra_err := io.ReadAll(resp.Body)
if ra_err != nil {
glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
- return etag, s3err.ErrInternalError
+ return etag, s3err.ErrInternalError, ""
}
var ret weed_server.FilerPostResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
- return "", s3err.ErrInternalError
+ return "", s3err.ErrInternalError, ""
}
if ret.Error != "" {
glog.Errorf("upload to filer error: %v", ret.Error)
- return "", filerErrorToS3Error(ret.Error)
+ return "", filerErrorToS3Error(ret.Error), ""
}
stats_collect.RecordBucketActiveTime(bucket)
- return etag, s3err.ErrNone
+
+ // Return the SSE type determined by the unified handler
+ return etag, s3err.ErrNone, sseResult.SSEType
}
func setEtag(w http.ResponseWriter, etag string) {
@@ -425,7 +420,7 @@ func (s3a *S3ApiServer) putSuspendedVersioningObject(r *http.Request, bucket, ob
dataReader = mimeDetect(r, dataReader)
}
- etag, errCode = s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)
+ etag, errCode, _ = s3a.putToFiler(r, uploadUrl, dataReader, "", bucket, 1)
if errCode != s3err.ErrNone {
glog.Errorf("putSuspendedVersioningObject: failed to upload object: %v", errCode)
return "", errCode
@@ -567,7 +562,7 @@ func (s3a *S3ApiServer) putVersionedObject(r *http.Request, bucket, object strin
glog.V(2).Infof("putVersionedObject: uploading %s/%s version %s to %s", bucket, object, versionId, versionUploadUrl)
- etag, errCode = s3a.putToFiler(r, versionUploadUrl, body, "", bucket)
+ etag, errCode, _ = s3a.putToFiler(r, versionUploadUrl, body, "", bucket, 1)
if errCode != s3err.ErrNone {
glog.Errorf("putVersionedObject: failed to upload version: %v", errCode)
return "", "", errCode
@@ -709,6 +704,96 @@ func (s3a *S3ApiServer) extractObjectLockMetadataFromRequest(r *http.Request, en
return nil
}
+// applyBucketDefaultEncryption applies bucket default encryption settings to a new object
+// This implements AWS S3 behavior where bucket default encryption automatically applies to new objects
+// when no explicit encryption headers are provided in the upload request.
+// Returns the modified dataReader and encryption keys instead of using pointer parameters for better code clarity.
+func (s3a *S3ApiServer) applyBucketDefaultEncryption(bucket string, r *http.Request, dataReader io.Reader) (*BucketDefaultEncryptionResult, error) {
+ // Check if bucket has default encryption configured
+ encryptionConfig, err := s3a.GetBucketEncryptionConfig(bucket)
+ if err != nil || encryptionConfig == nil {
+ // No default encryption configured, return original reader
+ return &BucketDefaultEncryptionResult{DataReader: dataReader}, nil
+ }
+
+ if encryptionConfig.SseAlgorithm == "" {
+ // No encryption algorithm specified
+ return &BucketDefaultEncryptionResult{DataReader: dataReader}, nil
+ }
+
+ glog.V(3).Infof("applyBucketDefaultEncryption: applying default encryption %s for bucket %s", encryptionConfig.SseAlgorithm, bucket)
+
+ switch encryptionConfig.SseAlgorithm {
+ case EncryptionTypeAES256:
+ // Apply SSE-S3 (AES256) encryption
+ return s3a.applySSES3DefaultEncryption(dataReader)
+
+ case EncryptionTypeKMS:
+ // Apply SSE-KMS encryption
+ return s3a.applySSEKMSDefaultEncryption(bucket, r, dataReader, encryptionConfig)
+
+ default:
+ return nil, fmt.Errorf("unsupported default encryption algorithm: %s", encryptionConfig.SseAlgorithm)
+ }
+}
+
+// applySSES3DefaultEncryption applies SSE-S3 encryption as bucket default
+func (s3a *S3ApiServer) applySSES3DefaultEncryption(dataReader io.Reader) (*BucketDefaultEncryptionResult, error) {
+ // Generate SSE-S3 key
+ keyManager := GetSSES3KeyManager()
+ key, err := keyManager.GetOrCreateKey("")
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate SSE-S3 key for default encryption: %v", err)
+ }
+
+ // Create encrypted reader
+ encryptedReader, iv, encErr := CreateSSES3EncryptedReader(dataReader, key)
+ if encErr != nil {
+ return nil, fmt.Errorf("failed to create SSE-S3 encrypted reader for default encryption: %v", encErr)
+ }
+
+ // Store IV on the key object for later decryption
+ key.IV = iv
+
+ // Store key in manager for later retrieval
+ keyManager.StoreKey(key)
+ glog.V(3).Infof("applySSES3DefaultEncryption: applied SSE-S3 default encryption with key ID: %s", key.KeyID)
+
+ return &BucketDefaultEncryptionResult{
+ DataReader: encryptedReader,
+ SSES3Key: key,
+ }, nil
+}
+
+// applySSEKMSDefaultEncryption applies SSE-KMS encryption as bucket default
+func (s3a *S3ApiServer) applySSEKMSDefaultEncryption(bucket string, r *http.Request, dataReader io.Reader, encryptionConfig *s3_pb.EncryptionConfiguration) (*BucketDefaultEncryptionResult, error) {
+ // Use the KMS key ID from bucket configuration, or default if not specified
+ keyID := encryptionConfig.KmsKeyId
+ if keyID == "" {
+ keyID = "alias/aws/s3" // AWS default KMS key for S3
+ }
+
+ // Check if bucket key is enabled in configuration
+ bucketKeyEnabled := encryptionConfig.BucketKeyEnabled
+
+ // Build encryption context for KMS
+ bucket, object := s3_constants.GetBucketAndObject(r)
+ encryptionContext := BuildEncryptionContext(bucket, object, bucketKeyEnabled)
+
+ // Create SSE-KMS encrypted reader
+ encryptedReader, sseKey, encErr := CreateSSEKMSEncryptedReaderWithBucketKey(dataReader, keyID, encryptionContext, bucketKeyEnabled)
+ if encErr != nil {
+ return nil, fmt.Errorf("failed to create SSE-KMS encrypted reader for default encryption: %v", encErr)
+ }
+
+ glog.V(3).Infof("applySSEKMSDefaultEncryption: applied SSE-KMS default encryption with key ID: %s", keyID)
+
+ return &BucketDefaultEncryptionResult{
+ DataReader: encryptedReader,
+ SSEKMSKey: sseKey,
+ }, nil
+}
+
// applyBucketDefaultRetention applies bucket default retention settings to a new object
// This implements AWS S3 behavior where bucket default retention automatically applies to new objects
// when no explicit retention headers are provided in the upload request
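
Taken together, the precedence putToFiler now implements is: explicit request headers always win, and the bucket default only fills the gap. A compressed sketch of that decision order (the function itself is illustrative, not commit code; the constants appear in the hunks above):

// resolveSSEType condenses the control flow: explicit SSE-C/KMS/S3
// short-circuit the bucket default; otherwise the bucket's configured
// algorithm decides.
func resolveSSEType(explicitC, explicitKMS, explicitS3 bool, bucketDefaultAlgorithm string) string {
	switch {
	case explicitC:
		return s3_constants.SSETypeC
	case explicitKMS:
		return s3_constants.SSETypeKMS
	case explicitS3:
		return s3_constants.SSETypeS3
	case bucketDefaultAlgorithm == EncryptionTypeKMS:
		return s3_constants.SSETypeKMS
	case bucketDefaultAlgorithm == EncryptionTypeAES256:
		return s3_constants.SSETypeS3
	}
	return "Plain"
}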
diff --git a/weed/s3api/s3api_object_retention_test.go b/weed/s3api/s3api_object_retention_test.go
index ab5eda7e4..20ccf60d9 100644
--- a/weed/s3api/s3api_object_retention_test.go
+++ b/weed/s3api/s3api_object_retention_test.go
@@ -11,8 +11,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
-// TODO: If needed, re-implement TestPutObjectRetention with proper setup for buckets, objects, and versioning.
-
func TestValidateRetention(t *testing.T) {
tests := []struct {
name string
diff --git a/weed/s3api/s3api_put_handlers.go b/weed/s3api/s3api_put_handlers.go
new file mode 100644
index 000000000..fafd2f329
--- /dev/null
+++ b/weed/s3api/s3api_put_handlers.go
@@ -0,0 +1,270 @@
+package s3api
+
+import (
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+)
+
+// PutToFilerEncryptionResult holds the result of encryption processing
+type PutToFilerEncryptionResult struct {
+ DataReader io.Reader
+ SSEType string
+ CustomerKey *SSECustomerKey
+ SSEIV []byte
+ SSEKMSKey *SSEKMSKey
+ SSES3Key *SSES3Key
+ SSEKMSMetadata []byte
+ SSES3Metadata []byte
+}
+
+// calculatePartOffset calculates a unique offset for each part to prevent IV reuse in multipart uploads.
+// AWS S3 part numbers must start from 1, never 0 or negative.
+func calculatePartOffset(partNumber int) int64 {
+ if partNumber < 1 {
+ glog.Errorf("Invalid partNumber: %d. Must be >= 1.", partNumber)
+ return 0
+ }
+ // Using a large multiplier to ensure block offsets for different parts do not overlap.
+ // S3 part size limit is 5GB, so this provides a large safety margin.
+ partOffset := int64(partNumber-1) * s3_constants.PartOffsetMultiplier
+ return partOffset
+}
+
+// handleSSECEncryption processes SSE-C encryption for the data reader
+func (s3a *S3ApiServer) handleSSECEncryption(r *http.Request, dataReader io.Reader) (io.Reader, *SSECustomerKey, []byte, s3err.ErrorCode) {
+ // Handle SSE-C encryption if requested
+ customerKey, err := ParseSSECHeaders(r)
+ if err != nil {
+ glog.Errorf("SSE-C header validation failed: %v", err)
+ // Use shared error mapping helper
+ errCode := MapSSECErrorToS3Error(err)
+ return nil, nil, nil, errCode
+ }
+
+ // Apply SSE-C encryption if customer key is provided
+ var sseIV []byte
+ if customerKey != nil {
+ encryptedReader, iv, encErr := CreateSSECEncryptedReader(dataReader, customerKey)
+ if encErr != nil {
+ return nil, nil, nil, s3err.ErrInternalError
+ }
+ dataReader = encryptedReader
+ sseIV = iv
+ }
+
+ return dataReader, customerKey, sseIV, s3err.ErrNone
+}
+
+// handleSSEKMSEncryption processes SSE-KMS encryption for the data reader
+func (s3a *S3ApiServer) handleSSEKMSEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSEKMSKey, []byte, s3err.ErrorCode) {
+ // Handle SSE-KMS encryption if requested
+ if !IsSSEKMSRequest(r) {
+ return dataReader, nil, nil, s3err.ErrNone
+ }
+
+ glog.V(3).Infof("handleSSEKMSEncryption: SSE-KMS request detected, processing encryption")
+
+ // Parse SSE-KMS headers
+ keyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId)
+ bucketKeyEnabled := strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true"
+
+ // Build encryption context
+ bucket, object := s3_constants.GetBucketAndObject(r)
+ encryptionContext := BuildEncryptionContext(bucket, object, bucketKeyEnabled)
+
+ // Add any user-provided encryption context
+ if contextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" {
+ userContext, err := parseEncryptionContext(contextHeader)
+ if err != nil {
+ return nil, nil, nil, s3err.ErrInvalidRequest
+ }
+ // Merge user context with default context
+ for k, v := range userContext {
+ encryptionContext[k] = v
+ }
+ }
+
+ // Check if a base IV is provided (for multipart uploads)
+ var encryptedReader io.Reader
+ var sseKey *SSEKMSKey
+ var encErr error
+
+ baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSBaseIVHeader)
+ if baseIVHeader != "" {
+ // Decode the base IV from the header
+ baseIV, decodeErr := base64.StdEncoding.DecodeString(baseIVHeader)
+ if decodeErr != nil || len(baseIV) != s3_constants.AESBlockSize {
+ return nil, nil, nil, s3err.ErrInternalError
+ }
+ // Use the provided base IV with unique part offset for multipart upload consistency
+ encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBaseIVAndOffset(dataReader, keyID, encryptionContext, bucketKeyEnabled, baseIV, partOffset)
+ glog.V(4).Infof("Using provided base IV %x for SSE-KMS encryption", baseIV[:8])
+ } else {
+ // Generate a new IV for single-part uploads
+ encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBucketKey(dataReader, keyID, encryptionContext, bucketKeyEnabled)
+ }
+
+ if encErr != nil {
+ return nil, nil, nil, s3err.ErrInternalError
+ }
+
+ // Prepare SSE-KMS metadata for later header setting
+ sseKMSMetadata, metaErr := SerializeSSEKMSMetadata(sseKey)
+ if metaErr != nil {
+ return nil, nil, nil, s3err.ErrInternalError
+ }
+
+ return encryptedReader, sseKey, sseKMSMetadata, s3err.ErrNone
+}
+
+// handleSSES3MultipartEncryption handles multipart upload logic for SSE-S3 encryption
+func (s3a *S3ApiServer) handleSSES3MultipartEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSES3Key, s3err.ErrorCode) {
+ keyDataHeader := r.Header.Get(s3_constants.SeaweedFSSSES3KeyDataHeader)
+ baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSES3BaseIVHeader)
+
+ glog.V(4).Infof("handleSSES3MultipartEncryption: using provided key and base IV for multipart part")
+
+ // Decode the key data
+ keyData, decodeErr := base64.StdEncoding.DecodeString(keyDataHeader)
+ if decodeErr != nil {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ // Deserialize the SSE-S3 key
+ keyManager := GetSSES3KeyManager()
+ key, deserializeErr := DeserializeSSES3Metadata(keyData, keyManager)
+ if deserializeErr != nil {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ // Decode the base IV
+ baseIV, decodeErr := base64.StdEncoding.DecodeString(baseIVHeader)
+ if decodeErr != nil || len(baseIV) != s3_constants.AESBlockSize {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ // Use the provided base IV with unique part offset for multipart upload consistency
+ encryptedReader, _, encErr := CreateSSES3EncryptedReaderWithBaseIV(dataReader, key, baseIV, partOffset)
+ if encErr != nil {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ glog.V(4).Infof("handleSSES3MultipartEncryption: using provided base IV %x", baseIV[:8])
+ return encryptedReader, key, s3err.ErrNone
+}
+
+// handleSSES3SinglePartEncryption handles single-part upload logic for SSE-S3 encryption
+func (s3a *S3ApiServer) handleSSES3SinglePartEncryption(dataReader io.Reader) (io.Reader, *SSES3Key, s3err.ErrorCode) {
+ glog.V(4).Infof("handleSSES3SinglePartEncryption: generating new key for single-part upload")
+
+ keyManager := GetSSES3KeyManager()
+ key, err := keyManager.GetOrCreateKey("")
+ if err != nil {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ // Create encrypted reader
+ encryptedReader, iv, encErr := CreateSSES3EncryptedReader(dataReader, key)
+ if encErr != nil {
+ return nil, nil, s3err.ErrInternalError
+ }
+
+ // Store IV on the key object for later decryption
+ key.IV = iv
+
+ // Store the key for later use
+ keyManager.StoreKey(key)
+
+ return encryptedReader, key, s3err.ErrNone
+}
+
+// handleSSES3Encryption processes SSE-S3 encryption for the data reader
+func (s3a *S3ApiServer) handleSSES3Encryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSES3Key, []byte, s3err.ErrorCode) {
+ if !IsSSES3RequestInternal(r) {
+ return dataReader, nil, nil, s3err.ErrNone
+ }
+
+ glog.V(3).Infof("handleSSES3Encryption: SSE-S3 request detected, processing encryption")
+
+ var encryptedReader io.Reader
+ var sseS3Key *SSES3Key
+ var errCode s3err.ErrorCode
+
+ // Check if this is multipart upload (key data and base IV provided)
+ keyDataHeader := r.Header.Get(s3_constants.SeaweedFSSSES3KeyDataHeader)
+ baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSES3BaseIVHeader)
+
+ if keyDataHeader != "" && baseIVHeader != "" {
+ // Multipart upload: use provided key and base IV
+ encryptedReader, sseS3Key, errCode = s3a.handleSSES3MultipartEncryption(r, dataReader, partOffset)
+ } else {
+ // Single-part upload: generate new key and IV
+ encryptedReader, sseS3Key, errCode = s3a.handleSSES3SinglePartEncryption(dataReader)
+ }
+
+ if errCode != s3err.ErrNone {
+ return nil, nil, nil, errCode
+ }
+
+ // Prepare SSE-S3 metadata for later header setting
+ sseS3Metadata, metaErr := SerializeSSES3Metadata(sseS3Key)
+ if metaErr != nil {
+ return nil, nil, nil, s3err.ErrInternalError
+ }
+
+ glog.V(3).Infof("handleSSES3Encryption: prepared SSE-S3 metadata for object")
+ return encryptedReader, sseS3Key, sseS3Metadata, s3err.ErrNone
+}
+
+// handleAllSSEEncryption processes all SSE types in sequence and returns the final encrypted reader
+// This eliminates repetitive dataReader assignments and centralizes SSE processing
+func (s3a *S3ApiServer) handleAllSSEEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (*PutToFilerEncryptionResult, s3err.ErrorCode) {
+ result := &PutToFilerEncryptionResult{
+ DataReader: dataReader,
+ }
+
+ // Handle SSE-C encryption first
+ encryptedReader, customerKey, sseIV, errCode := s3a.handleSSECEncryption(r, result.DataReader)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+ result.DataReader = encryptedReader
+ result.CustomerKey = customerKey
+ result.SSEIV = sseIV
+
+ // Handle SSE-KMS encryption
+ encryptedReader, sseKMSKey, sseKMSMetadata, errCode := s3a.handleSSEKMSEncryption(r, result.DataReader, partOffset)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+ result.DataReader = encryptedReader
+ result.SSEKMSKey = sseKMSKey
+ result.SSEKMSMetadata = sseKMSMetadata
+
+ // Handle SSE-S3 encryption
+ encryptedReader, sseS3Key, sseS3Metadata, errCode := s3a.handleSSES3Encryption(r, result.DataReader, partOffset)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+ result.DataReader = encryptedReader
+ result.SSES3Key = sseS3Key
+ result.SSES3Metadata = sseS3Metadata
+
+ // Set SSE type for response headers
+ if customerKey != nil {
+ result.SSEType = s3_constants.SSETypeC
+ } else if sseKMSKey != nil {
+ result.SSEType = s3_constants.SSETypeKMS
+ } else if sseS3Key != nil {
+ result.SSEType = s3_constants.SSETypeS3
+ }
+
+ return result, s3err.ErrNone
+}
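
A quick worked example of the offset arithmetic: parts 1, 2, 3 land at byte offsets 0, M, 2M for multiplier M, and because a single S3 part is at most 5 GiB, the CTR block ranges of consecutive parts cannot overlap whenever M exceeds the maximum part size. The real value of s3_constants.PartOffsetMultiplier is defined in s3_constants/crypto.go, outside this excerpt; the 8 GiB constant below is a placeholder for illustration:

package main

import "fmt"

func main() {
	const multiplier = int64(8) << 30 // placeholder: 8 GiB > 5 GiB part cap
	for part := int64(1); part <= 3; part++ {
		offset := (part - 1) * multiplier
		fmt.Printf("part %d: byte offset %d, first CTR block %d\n", part, offset, offset/16)
	}
}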
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index cf4ee9d35..3f3102d14 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -246,7 +246,7 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request,
// Extract SSE metadata from request headers if available
var sseType filer_pb.SSEType = filer_pb.SSEType_NONE
- var sseKmsMetadata []byte
+ var sseMetadata []byte
if r != nil {
@@ -255,7 +255,7 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request,
if sseKMSHeaderValue != "" {
sseType = filer_pb.SSEType_SSE_KMS
if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeaderValue); err == nil {
- sseKmsMetadata = kmsData
+ sseMetadata = kmsData
glog.V(4).InfofCtx(ctx, "Storing SSE-KMS metadata for chunk %s at offset %d", fileId, chunkOffset)
} else {
glog.V(1).InfofCtx(ctx, "Failed to decode SSE-KMS metadata for chunk %s: %v", fileId, err)
@@ -284,7 +284,7 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request,
PartOffset: chunkOffset,
}
if ssecMetadata, serErr := json.Marshal(ssecMetadataStruct); serErr == nil {
- sseKmsMetadata = ssecMetadata
+ sseMetadata = ssecMetadata
} else {
glog.V(1).InfofCtx(ctx, "Failed to serialize SSE-C metadata for chunk %s: %v", fileId, serErr)
}
@@ -294,14 +294,29 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request,
} else {
glog.V(4).InfofCtx(ctx, "SSE-C chunk %s missing IV or KeyMD5 header", fileId)
}
- } else {
+ } else if r.Header.Get(s3_constants.SeaweedFSSSES3Key) != "" {
+ // SSE-S3: Server-side encryption with server-managed keys
+ // Set the correct SSE type for SSE-S3 chunks to maintain proper tracking
+ sseType = filer_pb.SSEType_SSE_S3
+
+ // Get SSE-S3 metadata from the header (non-empty per the branch condition above)
+ sseS3Header := r.Header.Get(s3_constants.SeaweedFSSSES3Key)
+ if s3Data, err := base64.StdEncoding.DecodeString(sseS3Header); err == nil {
+ // For SSE-S3, store metadata at chunk level for consistency with SSE-KMS/SSE-C
+ glog.V(4).InfofCtx(ctx, "Storing SSE-S3 metadata for chunk %s at offset %d", fileId, chunkOffset)
+ sseMetadata = s3Data
+ } else {
+ glog.V(1).InfofCtx(ctx, "Failed to decode SSE-S3 metadata for chunk %s: %v", fileId, err)
+ }
}
}
// Create chunk with SSE metadata if available
var chunk *filer_pb.FileChunk
if sseType != filer_pb.SSEType_NONE {
- chunk = uploadResult.ToPbFileChunkWithSSE(fileId, chunkOffset, time.Now().UnixNano(), sseType, sseKmsMetadata)
+ chunk = uploadResult.ToPbFileChunkWithSSE(fileId, chunkOffset, time.Now().UnixNano(), sseType, sseMetadata)
} else {
chunk = uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano())
}
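
With the field renamed from SseKmsMetadata to the unified SseMetadata, a chunk consumer distinguishes the three flavors purely by the SseType tag. A hedged read-side sketch reusing the deserializers from the copy handlers above (the function itself is illustrative, not commit code):

// describeChunkSSE interprets the unified SseMetadata field according to
// the chunk's SseType tag.
func describeChunkSSE(chunk *filer_pb.FileChunk) (string, error) {
	switch chunk.GetSseType() {
	case filer_pb.SSEType_SSE_C:
		if _, err := DeserializeSSECMetadata(chunk.GetSseMetadata()); err != nil {
			return "", err
		}
		return s3_constants.SSETypeC, nil
	case filer_pb.SSEType_SSE_KMS:
		if _, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata()); err != nil {
			return "", err
		}
		return s3_constants.SSETypeKMS, nil
	case filer_pb.SSEType_SSE_S3:
		// SSE-S3 metadata travels in the same unified field.
		return s3_constants.SSETypeS3, nil
	}
	return "Plain", nil
}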