diff options
| author | Chris Lu <chrislusf@users.noreply.github.com> | 2025-11-25 09:56:20 -0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-11-25 09:56:20 -0800 |
| commit | f6a604c538c0afa3caec55f8f717a784878e064b (patch) | |
| tree | 35c2226e98e9eb721a0b8c95d2fd9deab3059b1a /weed/s3api/s3api_object_handlers_copy.go | |
| parent | a24c31de06093337cd31e42ed94de67ce1d5c95b (diff) | |
| download | seaweedfs-f6a604c538c0afa3caec55f8f717a784878e064b.tar.xz seaweedfs-f6a604c538c0afa3caec55f8f717a784878e064b.zip | |
S3: Fix encrypted file copy with multiple chunks (#7530) (#7546)
* S3: Fix encrypted file copy with multiple chunks (#7530)
When copying encrypted files with multiple chunks (encrypted volumes via
-filer.encryptVolumeData), the copied file could not be read. This was
caused by the chunk copy operation not preserving the IsCompressed flag,
which led to improper handling of compressed/encrypted data during upload.
The fix:
1. Modified uploadChunkData to accept an isCompressed parameter
2. Updated copySingleChunk to pass the source chunk's IsCompressed flag
3. Updated copySingleChunkForRange for partial copy operations
4. Updated all other callers to pass the appropriate compression flag
5. Added comprehensive tests for encrypted volume copy scenarios
This ensures that when copying chunks:
- The IsCompressed flag from the source chunk is passed to the upload
- Compressed data is marked as compressed, preventing double-compression
- Already-encrypted data is not re-encrypted (Cipher: false is correct)
- All chunk metadata (CipherKey, IsCompressed, ETag) is preserved
Tests added:
- TestCreateDestinationChunkPreservesEncryption: Verifies metadata preservation
- TestCopySingleChunkWithEncryption: Tests various encryption/compression scenarios
- TestCopyChunksPreservesMetadata: Tests multi-chunk metadata preservation
- TestEncryptedVolumeScenario: Documents and tests the exact issue #7530 scenario
Fixes #7530
* Address PR review feedback: simplify tests and improve clarity
- Removed TestUploadChunkDataCompressionFlag (panic-based test)
- Removed TestCopySingleChunkWithEncryption (duplicate coverage)
- Removed TestCopyChunksPreservesMetadata (duplicate coverage)
- Added ETag verification to TestEncryptedVolumeCopyScenario
- Renamed to TestEncryptedVolumeCopyScenario for better clarity
- All test coverage now in TestCreateDestinationChunkPreservesEncryption
and TestEncryptedVolumeCopyScenario which focus on the actual behavior
Diffstat (limited to 'weed/s3api/s3api_object_handlers_copy.go')
| -rw-r--r-- | weed/s3api/s3api_object_handlers_copy.go | 21 |
1 file changed, 11 insertions, 10 deletions
diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go index 86a7bc74b..91da98a0e 100644 --- a/weed/s3api/s3api_object_handlers_copy.go +++ b/weed/s3api/s3api_object_handlers_copy.go @@ -817,7 +817,7 @@ func (s3a *S3ApiServer) copySingleChunk(chunk *filer_pb.FileChunk, dstPath strin return nil, fmt.Errorf("download chunk data: %w", err) } - if err := s3a.uploadChunkData(chunkData, assignResult); err != nil { + if err := s3a.uploadChunkData(chunkData, assignResult, chunk.IsCompressed); err != nil { return nil, fmt.Errorf("upload chunk data: %w", err) } @@ -852,7 +852,7 @@ func (s3a *S3ApiServer) copySingleChunkForRange(originalChunk, rangeChunk *filer return nil, fmt.Errorf("download chunk range data: %w", err) } - if err := s3a.uploadChunkData(chunkData, assignResult); err != nil { + if err := s3a.uploadChunkData(chunkData, assignResult, originalChunk.IsCompressed); err != nil { return nil, fmt.Errorf("upload chunk range data: %w", err) } @@ -1140,13 +1140,14 @@ func (s3a *S3ApiServer) prepareChunkCopy(sourceFileId, dstPath string) (*filer_p } // uploadChunkData uploads chunk data to the destination using common upload logic -func (s3a *S3ApiServer) uploadChunkData(chunkData []byte, assignResult *filer_pb.AssignVolumeResponse) error { +// isCompressed indicates if the data is already compressed and should not be compressed again +func (s3a *S3ApiServer) uploadChunkData(chunkData []byte, assignResult *filer_pb.AssignVolumeResponse, isCompressed bool) error { dstUrl := fmt.Sprintf("http://%s/%s", assignResult.Location.Url, assignResult.FileId) uploadOption := &operation.UploadOption{ UploadUrl: dstUrl, - Cipher: false, - IsInputCompressed: false, + Cipher: false, // Data is already encrypted if source had CipherKey; don't re-encrypt + IsInputCompressed: isCompressed, MimeType: "", PairMap: nil, Jwt: security.EncodedJwt(assignResult.Auth), @@ -1367,7 +1368,7 @@ func (s3a *S3ApiServer) copyMultipartSSEKMSChunk(chunk 
*filer_pb.FileChunk, dest } // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { + if err := s3a.uploadChunkData(finalData, assignResult, false); err != nil { return nil, fmt.Errorf("upload chunk data: %w", err) } @@ -1497,7 +1498,7 @@ func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySo } // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { + if err := s3a.uploadChunkData(finalData, assignResult, false); err != nil { return nil, nil, fmt.Errorf("upload chunk data: %w", err) } @@ -1780,7 +1781,7 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour // For unencrypted destination, finalData remains as decrypted plaintext // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { + if err := s3a.uploadChunkData(finalData, assignResult, false); err != nil { return nil, fmt.Errorf("upload chunk data: %w", err) } @@ -1991,7 +1992,7 @@ func (s3a *S3ApiServer) copyChunkWithReencryption(chunk *filer_pb.FileChunk, cop } // Upload the processed data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { + if err := s3a.uploadChunkData(finalData, assignResult, false); err != nil { return nil, fmt.Errorf("upload processed chunk data: %w", err) } @@ -2214,7 +2215,7 @@ func (s3a *S3ApiServer) copyChunkWithSSEKMSReencryption(chunk *filer_pb.FileChun } // Upload the processed data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { + if err := s3a.uploadChunkData(finalData, assignResult, false); err != nil { return nil, fmt.Errorf("upload processed chunk data: %w", err) } |
