| field | value | date |
|---|---|---|
| author | chrislu <chris.lu@gmail.com> | 2025-08-30 11:18:03 -0700 |
| committer | chrislu <chris.lu@gmail.com> | 2025-08-30 11:18:03 -0700 |
| commit | 87021a146027f83f911619f71b9c27bd51e9d55a | |
| tree | c7720f1c285683ce19d28931bd7c11b5475a2844 /weed/server | |
| parent | 0748214c8e2f497a84b9392d2d7d4ec976bc84eb | |
| parent | 879d512b552d834136cfb746a239e6168e5c4ffb | |
Merge branch 'master' into add-ec-vacuum (origin/add-ec-vacuum)
Diffstat (limited to 'weed/server')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/server/common.go | 11 |
| -rw-r--r-- | weed/server/filer_server_handlers_read.go | 30 |
| -rw-r--r-- | weed/server/filer_server_handlers_write_autochunk.go | 41 |
| -rw-r--r-- | weed/server/filer_server_handlers_write_merge.go | 10 |
| -rw-r--r-- | weed/server/filer_server_handlers_write_upload.go | 94 |
5 files changed, 175 insertions, 11 deletions
```diff
diff --git a/weed/server/common.go b/weed/server/common.go
index cf65bd29d..49dd78ce0 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -19,12 +19,12 @@ import (
 	"time"
 
 	"github.com/google/uuid"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/util/request_id"
 	"github.com/seaweedfs/seaweedfs/weed/util/version"
 	"google.golang.org/grpc/metadata"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 
 	"google.golang.org/grpc"
@@ -271,9 +271,12 @@ func handleStaticResources2(r *mux.Router) {
 }
 
 func AdjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) {
-	for header, values := range r.Header {
-		if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(header)]; ok {
-			w.Header()[normalizedHeader] = values
+	// Apply S3 passthrough headers from query parameters
+	// AWS S3 supports overriding response headers via query parameters like:
+	// ?response-cache-control=no-cache&response-content-type=application/json
+	for queryParam, headerValue := range r.URL.Query() {
+		if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(queryParam)]; ok && len(headerValue) > 0 {
+			w.Header().Set(normalizedHeader, headerValue[0])
 		}
 	}
 	adjustHeaderContentDisposition(w, r, filename)
```
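The `common.go` change reworks `AdjustPassthroughHeaders`: response-header overrides now come from S3-style `response-*` query parameters rather than from the incoming request headers, matching AWS S3 behavior (e.g. `?response-cache-control=no-cache&response-content-type=application/json`). Below is a minimal sketch of the new lookup, using an illustrative stand-in for the `s3_constants.PassThroughHeaders` map (the real map and its exact entries live in `weed/s3api/s3_constants`):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// Illustrative stand-in for s3_constants.PassThroughHeaders:
// lower-case query parameter -> canonical response header.
var passThroughHeaders = map[string]string{
	"response-cache-control":       "Cache-Control",
	"response-content-type":        "Content-Type",
	"response-content-disposition": "Content-Disposition",
}

// adjustPassthroughHeaders mirrors the patched loop: overrides are read
// from query parameters, not from the incoming request headers.
func adjustPassthroughHeaders(w http.ResponseWriter, r *http.Request) {
	for queryParam, headerValue := range r.URL.Query() {
		if normalized, ok := passThroughHeaders[strings.ToLower(queryParam)]; ok && len(headerValue) > 0 {
			w.Header().Set(normalized, headerValue[0])
		}
	}
}

func main() {
	r := httptest.NewRequest(http.MethodGet,
		"/bucket/key?response-cache-control=no-cache&response-content-type=application/json", nil)
	w := httptest.NewRecorder()
	adjustPassthroughHeaders(w, r)
	fmt.Println(w.Header().Get("Cache-Control")) // no-cache
	fmt.Println(w.Header().Get("Content-Type"))  // application/json
}
```

Using `Set` with the first query value also replaces any previously written header rather than appending to it, which is the semantics a response override needs.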
```diff
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 9ffb57bb4..ab474eef0 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -192,8 +192,9 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 
 	// print out the header from extended properties
 	for k, v := range entry.Extended {
-		if !strings.HasPrefix(k, "xattr-") {
+		if !strings.HasPrefix(k, "xattr-") && !strings.HasPrefix(k, "x-seaweedfs-") {
 			// "xattr-" prefix is set in filesys.XATTR_PREFIX
+			// "x-seaweedfs-" prefix is for internal metadata that should not become HTTP headers
 			w.Header().Set(k, string(v))
 		}
 	}
@@ -219,11 +220,36 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		w.Header().Set(s3_constants.AmzTagCount, strconv.Itoa(tagCount))
 	}
 
+	// Set SSE metadata headers for S3 API consumption
+	if sseIV, exists := entry.Extended[s3_constants.SeaweedFSSSEIV]; exists {
+		// Convert binary IV to base64 for HTTP header
+		ivBase64 := base64.StdEncoding.EncodeToString(sseIV)
+		w.Header().Set(s3_constants.SeaweedFSSSEIVHeader, ivBase64)
+	}
+
+	// Set SSE-C algorithm and key MD5 headers for S3 API response
+	if sseAlgorithm, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists {
+		w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, string(sseAlgorithm))
+	}
+	if sseKeyMD5, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists {
+		w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, string(sseKeyMD5))
+	}
+
+	if sseKMSKey, exists := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists {
+		// Convert binary KMS metadata to base64 for HTTP header
+		kmsBase64 := base64.StdEncoding.EncodeToString(sseKMSKey)
+		w.Header().Set(s3_constants.SeaweedFSSSEKMSKeyHeader, kmsBase64)
+	}
+
 	SetEtag(w, etag)
 
 	filename := entry.Name()
 	AdjustPassthroughHeaders(w, r, filename)
-	totalSize := int64(entry.Size())
+
+	// For range processing, use the original content size, not the encrypted size
+	// entry.Size() returns max(chunk_sizes, file_size) where chunk_sizes include encryption overhead
+	// For SSE objects, we need the original unencrypted size for proper range validation
+	totalSize := int64(entry.FileSize)
 
 	if r.Method == http.MethodHead {
 		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
```
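Two details are worth noting in the read path: binary SSE metadata (the IV, the encrypted KMS key blob) is base64-encoded before being exposed as response headers, since raw bytes are not valid HTTP header values; and `totalSize` now derives from `entry.FileSize` so range validation uses the plaintext length instead of the ciphertext length. A small sketch of the base64 round trip, with a stand-in name for `s3_constants.SeaweedFSSSEIVHeader`:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"net/http/httptest"
)

func main() {
	// A raw AES IV is arbitrary binary, so it must be base64-encoded
	// before it can travel as an HTTP header value.
	iv := make([]byte, 16)
	if _, err := rand.Read(iv); err != nil {
		panic(err)
	}

	w := httptest.NewRecorder()
	// "X-SeaweedFS-SSE-IV" is a stand-in for s3_constants.SeaweedFSSSEIVHeader.
	w.Header().Set("X-SeaweedFS-SSE-IV", base64.StdEncoding.EncodeToString(iv))

	// The S3 API side decodes the header back to the original bytes.
	decoded, err := base64.StdEncoding.DecodeString(w.Header().Get("X-SeaweedFS-SSE-IV"))
	fmt.Println(err == nil, bytes.Equal(decoded, iv)) // true true
}
```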
```diff
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 76e320908..0d6462c11 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -3,6 +3,7 @@ package weed_server
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -336,6 +337,37 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		}
 	}
 
+	// Process SSE metadata headers sent by S3 API and store in entry extended metadata
+	if sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader); sseIVHeader != "" {
+		// Decode base64-encoded IV and store in metadata
+		if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil {
+			entry.Extended[s3_constants.SeaweedFSSSEIV] = ivData
+			glog.V(4).Infof("Stored SSE-C IV metadata for %s", entry.FullPath)
+		} else {
+			glog.Errorf("Failed to decode SSE-C IV header for %s: %v", entry.FullPath, err)
+		}
+	}
+
+	// Store SSE-C algorithm and key MD5 for proper S3 API response headers
+	if sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); sseAlgorithm != "" {
+		entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(sseAlgorithm)
+		glog.V(4).Infof("Stored SSE-C algorithm metadata for %s", entry.FullPath)
+	}
+	if sseKeyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); sseKeyMD5 != "" {
+		entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(sseKeyMD5)
+		glog.V(4).Infof("Stored SSE-C key MD5 metadata for %s", entry.FullPath)
+	}
+
+	if sseKMSHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader); sseKMSHeader != "" {
+		// Decode base64-encoded KMS metadata and store
+		if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeader); err == nil {
+			entry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsData
+			glog.V(4).Infof("Stored SSE-KMS metadata for %s", entry.FullPath)
+		} else {
+			glog.Errorf("Failed to decode SSE-KMS metadata header for %s: %v", entry.FullPath, err)
+		}
+	}
+
 	dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength)
 	// In test_bucket_listv2_delimiter_basic, the valid object key is the parent folder
 	if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && isS3Request(r) {
@@ -488,6 +520,15 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool
 		}
 	}
 
+	// Handle SSE-C headers
+	if algorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); algorithm != "" {
+		metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(algorithm)
+	}
+	if keyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); keyMD5 != "" {
+		// Store as-is; SSE-C MD5 is base64 and case-sensitive
+		metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(keyMD5)
+	}
+
 	//acp-owner
 	acpOwner := r.Header.Get(s3_constants.ExtAmzOwnerKey)
 	if len(acpOwner) > 0 {
```
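On the write side, `saveMetaData` is the mirror image of the read path: it decodes the base64 headers sent by the S3 gateway back into binary and stores everything in the entry's `Extended` map. Keys stored under the `x-seaweedfs-` prefix are exactly the ones the read handler above now refuses to echo as plain HTTP headers. A condensed sketch of the pattern, with stand-in header names and map keys (the real constants are in `s3_constants`):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

// storeSSEMetadata mimics the saveMetaData pattern: base64 header values
// are decoded into binary Extended metadata, textual ones are copied as-is.
// Header names and map keys are stand-ins for the s3_constants values.
func storeSSEMetadata(h http.Header, extended map[string][]byte) {
	if iv := h.Get("X-SeaweedFS-SSE-IV"); iv != "" {
		if raw, err := base64.StdEncoding.DecodeString(iv); err == nil {
			// "x-seaweedfs-" keys are internal-only; the read handler
			// filters them out of plain HTTP responses.
			extended["x-seaweedfs-sse-iv"] = raw
		}
	}
	if alg := h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm"); alg != "" {
		extended["x-amz-server-side-encryption-customer-algorithm"] = []byte(alg)
	}
}

func main() {
	h := http.Header{}
	h.Set("X-SeaweedFS-SSE-IV", base64.StdEncoding.EncodeToString([]byte("0123456789abcdef")))
	h.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256")

	extended := map[string][]byte{}
	storeSSEMetadata(h, extended)
	fmt.Println(len(extended["x-seaweedfs-sse-iv"]))                                 // 16
	fmt.Println(string(extended["x-amz-server-side-encryption-customer-algorithm"])) // AES256
}
```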
```diff
diff --git a/weed/server/filer_server_handlers_write_merge.go b/weed/server/filer_server_handlers_write_merge.go
index 4207200cb..24e642bd6 100644
--- a/weed/server/filer_server_handlers_write_merge.go
+++ b/weed/server/filer_server_handlers_write_merge.go
@@ -15,6 +15,14 @@ import (
 const MergeChunkMinCount int = 1000
 
 func (fs *FilerServer) maybeMergeChunks(ctx context.Context, so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) {
+	// Don't merge SSE-encrypted chunks to preserve per-chunk metadata
+	for _, chunk := range inputChunks {
+		if chunk.GetSseType() != 0 { // Any SSE type (SSE-C or SSE-KMS)
+			glog.V(3).InfofCtx(ctx, "Skipping chunk merge for SSE-encrypted chunks")
+			return inputChunks, nil
+		}
+	}
+
 	// Only merge small chunks more than half of the file
 	var chunkSize = fs.option.MaxMB * 1024 * 1024
 	var smallChunk, sumChunk int
@@ -44,7 +52,7 @@ func (fs *FilerServer) mergeChunks(ctx context.Context, so *operation.StorageOpt
 	if mergeErr != nil {
 		return nil, mergeErr
 	}
-	mergedChunks, _, _, mergeErr, _ = fs.uploadReaderToChunks(ctx, chunkedFileReader, chunkOffset, int32(fs.option.MaxMB*1024*1024), "", "", true, so)
+	mergedChunks, _, _, mergeErr, _ = fs.uploadReaderToChunks(ctx, nil, chunkedFileReader, chunkOffset, int32(fs.option.MaxMB*1024*1024), "", "", true, so)
 	if mergeErr != nil {
 		return
 	}
```
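The merge guard exists because chunk merging concatenates chunk contents and re-uploads them as fewer, larger chunks; for SSE chunks that would discard the per-chunk IV/offset metadata that decryption depends on. A minimal sketch of the check, assuming the generated `filer_pb` enum in which `SSEType_NONE` is the zero value (equivalent to the `!= 0` comparison in the patch):

```go
// Sketch of the merge guard, using the generated protobuf types from
// "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb".
func hasSSEChunks(chunks []*filer_pb.FileChunk) bool {
	for _, c := range chunks {
		// SSEType_NONE is the zero value, so any non-zero SseType marks an
		// encrypted chunk whose per-chunk metadata a merge would destroy.
		if c.GetSseType() != filer_pb.SSEType_NONE {
			return true
		}
	}
	return false
}
```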
```diff
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index 76e41257f..3f3102d14 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"encoding/base64"
 	"fmt"
 	"hash"
 	"io"
@@ -14,9 +15,12 @@ import (
 
 	"slices"
 
+	"encoding/json"
+
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -46,10 +50,10 @@ func (fs *FilerServer) uploadRequestToChunks(ctx context.Context, w http.Respons
 		chunkOffset = offsetInt
 	}
 
-	return fs.uploadReaderToChunks(ctx, reader, chunkOffset, chunkSize, fileName, contentType, isAppend, so)
+	return fs.uploadReaderToChunks(ctx, r, reader, chunkOffset, chunkSize, fileName, contentType, isAppend, so)
 }
 
-func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, reader io.Reader, startOffset int64, chunkSize int32, fileName, contentType string, isAppend bool, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
+func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, r *http.Request, reader io.Reader, startOffset int64, chunkSize int32, fileName, contentType string, isAppend bool, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
 	md5Hash = md5.New()
 	chunkOffset = startOffset
@@ -118,7 +122,7 @@ func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, reader io.Reade
 			wg.Done()
 		}()
 
-		chunks, toChunkErr := fs.dataToChunk(ctx, fileName, contentType, buf.Bytes(), offset, so)
+		chunks, toChunkErr := fs.dataToChunkWithSSE(ctx, r, fileName, contentType, buf.Bytes(), offset, so)
 		if toChunkErr != nil {
 			uploadErrLock.Lock()
 			if uploadErr == nil {
@@ -193,6 +197,10 @@ func (fs *FilerServer) doUpload(ctx context.Context, urlLocation string, limited
 }
 
 func (fs *FilerServer) dataToChunk(ctx context.Context, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
+	return fs.dataToChunkWithSSE(ctx, nil, fileName, contentType, data, chunkOffset, so)
+}
+
+func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
 	dataReader := util.NewBytesReader(data)
 
 	// retry to assign a different file id
@@ -235,5 +243,83 @@ func (fs *FilerServer) dataToChunk(ctx context.Context, fileName, contentType st
 	if uploadResult.Size == 0 {
 		return nil, nil
 	}
-	return []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano())}, nil
+
+	// Extract SSE metadata from request headers if available
+	var sseType filer_pb.SSEType = filer_pb.SSEType_NONE
+	var sseMetadata []byte
+
+	if r != nil {
+
+		// Check for SSE-KMS
+		sseKMSHeaderValue := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader)
+		if sseKMSHeaderValue != "" {
+			sseType = filer_pb.SSEType_SSE_KMS
+			if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeaderValue); err == nil {
+				sseMetadata = kmsData
+				glog.V(4).InfofCtx(ctx, "Storing SSE-KMS metadata for chunk %s at offset %d", fileId, chunkOffset)
+			} else {
+				glog.V(1).InfofCtx(ctx, "Failed to decode SSE-KMS metadata for chunk %s: %v", fileId, err)
+			}
+		} else if r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" {
+			// SSE-C: Create per-chunk metadata for unified handling
+			sseType = filer_pb.SSEType_SSE_C
+
+			// Get SSE-C metadata from headers to create unified per-chunk metadata
+			sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader)
+			keyMD5Header := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5)
+
+			if sseIVHeader != "" && keyMD5Header != "" {
+				// Decode IV from header
+				if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil {
+					// Create SSE-C metadata with chunk offset = chunkOffset for proper IV calculation
+					ssecMetadataStruct := struct {
+						Algorithm  string `json:"algorithm"`
+						IV         string `json:"iv"`
+						KeyMD5     string `json:"keyMD5"`
+						PartOffset int64  `json:"partOffset"`
+					}{
+						Algorithm:  "AES256",
+						IV:         base64.StdEncoding.EncodeToString(ivData),
+						KeyMD5:     keyMD5Header,
+						PartOffset: chunkOffset,
+					}
+					if ssecMetadata, serErr := json.Marshal(ssecMetadataStruct); serErr == nil {
+						sseMetadata = ssecMetadata
+					} else {
+						glog.V(1).InfofCtx(ctx, "Failed to serialize SSE-C metadata for chunk %s: %v", fileId, serErr)
+					}
+				} else {
+					glog.V(1).InfofCtx(ctx, "Failed to decode SSE-C IV for chunk %s: %v", fileId, err)
+				}
+			} else {
+				glog.V(4).InfofCtx(ctx, "SSE-C chunk %s missing IV or KeyMD5 header", fileId)
+			}
+		} else if r.Header.Get(s3_constants.SeaweedFSSSES3Key) != "" {
+			// SSE-S3: Server-side encryption with server-managed keys
+			// Set the correct SSE type for SSE-S3 chunks to maintain proper tracking
+			sseType = filer_pb.SSEType_SSE_S3
+
+			// Get SSE-S3 metadata from headers
+			sseS3Header := r.Header.Get(s3_constants.SeaweedFSSSES3Key)
+			if sseS3Header != "" {
+				if s3Data, err := base64.StdEncoding.DecodeString(sseS3Header); err == nil {
+					// For SSE-S3, store metadata at chunk level for consistency with SSE-KMS/SSE-C
+					glog.V(4).InfofCtx(ctx, "Storing SSE-S3 metadata for chunk %s at offset %d", fileId, chunkOffset)
+					sseMetadata = s3Data
+				} else {
+					glog.V(1).InfofCtx(ctx, "Failed to decode SSE-S3 metadata for chunk %s: %v", fileId, err)
+				}
+			}
+		}
+	}
+
+	// Create chunk with SSE metadata if available
+	var chunk *filer_pb.FileChunk
+	if sseType != filer_pb.SSEType_NONE {
+		chunk = uploadResult.ToPbFileChunkWithSSE(fileId, chunkOffset, time.Now().UnixNano(), sseType, sseMetadata)
+	} else {
+		chunk = uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano())
+	}
+
+	return []*filer_pb.FileChunk{chunk}, nil
 }
```
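For SSE-C, each chunk gets its own JSON metadata blob whose `partOffset` records where the chunk starts within the object, so a reader can derive the correct IV position when decrypting one chunk in isolation. A sketch of what gets marshalled, mirroring the anonymous struct in `dataToChunkWithSSE` (field values are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// ssecChunkMetadata mirrors the anonymous struct serialized per chunk in
// dataToChunkWithSSE. PartOffset records where the chunk starts within
// the object, which the reader needs to line up the cipher stream.
type ssecChunkMetadata struct {
	Algorithm  string `json:"algorithm"`
	IV         string `json:"iv"`
	KeyMD5     string `json:"keyMD5"`
	PartOffset int64  `json:"partOffset"`
}

func main() {
	meta := ssecChunkMetadata{
		Algorithm:  "AES256",
		IV:         base64.StdEncoding.EncodeToString([]byte("0123456789abcdef")), // illustrative IV
		KeyMD5:     "mR4Kf1N3kQ8cE5rHfY2b7g==",                                    // illustrative key MD5
		PartOffset: 4 * 1024 * 1024,                                               // chunk begins 4 MiB into the object
	}
	b, _ := json.Marshal(meta)
	fmt.Println(string(b))
}
```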
