author    Chris Lu <chrislusf@users.noreply.github.com>	2025-07-11 18:51:32 -0700
committer GitHub <noreply@github.com>	2025-07-11 18:51:32 -0700
commit    d892538d3271b6b8010194b5ac36152be00abf0b (patch)
tree      dcdf9dc4267a8fee65bc90c216b044e953b68281 /test
parent    4fcbdc1f612eabbeb5c97b6d54a95ae20f61820e (diff)
More efficient copy object (#6665)
* it compiles
* refactored
* reduce to 4 concurrent chunk uploads
* CopyObjectPartHandler
* copy a range of the chunk data, fix offset size in copied chunks
* Update s3api_object_handlers_copy.go

What the PR Accomplishes:

* CopyObjectHandler - now copies entire objects by copying chunks individually instead of downloading/uploading the entire file
* CopyObjectPartHandler - handles copying parts of objects for multipart uploads by copying only the relevant chunk portions
* Efficient Chunk Copying - uses direct chunk-to-chunk copying with proper volume assignment and concurrent processing (limited to 4 concurrent operations)
* Range Support - properly handles range-based copying for partial object copies

* fix compilation
* fix part destination
* handling small objects
* use mkFile
* copy to existing file or part
* add testing tools
* adjust tests
* fix chunk lookup
* refactoring
* fix TestObjectCopyRetainingMetadata
* ensure bucket name not conflicting
* fix conditional copying tests
* remove debug messages
* add custom s3 copy tests
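To illustrate the bounded-concurrency chunk copying described above, here is a minimal Go sketch of the pattern (at most 4 copies in flight). It is illustrative only: the `Chunk` type and `copyChunk` function are placeholders, not the PR's actual code in `s3api_object_handlers_copy.go`.

```go
package main

import (
	"fmt"
	"sync"
)

// Chunk stands in for a SeaweedFS file chunk reference (placeholder type).
type Chunk struct {
	FileID string
	Offset int64
	Size   int64
}

// copyChunk is a placeholder for the per-chunk copy step (volume assignment
// plus chunk data transfer in the real handler).
func copyChunk(c Chunk) (Chunk, error) {
	return Chunk{FileID: "copied-" + c.FileID, Offset: c.Offset, Size: c.Size}, nil
}

// copyChunksConcurrently copies chunks with at most `limit` copies in flight,
// mirroring the "4 concurrent chunk uploads" limit mentioned above.
func copyChunksConcurrently(chunks []Chunk, limit int) ([]Chunk, error) {
	sem := make(chan struct{}, limit) // counting semaphore
	out := make([]Chunk, len(chunks))
	errs := make([]error, len(chunks))
	var wg sync.WaitGroup

	for i, c := range chunks {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func(i int, c Chunk) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			out[i], errs[i] = copyChunk(c)
		}(i, c)
	}
	wg.Wait()

	for _, err := range errs {
		if err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	copied, err := copyChunksConcurrently([]Chunk{{FileID: "3,0123"}, {FileID: "3,0456"}}, 4)
	fmt.Println(copied, err)
}
```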
Diffstat (limited to 'test')
-rw-r--r--  test/s3/copying/Makefile             234
-rw-r--r--  test/s3/copying/README.md            325
-rw-r--r--  test/s3/copying/s3_copying_test.go  1014
-rw-r--r--  test/s3/copying/test_config.json       9
4 files changed, 1582 insertions(+), 0 deletions(-)
diff --git a/test/s3/copying/Makefile b/test/s3/copying/Makefile
new file mode 100644
index 000000000..81e3fc19d
--- /dev/null
+++ b/test/s3/copying/Makefile
@@ -0,0 +1,234 @@
+# Makefile for S3 Copying Tests
+# This Makefile provides targets for running comprehensive S3 copying tests
+
+# Default values
+SEAWEEDFS_BINARY ?= weed
+S3_PORT ?= 8333
+FILER_PORT ?= 8888
+VOLUME_PORT ?= 8080
+MASTER_PORT ?= 9333
+TEST_TIMEOUT ?= 10m
+BUCKET_PREFIX ?= test-copying-
+ACCESS_KEY ?= some_access_key1
+SECRET_KEY ?= some_secret_key1
+VOLUME_MAX_SIZE_MB ?= 50
+
+# Test directory
+TEST_DIR := $(shell pwd)
+SEAWEEDFS_ROOT := $(shell cd ../../../ && pwd)
+
+# Colors for output
+RED := \033[0;31m
+GREEN := \033[0;32m
+YELLOW := \033[1;33m
+NC := \033[0m # No Color
+
+.PHONY: all test test-basic test-quick test-full test-multipart test-conditional clean start-seaweedfs stop-seaweedfs check-binary help debug-logs debug-status manual-start manual-stop ci-test benchmark stress perf
+
+all: test-basic
+
+help:
+ @echo "SeaweedFS S3 Copying Tests"
+ @echo ""
+ @echo "Available targets:"
+ @echo " test-basic - Run basic S3 put/get tests first"
+ @echo " test - Run all S3 copying tests"
+ @echo " test-quick - Run quick tests only"
+ @echo " test-full - Run full test suite including large files"
+ @echo " start-seaweedfs - Start SeaweedFS server for testing"
+ @echo " stop-seaweedfs - Stop SeaweedFS server"
+ @echo " clean - Clean up test artifacts"
+ @echo " check-binary - Check if SeaweedFS binary exists"
+ @echo ""
+ @echo "Configuration:"
+ @echo " SEAWEEDFS_BINARY=$(SEAWEEDFS_BINARY)"
+ @echo " S3_PORT=$(S3_PORT)"
+ @echo " FILER_PORT=$(FILER_PORT)"
+ @echo " VOLUME_PORT=$(VOLUME_PORT)"
+ @echo " MASTER_PORT=$(MASTER_PORT)"
+ @echo " TEST_TIMEOUT=$(TEST_TIMEOUT)"
+ @echo " VOLUME_MAX_SIZE_MB=$(VOLUME_MAX_SIZE_MB)"
+
+check-binary:
+ @if ! command -v $(SEAWEEDFS_BINARY) > /dev/null 2>&1; then \
+ echo "$(RED)Error: SeaweedFS binary '$(SEAWEEDFS_BINARY)' not found in PATH$(NC)"; \
+ echo "Please build SeaweedFS first by running 'make' in the root directory"; \
+ exit 1; \
+ fi
+ @echo "$(GREEN)SeaweedFS binary found: $$(which $(SEAWEEDFS_BINARY))$(NC)"
+
+start-seaweedfs: check-binary
+ @echo "$(YELLOW)Starting SeaweedFS server...$(NC)"
+ @pkill -f "weed master" || true
+ @pkill -f "weed volume" || true
+ @pkill -f "weed filer" || true
+ @pkill -f "weed s3" || true
+ @sleep 2
+
+ # Create necessary directories
+ @mkdir -p /tmp/seaweedfs-test-copying-master
+ @mkdir -p /tmp/seaweedfs-test-copying-volume
+
+ # Start master server with volume size limit
+ @nohup $(SEAWEEDFS_BINARY) master -port=$(MASTER_PORT) -mdir=/tmp/seaweedfs-test-copying-master -volumeSizeLimitMB=$(VOLUME_MAX_SIZE_MB) -ip=127.0.0.1 > /tmp/seaweedfs-master.log 2>&1 &
+ @sleep 3
+
+ # Start volume server
+ @nohup $(SEAWEEDFS_BINARY) volume -port=$(VOLUME_PORT) -mserver=127.0.0.1:$(MASTER_PORT) -dir=/tmp/seaweedfs-test-copying-volume -ip=127.0.0.1 > /tmp/seaweedfs-volume.log 2>&1 &
+ @sleep 3
+
+ # Start filer server (using standard SeaweedFS gRPC port convention: HTTP port + 10000)
+ @nohup $(SEAWEEDFS_BINARY) filer -port=$(FILER_PORT) -port.grpc=$$(( $(FILER_PORT) + 10000 )) -master=127.0.0.1:$(MASTER_PORT) -ip=127.0.0.1 > /tmp/seaweedfs-filer.log 2>&1 &
+ @sleep 3
+
+ # Create S3 configuration
+ @echo '{"identities":[{"name":"$(ACCESS_KEY)","credentials":[{"accessKey":"$(ACCESS_KEY)","secretKey":"$(SECRET_KEY)"}],"actions":["Admin","Read","Write"]}]}' > /tmp/seaweedfs-s3.json
+
+ # Start S3 server
+ @nohup $(SEAWEEDFS_BINARY) s3 -port=$(S3_PORT) -filer=127.0.0.1:$(FILER_PORT) -config=/tmp/seaweedfs-s3.json -ip.bind=127.0.0.1 > /tmp/seaweedfs-s3.log 2>&1 &
+ @sleep 5
+
+ # Wait for S3 service to be ready
+ @echo "$(YELLOW)Waiting for S3 service to be ready...$(NC)"
+ @for i in $$(seq 1 30); do \
+ if curl -s -f http://127.0.0.1:$(S3_PORT) > /dev/null 2>&1; then \
+ echo "$(GREEN)S3 service is ready$(NC)"; \
+ break; \
+ fi; \
+ echo "Waiting for S3 service... ($$i/30)"; \
+ sleep 1; \
+ done
+
+ # Additional wait for filer gRPC to be ready
+ @echo "$(YELLOW)Waiting for filer gRPC to be ready...$(NC)"
+ @sleep 2
+ @echo "$(GREEN)SeaweedFS server started successfully$(NC)"
+ @echo "Master: http://localhost:$(MASTER_PORT)"
+ @echo "Volume: http://localhost:$(VOLUME_PORT)"
+ @echo "Filer: http://localhost:$(FILER_PORT)"
+ @echo "S3: http://localhost:$(S3_PORT)"
+ @echo "Volume Max Size: $(VOLUME_MAX_SIZE_MB)MB"
+
+stop-seaweedfs:
+ @echo "$(YELLOW)Stopping SeaweedFS server...$(NC)"
+ @pkill -f "weed master" || true
+ @pkill -f "weed volume" || true
+ @pkill -f "weed filer" || true
+ @pkill -f "weed s3" || true
+ @sleep 2
+ @echo "$(GREEN)SeaweedFS server stopped$(NC)"
+
+clean:
+ @echo "$(YELLOW)Cleaning up test artifacts...$(NC)"
+ @rm -rf /tmp/seaweedfs-test-copying-*
+ @rm -f /tmp/seaweedfs-*.log
+ @rm -f /tmp/seaweedfs-s3.json
+ @echo "$(GREEN)Cleanup completed$(NC)"
+
+test-basic: check-binary
+ @echo "$(YELLOW)Running basic S3 put/get tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting basic tests...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasic" ./test/s3/copying || (echo "$(RED)Basic tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Basic tests completed successfully!$(NC)"
+
+test: test-basic
+ @echo "$(YELLOW)Running S3 copying tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting tests...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "Test.*" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)All tests completed successfully!$(NC)"
+
+test-quick: check-binary
+ @echo "$(YELLOW)Running quick S3 copying tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting quick tests...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectCopy|TestCopyObjectIf" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Quick tests completed successfully!$(NC)"
+
+test-full: check-binary
+ @echo "$(YELLOW)Running full S3 copying test suite...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting full test suite...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=30m -run "Test.*" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Full test suite completed successfully!$(NC)"
+
+test-multipart: check-binary
+ @echo "$(YELLOW)Running multipart copying tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting multipart tests...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestMultipart" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Multipart tests completed successfully!$(NC)"
+
+test-conditional: check-binary
+ @echo "$(YELLOW)Running conditional copying tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @echo "$(GREEN)Starting conditional tests...$(NC)"
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestCopyObjectIf" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Conditional tests completed successfully!$(NC)"
+
+# Debug targets
+debug-logs:
+ @echo "$(YELLOW)=== Master Log ===$(NC)"
+ @tail -n 50 /tmp/seaweedfs-master.log || echo "No master log found"
+ @echo "$(YELLOW)=== Volume Log ===$(NC)"
+ @tail -n 50 /tmp/seaweedfs-volume.log || echo "No volume log found"
+ @echo "$(YELLOW)=== Filer Log ===$(NC)"
+ @tail -n 50 /tmp/seaweedfs-filer.log || echo "No filer log found"
+ @echo "$(YELLOW)=== S3 Log ===$(NC)"
+ @tail -n 50 /tmp/seaweedfs-s3.log || echo "No S3 log found"
+
+debug-status:
+ @echo "$(YELLOW)=== Process Status ===$(NC)"
+ @ps aux | grep -E "(weed|seaweedfs)" | grep -v grep || echo "No SeaweedFS processes found"
+ @echo "$(YELLOW)=== Port Status ===$(NC)"
+ @netstat -an | grep -E "($(MASTER_PORT)|$(VOLUME_PORT)|$(FILER_PORT)|$(S3_PORT))" || echo "No ports in use"
+
+# Manual test targets for development
+manual-start: start-seaweedfs
+ @echo "$(GREEN)SeaweedFS is now running for manual testing$(NC)"
+ @echo "Run 'make manual-stop' when finished"
+
+manual-stop: stop-seaweedfs clean
+
+# CI/CD targets
+ci-test: test-quick
+
+# Benchmark targets
+benchmark: check-binary
+ @echo "$(YELLOW)Running S3 copying benchmarks...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=30m -bench=. -run=Benchmark ./test/s3/copying || (echo "$(RED)Benchmarks failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Benchmarks completed!$(NC)"
+
+# Stress test
+stress: check-binary
+ @echo "$(YELLOW)Running S3 copying stress tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run="TestMultipartCopyMultipleSizes" -count=10 ./test/s3/copying || (echo "$(RED)Stress tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+ @echo "$(GREEN)Stress tests completed!$(NC)"
+
+# Performance test with larger files
+perf: check-binary
+ @echo "$(YELLOW)Running S3 copying performance tests...$(NC)"
+ @$(MAKE) start-seaweedfs
+ @sleep 5
+ @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run="TestMultipartCopyMultipleSizes" ./test/s3/copying || (echo "$(RED)Performance tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1)
+ @$(MAKE) stop-seaweedfs
+	@echo "$(GREEN)Performance tests completed!$(NC)"
\ No newline at end of file
diff --git a/test/s3/copying/README.md b/test/s3/copying/README.md
new file mode 100644
index 000000000..a5605e196
--- /dev/null
+++ b/test/s3/copying/README.md
@@ -0,0 +1,325 @@
+# SeaweedFS S3 Copying Tests
+
+This directory contains comprehensive Go tests for SeaweedFS S3 copying functionality, converted from the failing Python tests in the s3-tests repository.
+
+## Overview
+
+These tests verify that SeaweedFS correctly implements S3 operations, starting with basic put/get operations and progressing to advanced copy operations, including:
+- **Basic S3 Operations**: Put/Get operations, bucket management, and metadata handling
+- **Basic object copying** within the same bucket
+- **Cross-bucket copying** between different buckets
+- **Multipart copy operations** for large files
+- **Conditional copy operations** using ETag-based preconditions
+- **Metadata handling** during copy operations
+- **ACL handling** during copy operations
+
+## Test Coverage
+
+### Basic S3 Operations (Run First)
+- **TestBasicPutGet**: Tests fundamental S3 put/get operations with various object types
+- **TestBasicBucketOperations**: Tests bucket creation, listing, and deletion
+- **TestBasicLargeObject**: Tests handling of larger objects (up to 10MB)
+
+### Basic Copy Operations
+- **TestObjectCopySameBucket**: Tests copying objects within the same bucket
+- **TestObjectCopyDiffBucket**: Tests copying objects to different buckets
+- **TestObjectCopyCannedAcl**: Tests copying with ACL settings
+- **TestObjectCopyRetainingMetadata**: Tests metadata preservation during copy
+
+### Multipart Copy Operations
+- **TestMultipartCopySmall**: Tests multipart copying of small files
+- **TestMultipartCopyWithoutRange**: Tests multipart copying without range specification
+- **TestMultipartCopySpecialNames**: Tests multipart copying with special character names
+- **TestMultipartCopyMultipleSizes**: Tests multipart copying with various file sizes
+
+### Conditional Copy Operations
+- **TestCopyObjectIfMatchGood**: Tests copying with a matching ETag for `CopySourceIfMatch` (should succeed)
+- **TestCopyObjectIfMatchFailed**: Tests copying with a non-matching ETag for `CopySourceIfMatch` (should fail)
+- **TestCopyObjectIfNoneMatchFailed**: Tests copying with a non-matching ETag for `CopySourceIfNoneMatch` (should succeed)
+- **TestCopyObjectIfNoneMatchGood**: Tests copying with a matching ETag for `CopySourceIfNoneMatch` (should fail)
+
+## Requirements
+
+1. **Go 1.19+**: Required for AWS SDK v2 and modern Go features
+2. **SeaweedFS Binary**: Built from source (`../../../weed/weed`)
+3. **Free Ports**: 8333 (S3), 8888 (Filer), 8080 (Volume), 9333 (Master)
+4. **Dependencies**: Uses the main repository's go.mod with existing AWS SDK v2 and testify dependencies
+
+## Quick Start
+
+### 1. Build SeaweedFS
+```bash
+cd ../../../
+make
+```
+
+### 2. Run Tests
+```bash
+# Run basic S3 operations first (recommended)
+make test-basic
+
+# Run all tests (starts with basic, then copy tests)
+make test
+
+# Run quick tests only
+make test-quick
+
+# Run multipart tests only
+make test-multipart
+
+# Run conditional tests only
+make test-conditional
+```
+
+## Available Make Targets
+
+### Basic Test Execution
+- `make test-basic` - Run basic S3 put/get operations (recommended first)
+- `make test` - Run all S3 tests (starts with basic, then copying)
+- `make test-quick` - Run quick tests only (basic copying)
+- `make test-full` - Run full test suite including large files
+- `make test-multipart` - Run multipart copying tests only
+- `make test-conditional` - Run conditional copying tests only
+
+### Server Management
+- `make start-seaweedfs` - Start SeaweedFS server for testing
+- `make stop-seaweedfs` - Stop SeaweedFS server
+- `make manual-start` - Start server for manual testing
+- `make manual-stop` - Stop server and clean up
+
+### Debugging
+- `make debug-logs` - Show recent log entries from all services
+- `make debug-status` - Show process and port status
+- `make check-binary` - Verify SeaweedFS binary exists
+
+### Performance Testing
+- `make benchmark` - Run performance benchmarks
+- `make stress` - Run stress tests with multiple iterations
+- `make perf` - Run performance tests with large files
+
+### Cleanup
+- `make clean` - Clean up test artifacts and temporary files
+
+## Configuration
+
+The tests use the following default configuration:
+
+```json
+{
+ "endpoint": "http://localhost:8333",
+ "access_key": "some_access_key1",
+ "secret_key": "some_secret_key1",
+ "region": "us-east-1",
+ "bucket_prefix": "test-copying-",
+ "use_ssl": false,
+ "skip_verify_ssl": true
+}
+```
+
+You can modify these values in `test_config.json` or by setting environment variables:
+
+```bash
+export SEAWEEDFS_BINARY=/path/to/weed
+export S3_PORT=8333
+export FILER_PORT=8888
+export VOLUME_PORT=8080
+export MASTER_PORT=9333
+export TEST_TIMEOUT=10m
+export VOLUME_MAX_SIZE_MB=50
+```
+
+**Note**: The volume size limit is set to 50MB to ensure proper testing of volume boundaries and multipart operations.
+
+## Test Details
+
+### TestBasicPutGet
+- Tests fundamental S3 put/get operations with various object types:
+ - Simple text objects
+ - Empty objects
+ - Binary objects (1KB random data)
+ - Objects with metadata and content-type
+- Verifies ETag consistency between put and get operations
+- Tests metadata preservation
+
+### TestBasicBucketOperations
+- Tests bucket creation and existence verification
+- Tests object listing in buckets
+- Tests object creation and listing with directory-like prefixes
+- Tests bucket deletion and cleanup
+- Verifies proper error handling for operations on non-existent buckets
+
+### TestBasicLargeObject
+- Tests handling of progressively larger objects:
+ - 1KB, 10KB, 100KB, 1MB, 5MB, 10MB
+- Verifies data integrity for large objects
+- Tests memory handling and streaming for large files
+- Ensures proper handling up to the 50MB volume limit
+
+### TestObjectCopySameBucket
+- Creates a bucket with a source object
+- Copies the object to a different key within the same bucket
+- Verifies the copied object has the same content
+
+### TestObjectCopyDiffBucket
+- Creates source and destination buckets
+- Copies an object from source to destination bucket
+- Verifies the copied object has the same content
+
+### TestObjectCopyCannedAcl
+- Tests copying with ACL settings (`public-read`)
+- Tests metadata replacement during copy with ACL
+- Verifies both basic copying and metadata handling
+
+### TestObjectCopyRetainingMetadata
+- Tests with different file sizes (3 bytes, 1MB)
+- Verifies metadata and content-type preservation
+- Checks that all metadata is correctly copied
+
+### TestMultipartCopySmall
+- Tests multipart copy with 1-byte files
+- Uses range-based copying (`bytes=0-0`)
+- Verifies multipart upload completion
+
+### TestMultipartCopyWithoutRange
+- Tests multipart copy without specifying range
+- Should copy entire source object
+- Verifies correct content length and data
+
+### TestMultipartCopySpecialNames
+- Tests with special character names: `" "`, `"_"`, `"__"`, `"?versionId"`
+- Verifies proper URL encoding and handling
+- Each special name is tested in isolation
+
+### TestMultipartCopyMultipleSizes
+- Tests with various copy sizes:
+ - 5MB (single part)
+ - 5MB + 100KB (multi-part)
+ - 5MB + 600KB (multi-part)
+ - 10MB + 100KB (multi-part)
+ - 10MB + 600KB (multi-part)
+ - 10MB (exact multi-part boundary)
+- Uses 5MB part size for all copies
+- Verifies data integrity across all sizes
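+
+For example, with the 5 MB part size used here, the "5MB + 600KB" case (5,857,280 bytes) is copied as two ranges: part 1 covers `bytes=0-5242879` (5 MB) and part 2 covers `bytes=5242880-5857279` (the remaining 600 KB).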
+
+### TestCopyObjectIfMatchGood
+- Tests conditional copy with matching ETag
+- Should succeed when ETag matches
+- Verifies successful copy operation
+
+### TestCopyObjectIfMatchFailed
+- Tests conditional copy with non-matching ETag
+- Should fail with precondition error
+- Verifies proper error handling
+
+### TestCopyObjectIfNoneMatchFailed
+- Tests conditional copy with non-matching ETag for IfNoneMatch
+- Should succeed when ETag doesn't match
+- Verifies successful copy operation
+
+### TestCopyObjectIfNoneMatchGood
+- Tests conditional copy with matching ETag for IfNoneMatch
+- Should fail with precondition error
+- Verifies proper error handling
+
+## Expected Behavior
+
+These tests verify that SeaweedFS correctly implements:
+
+1. **Basic S3 Operations**: Standard `PutObject`, `GetObject`, `ListBuckets`, `ListObjects` APIs
+2. **Bucket Management**: Bucket creation, deletion, and listing
+3. **Object Storage**: Binary and text data storage with metadata
+4. **Large Object Handling**: Efficient storage and retrieval of large files
+5. **Basic S3 Copy Operations**: Standard `CopyObject` API
+6. **Multipart Copy Operations**: `UploadPartCopy` API with range support
+7. **Conditional Operations**: ETag-based conditional copying
+8. **Metadata Handling**: Proper metadata preservation and replacement
+9. **ACL Handling**: Access control list management during copy
+10. **Error Handling**: Proper error responses for invalid operations
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Port Already in Use**
+ ```bash
+ make stop-seaweedfs
+ make clean
+ ```
+
+2. **SeaweedFS Binary Not Found**
+ ```bash
+ cd ../../../
+ make
+ ```
+
+3. **Test Timeouts**
+ ```bash
+ export TEST_TIMEOUT=30m
+ make test
+ ```
+
+4. **Permission Denied**
+ ```bash
+ sudo make clean
+ ```
+
+### Debug Information
+
+```bash
+# Check server status
+make debug-status
+
+# View recent logs
+make debug-logs
+
+# Manual server start for investigation
+make manual-start
+# ... perform manual testing ...
+make manual-stop
+```
+
+### Log Locations
+
+When running tests, logs are stored in:
+- Master: `/tmp/seaweedfs-master.log`
+- Volume: `/tmp/seaweedfs-volume.log`
+- Filer: `/tmp/seaweedfs-filer.log`
+- S3: `/tmp/seaweedfs-s3.log`
+
+## Contributing
+
+When adding new tests:
+
+1. Follow the existing naming convention (`TestXxxYyy`)
+2. Use the helper functions for common operations
+3. Add cleanup with `defer deleteBucket(t, client, bucketName)`
+4. Include error checking with `require.NoError(t, err)`
+5. Use assertions with `assert.Equal(t, expected, actual)`
+6. Add the test to the appropriate Make target
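+
+A new test following these conventions might look like the sketch below. It is illustrative only (`TestObjectCopyExample` is a made-up name) and reuses the `getS3Client`, `getNewBucketName`, `createBucket`, `deleteBucket`, `putObject`, `getObject`, `getObjectBody`, and `createCopySource` helpers defined in `s3_copying_test.go`:
+
+```go
+func TestObjectCopyExample(t *testing.T) {
+	client := getS3Client(t)
+	bucketName := getNewBucketName()
+
+	createBucket(t, client, bucketName)
+	defer deleteBucket(t, client, bucketName)
+
+	// Put a source object, copy it within the bucket, and verify the copy.
+	putObject(t, client, bucketName, "source-key", "hello")
+	_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+		Bucket:     aws.String(bucketName),
+		Key:        aws.String("dest-key"),
+		CopySource: aws.String(createCopySource(bucketName, "source-key")),
+	})
+	require.NoError(t, err)
+
+	body := getObjectBody(t, getObject(t, client, bucketName, "dest-key"))
+	assert.Equal(t, "hello", body)
+}
+```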
+
+## Performance Notes
+
+- **TestMultipartCopyMultipleSizes** is the most resource-intensive test
+- Large file tests may take several minutes to complete
+- Memory usage scales with file sizes being tested
+- Network latency affects multipart copy performance
+
+## Integration with CI/CD
+
+For automated testing:
+
+```bash
+# Basic validation (recommended first)
+make test-basic
+
+# Quick validation
+make ci-test
+
+# Full validation
+make test-full
+
+# Performance validation
+make perf
+```
+
+The tests are designed to be self-contained and can run in containerized environments.
\ No newline at end of file
diff --git a/test/s3/copying/s3_copying_test.go b/test/s3/copying/s3_copying_test.go
new file mode 100644
index 000000000..4bad01de4
--- /dev/null
+++ b/test/s3/copying/s3_copying_test.go
@@ -0,0 +1,1014 @@
+package copying_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "fmt"
+ "io"
+ mathrand "math/rand"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// S3TestConfig holds configuration for S3 tests
+type S3TestConfig struct {
+ Endpoint string
+ AccessKey string
+ SecretKey string
+ Region string
+ BucketPrefix string
+ UseSSL bool
+ SkipVerifySSL bool
+}
+
+// Default test configuration - should match test_config.json
+var defaultConfig = &S3TestConfig{
+	Endpoint:      "http://127.0.0.1:8333", // Use explicit IPv4 address; must match S3_PORT (8333) used by the Makefile and test_config.json
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ Region: "us-east-1",
+ BucketPrefix: "test-copying-",
+ UseSSL: false,
+ SkipVerifySSL: true,
+}
+
+// Initialize math/rand with current time to ensure randomness
+func init() {
+ mathrand.Seed(time.Now().UnixNano())
+}
+
+// getS3Client creates an AWS S3 client for testing
+func getS3Client(t *testing.T) *s3.Client {
+ cfg, err := config.LoadDefaultConfig(context.TODO(),
+ config.WithRegion(defaultConfig.Region),
+ config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
+ defaultConfig.AccessKey,
+ defaultConfig.SecretKey,
+ "",
+ )),
+ config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
+ func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ URL: defaultConfig.Endpoint,
+ SigningRegion: defaultConfig.Region,
+ HostnameImmutable: true,
+ }, nil
+ })),
+ )
+ require.NoError(t, err)
+
+ return s3.NewFromConfig(cfg, func(o *s3.Options) {
+ o.UsePathStyle = true // Important for SeaweedFS
+ })
+}
+
+// waitForS3Service waits for the S3 service to be ready
+func waitForS3Service(t *testing.T, client *s3.Client, timeout time.Duration) {
+ start := time.Now()
+ for time.Since(start) < timeout {
+ _, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
+ if err == nil {
+ return
+ }
+ t.Logf("Waiting for S3 service to be ready... (error: %v)", err)
+ time.Sleep(time.Second)
+ }
+ t.Fatalf("S3 service not ready after %v", timeout)
+}
+
+// getNewBucketName generates a unique bucket name
+func getNewBucketName() string {
+ timestamp := time.Now().UnixNano()
+ // Add random suffix to prevent collisions when tests run quickly
+ randomSuffix := mathrand.Intn(100000)
+ return fmt.Sprintf("%s%d-%d", defaultConfig.BucketPrefix, timestamp, randomSuffix)
+}
+
+// cleanupTestBuckets removes any leftover test buckets from previous runs
+func cleanupTestBuckets(t *testing.T, client *s3.Client) {
+ resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
+ if err != nil {
+ t.Logf("Warning: failed to list buckets for cleanup: %v", err)
+ return
+ }
+
+ for _, bucket := range resp.Buckets {
+ bucketName := *bucket.Name
+ // Only delete buckets that match our test prefix
+ if strings.HasPrefix(bucketName, defaultConfig.BucketPrefix) {
+ t.Logf("Cleaning up leftover test bucket: %s", bucketName)
+ deleteBucket(t, client, bucketName)
+ }
+ }
+}
+
+// createBucket creates a new bucket for testing
+func createBucket(t *testing.T, client *s3.Client, bucketName string) {
+ // First, try to delete the bucket if it exists (cleanup from previous failed tests)
+ deleteBucket(t, client, bucketName)
+
+ // Create the bucket
+ _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err)
+}
+
+// deleteBucket deletes a bucket and all its contents
+func deleteBucket(t *testing.T, client *s3.Client, bucketName string) {
+ // First, delete all objects
+ deleteAllObjects(t, client, bucketName)
+
+ // Then delete the bucket
+ _, err := client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ if err != nil {
+ // Only log warnings for actual errors, not "bucket doesn't exist"
+ if !strings.Contains(err.Error(), "NoSuchBucket") {
+ t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err)
+ }
+ }
+}
+
+// deleteAllObjects deletes all objects in a bucket
+func deleteAllObjects(t *testing.T, client *s3.Client, bucketName string) {
+ // List all objects
+ paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+ Bucket: aws.String(bucketName),
+ })
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(context.TODO())
+ if err != nil {
+ // Only log warnings for actual errors, not "bucket doesn't exist"
+ if !strings.Contains(err.Error(), "NoSuchBucket") {
+ t.Logf("Warning: failed to list objects in bucket %s: %v", bucketName, err)
+ }
+ return
+ }
+
+ if len(page.Contents) == 0 {
+ break
+ }
+
+ var objectsToDelete []types.ObjectIdentifier
+ for _, obj := range page.Contents {
+ objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
+ Key: obj.Key,
+ })
+ }
+
+ // Delete objects in batches
+ if len(objectsToDelete) > 0 {
+ _, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
+ Bucket: aws.String(bucketName),
+ Delete: &types.Delete{
+ Objects: objectsToDelete,
+ Quiet: aws.Bool(true),
+ },
+ })
+ if err != nil {
+ t.Logf("Warning: failed to delete objects in bucket %s: %v", bucketName, err)
+ }
+ }
+ }
+}
+
+// putObject puts an object into a bucket
+func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput {
+ resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(key),
+ Body: strings.NewReader(content),
+ })
+ require.NoError(t, err)
+ return resp
+}
+
+// putObjectWithMetadata puts an object with metadata into a bucket
+func putObjectWithMetadata(t *testing.T, client *s3.Client, bucketName, key, content string, metadata map[string]string, contentType string) *s3.PutObjectOutput {
+ input := &s3.PutObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(key),
+ Body: strings.NewReader(content),
+ }
+
+ if metadata != nil {
+ input.Metadata = metadata
+ }
+
+ if contentType != "" {
+ input.ContentType = aws.String(contentType)
+ }
+
+ resp, err := client.PutObject(context.TODO(), input)
+ require.NoError(t, err)
+ return resp
+}
+
+// getObject gets an object from a bucket
+func getObject(t *testing.T, client *s3.Client, bucketName, key string) *s3.GetObjectOutput {
+ resp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(key),
+ })
+ require.NoError(t, err)
+ return resp
+}
+
+// getObjectBody gets the body content of an object
+func getObjectBody(t *testing.T, resp *s3.GetObjectOutput) string {
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ resp.Body.Close()
+ return string(body)
+}
+
+// generateRandomData generates random data of specified size
+func generateRandomData(size int) []byte {
+ data := make([]byte, size)
+ _, err := rand.Read(data)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// createCopySource creates a properly URL-encoded copy source string
+func createCopySource(bucketName, key string) string {
+ // URL encode the key to handle special characters like spaces
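+	// e.g. createCopySource("bucket", "a b") returns "bucket/a%20b"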
+ encodedKey := url.PathEscape(key)
+ return fmt.Sprintf("%s/%s", bucketName, encodedKey)
+}
+
+// TestBasicPutGet tests basic S3 put and get operations
+func TestBasicPutGet(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Test 1: Put and get a simple text object
+ t.Run("Simple text object", func(t *testing.T) {
+ key := "test-simple.txt"
+ content := "Hello, SeaweedFS S3!"
+
+ // Put object
+ putResp := putObject(t, client, bucketName, key, content)
+ assert.NotNil(t, putResp.ETag)
+
+ // Get object
+ getResp := getObject(t, client, bucketName, key)
+ body := getObjectBody(t, getResp)
+ assert.Equal(t, content, body)
+ assert.Equal(t, putResp.ETag, getResp.ETag)
+ })
+
+ // Test 2: Put and get an empty object
+ t.Run("Empty object", func(t *testing.T) {
+ key := "test-empty.txt"
+ content := ""
+
+ putResp := putObject(t, client, bucketName, key, content)
+ assert.NotNil(t, putResp.ETag)
+
+ getResp := getObject(t, client, bucketName, key)
+ body := getObjectBody(t, getResp)
+ assert.Equal(t, content, body)
+ assert.Equal(t, putResp.ETag, getResp.ETag)
+ })
+
+ // Test 3: Put and get a binary object
+ t.Run("Binary object", func(t *testing.T) {
+ key := "test-binary.bin"
+ content := string(generateRandomData(1024)) // 1KB of random data
+
+ putResp := putObject(t, client, bucketName, key, content)
+ assert.NotNil(t, putResp.ETag)
+
+ getResp := getObject(t, client, bucketName, key)
+ body := getObjectBody(t, getResp)
+ assert.Equal(t, content, body)
+ assert.Equal(t, putResp.ETag, getResp.ETag)
+ })
+
+ // Test 4: Put and get object with metadata
+ t.Run("Object with metadata", func(t *testing.T) {
+ key := "test-metadata.txt"
+ content := "Content with metadata"
+ metadata := map[string]string{
+ "author": "test",
+ "description": "test object with metadata",
+ }
+ contentType := "text/plain"
+
+ putResp := putObjectWithMetadata(t, client, bucketName, key, content, metadata, contentType)
+ assert.NotNil(t, putResp.ETag)
+
+ getResp := getObject(t, client, bucketName, key)
+ body := getObjectBody(t, getResp)
+ assert.Equal(t, content, body)
+ assert.Equal(t, putResp.ETag, getResp.ETag)
+ assert.Equal(t, contentType, *getResp.ContentType)
+ assert.Equal(t, metadata["author"], getResp.Metadata["author"])
+ assert.Equal(t, metadata["description"], getResp.Metadata["description"])
+ })
+}
+
+// TestBasicBucketOperations tests basic bucket operations
+func TestBasicBucketOperations(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Test 1: Create bucket
+ t.Run("Create bucket", func(t *testing.T) {
+ createBucket(t, client, bucketName)
+
+ // Verify bucket exists by listing buckets
+ resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
+ require.NoError(t, err)
+
+ found := false
+ for _, bucket := range resp.Buckets {
+ if *bucket.Name == bucketName {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found, "Bucket should exist after creation")
+ })
+
+ // Test 2: Put objects and list them
+ t.Run("List objects", func(t *testing.T) {
+ // Put multiple objects
+ objects := []string{"test1.txt", "test2.txt", "dir/test3.txt"}
+ for _, key := range objects {
+ putObject(t, client, bucketName, key, fmt.Sprintf("content of %s", key))
+ }
+
+ // List objects
+ resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err)
+
+ assert.Equal(t, len(objects), len(resp.Contents))
+
+ // Verify each object exists
+ for _, obj := range resp.Contents {
+ found := false
+ for _, expected := range objects {
+ if *obj.Key == expected {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found, "Object %s should be in list", *obj.Key)
+ }
+ })
+
+ // Test 3: Delete bucket (cleanup)
+ t.Run("Delete bucket", func(t *testing.T) {
+ deleteBucket(t, client, bucketName)
+
+ // Verify bucket is deleted by trying to list its contents
+ _, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
+ Bucket: aws.String(bucketName),
+ })
+ assert.Error(t, err, "Bucket should not exist after deletion")
+ })
+}
+
+// TestBasicLargeObject tests handling of larger objects (up to volume limit)
+func TestBasicLargeObject(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Test with progressively larger objects
+ sizes := []int{
+ 1024, // 1KB
+ 1024 * 10, // 10KB
+ 1024 * 100, // 100KB
+ 1024 * 1024, // 1MB
+ 1024 * 1024 * 5, // 5MB
+ 1024 * 1024 * 10, // 10MB
+ }
+
+ for _, size := range sizes {
+		t.Run(fmt.Sprintf("Size_%dKB", size/1024), func(t *testing.T) {
+ key := fmt.Sprintf("large-object-%d.bin", size)
+ content := string(generateRandomData(size))
+
+ putResp := putObject(t, client, bucketName, key, content)
+ assert.NotNil(t, putResp.ETag)
+
+ getResp := getObject(t, client, bucketName, key)
+ body := getObjectBody(t, getResp)
+ assert.Equal(t, len(content), len(body))
+ assert.Equal(t, content, body)
+ assert.Equal(t, putResp.ETag, getResp.ETag)
+ })
+ }
+}
+
+// TestObjectCopySameBucket tests copying an object within the same bucket
+func TestObjectCopySameBucket(t *testing.T) {
+ client := getS3Client(t)
+
+ // Wait for S3 service to be ready
+ waitForS3Service(t, client, 30*time.Second)
+
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo123bar"
+ sourceContent := "foo"
+ putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object within the same bucket
+ destKey := "bar321foo"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ })
+ require.NoError(t, err, "Failed to copy object within same bucket")
+
+ // Verify the copied object
+ resp := getObject(t, client, bucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+}
+
+// TestObjectCopyDiffBucket tests copying an object to a different bucket
+func TestObjectCopyDiffBucket(t *testing.T) {
+ client := getS3Client(t)
+ sourceBucketName := getNewBucketName()
+ destBucketName := getNewBucketName()
+
+ // Create buckets
+ createBucket(t, client, sourceBucketName)
+ defer deleteBucket(t, client, sourceBucketName)
+ createBucket(t, client, destBucketName)
+ defer deleteBucket(t, client, destBucketName)
+
+ // Put source object
+ sourceKey := "foo123bar"
+ sourceContent := "foo"
+ putObject(t, client, sourceBucketName, sourceKey, sourceContent)
+
+ // Copy object to different bucket
+ destKey := "bar321foo"
+ copySource := createCopySource(sourceBucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, destBucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+}
+
+// TestObjectCopyCannedAcl tests copying with ACL settings
+func TestObjectCopyCannedAcl(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo123bar"
+ sourceContent := "foo"
+ putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object with public-read ACL
+ destKey := "bar321foo"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ ACL: types.ObjectCannedACLPublicRead,
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, bucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+
+ // Test metadata replacement with ACL
+ metadata := map[string]string{"abc": "def"}
+ destKey2 := "foo123bar2"
+ copySource2 := createCopySource(bucketName, destKey)
+ _, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey2),
+ CopySource: aws.String(copySource2),
+ ACL: types.ObjectCannedACLPublicRead,
+ Metadata: metadata,
+ MetadataDirective: types.MetadataDirectiveReplace,
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object with metadata
+ resp2 := getObject(t, client, bucketName, destKey2)
+ body2 := getObjectBody(t, resp2)
+ assert.Equal(t, sourceContent, body2)
+ assert.Equal(t, metadata, resp2.Metadata)
+}
+
+// TestObjectCopyRetainingMetadata tests copying while retaining metadata
+func TestObjectCopyRetainingMetadata(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Test with different sizes
+ sizes := []int{3, 1024 * 1024} // 3 bytes and 1MB
+ for _, size := range sizes {
+ t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
+ sourceKey := fmt.Sprintf("foo123bar_%d", size)
+ sourceContent := string(generateRandomData(size))
+ contentType := "audio/ogg"
+ metadata := map[string]string{"key1": "value1", "key2": "value2"}
+
+ // Put source object with metadata
+ putObjectWithMetadata(t, client, bucketName, sourceKey, sourceContent, metadata, contentType)
+
+ // Copy object (should retain metadata)
+ destKey := fmt.Sprintf("bar321foo_%d", size)
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, bucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+ assert.Equal(t, contentType, *resp.ContentType)
+ assert.Equal(t, metadata, resp.Metadata)
+ require.NotNil(t, resp.ContentLength)
+ assert.Equal(t, int64(size), *resp.ContentLength)
+ })
+ }
+}
+
+// TestMultipartCopySmall tests multipart copying of small files
+func TestMultipartCopySmall(t *testing.T) {
+ client := getS3Client(t)
+
+ // Clean up any leftover buckets from previous test runs
+ cleanupTestBuckets(t, client)
+
+ sourceBucketName := getNewBucketName()
+ destBucketName := getNewBucketName()
+
+ // Create buckets
+ createBucket(t, client, sourceBucketName)
+ defer deleteBucket(t, client, sourceBucketName)
+ createBucket(t, client, destBucketName)
+ defer deleteBucket(t, client, destBucketName)
+
+ // Put source object
+ sourceKey := "foo"
+ sourceContent := "x" // 1 byte
+ putObject(t, client, sourceBucketName, sourceKey, sourceContent)
+
+ // Create multipart upload
+ destKey := "mymultipart"
+ createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err)
+ uploadID := *createResp.UploadId
+
+ // Upload part copy
+ copySource := createCopySource(sourceBucketName, sourceKey)
+ copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ PartNumber: aws.Int32(1),
+ CopySource: aws.String(copySource),
+ CopySourceRange: aws.String("bytes=0-0"),
+ })
+ require.NoError(t, err)
+
+ // Complete multipart upload
+ _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &types.CompletedMultipartUpload{
+ Parts: []types.CompletedPart{
+ {
+ ETag: copyResp.CopyPartResult.ETag,
+ PartNumber: aws.Int32(1),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, destBucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+ require.NotNil(t, resp.ContentLength)
+ assert.Equal(t, int64(1), *resp.ContentLength)
+}
+
+// TestMultipartCopyWithoutRange tests multipart copying without range specification
+func TestMultipartCopyWithoutRange(t *testing.T) {
+ client := getS3Client(t)
+
+ // Clean up any leftover buckets from previous test runs
+ cleanupTestBuckets(t, client)
+
+ sourceBucketName := getNewBucketName()
+ destBucketName := getNewBucketName()
+
+ // Create buckets
+ createBucket(t, client, sourceBucketName)
+ defer deleteBucket(t, client, sourceBucketName)
+ createBucket(t, client, destBucketName)
+ defer deleteBucket(t, client, destBucketName)
+
+ // Put source object
+ sourceKey := "source"
+ sourceContent := string(generateRandomData(10))
+ putObject(t, client, sourceBucketName, sourceKey, sourceContent)
+
+ // Create multipart upload
+ destKey := "mymultipartcopy"
+ createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err)
+ uploadID := *createResp.UploadId
+
+ // Upload part copy without range (should copy entire object)
+ copySource := createCopySource(sourceBucketName, sourceKey)
+ copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ PartNumber: aws.Int32(1),
+ CopySource: aws.String(copySource),
+ })
+ require.NoError(t, err)
+
+ // Complete multipart upload
+ _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &types.CompletedMultipartUpload{
+ Parts: []types.CompletedPart{
+ {
+ ETag: copyResp.CopyPartResult.ETag,
+ PartNumber: aws.Int32(1),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, destBucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+ require.NotNil(t, resp.ContentLength)
+ assert.Equal(t, int64(10), *resp.ContentLength)
+}
+
+// TestMultipartCopySpecialNames tests multipart copying with special character names
+func TestMultipartCopySpecialNames(t *testing.T) {
+ client := getS3Client(t)
+
+ // Clean up any leftover buckets from previous test runs
+ cleanupTestBuckets(t, client)
+
+ sourceBucketName := getNewBucketName()
+ destBucketName := getNewBucketName()
+
+ // Create buckets
+ createBucket(t, client, sourceBucketName)
+ defer deleteBucket(t, client, sourceBucketName)
+ createBucket(t, client, destBucketName)
+ defer deleteBucket(t, client, destBucketName)
+
+ // Test with special key names
+ specialKeys := []string{" ", "_", "__", "?versionId"}
+ sourceContent := "x" // 1 byte
+ destKey := "mymultipart"
+
+ for i, sourceKey := range specialKeys {
+ t.Run(fmt.Sprintf("special_key_%d", i), func(t *testing.T) {
+ // Put source object
+ putObject(t, client, sourceBucketName, sourceKey, sourceContent)
+
+ // Create multipart upload
+ createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err)
+ uploadID := *createResp.UploadId
+
+ // Upload part copy
+ copySource := createCopySource(sourceBucketName, sourceKey)
+ copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ PartNumber: aws.Int32(1),
+ CopySource: aws.String(copySource),
+ CopySourceRange: aws.String("bytes=0-0"),
+ })
+ require.NoError(t, err)
+
+ // Complete multipart upload
+ _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &types.CompletedMultipartUpload{
+ Parts: []types.CompletedPart{
+ {
+ ETag: copyResp.CopyPartResult.ETag,
+ PartNumber: aws.Int32(1),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, destBucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+ require.NotNil(t, resp.ContentLength)
+ assert.Equal(t, int64(1), *resp.ContentLength)
+ })
+ }
+}
+
+// TestMultipartCopyMultipleSizes tests multipart copying with various file sizes
+func TestMultipartCopyMultipleSizes(t *testing.T) {
+ client := getS3Client(t)
+
+ // Clean up any leftover buckets from previous test runs
+ cleanupTestBuckets(t, client)
+
+ sourceBucketName := getNewBucketName()
+ destBucketName := getNewBucketName()
+
+ // Create buckets
+ createBucket(t, client, sourceBucketName)
+ defer deleteBucket(t, client, sourceBucketName)
+ createBucket(t, client, destBucketName)
+ defer deleteBucket(t, client, destBucketName)
+
+ // Put source object (12MB)
+ sourceKey := "foo"
+ sourceSize := 12 * 1024 * 1024
+ sourceContent := generateRandomData(sourceSize)
+ _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
+ Bucket: aws.String(sourceBucketName),
+ Key: aws.String(sourceKey),
+ Body: bytes.NewReader(sourceContent),
+ })
+ require.NoError(t, err)
+
+ destKey := "mymultipart"
+ partSize := 5 * 1024 * 1024 // 5MB parts
+
+ // Test different copy sizes
+ testSizes := []int{
+ 5 * 1024 * 1024, // 5MB
+ 5*1024*1024 + 100*1024, // 5MB + 100KB
+ 5*1024*1024 + 600*1024, // 5MB + 600KB
+ 10*1024*1024 + 100*1024, // 10MB + 100KB
+ 10*1024*1024 + 600*1024, // 10MB + 600KB
+ 10 * 1024 * 1024, // 10MB
+ }
+
+ for _, size := range testSizes {
+ t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
+ // Create multipart upload
+ createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ })
+ require.NoError(t, err)
+ uploadID := *createResp.UploadId
+
+ // Upload parts
+ var parts []types.CompletedPart
+ copySource := createCopySource(sourceBucketName, sourceKey)
+
+ for i := 0; i < size; i += partSize {
+ partNum := int32(len(parts) + 1)
+ endOffset := i + partSize - 1
+ if endOffset >= size {
+ endOffset = size - 1
+ }
+
+ copyRange := fmt.Sprintf("bytes=%d-%d", i, endOffset)
+ copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ PartNumber: aws.Int32(partNum),
+ CopySource: aws.String(copySource),
+ CopySourceRange: aws.String(copyRange),
+ })
+ require.NoError(t, err)
+
+ parts = append(parts, types.CompletedPart{
+ ETag: copyResp.CopyPartResult.ETag,
+ PartNumber: aws.Int32(partNum),
+ })
+ }
+
+ // Complete multipart upload
+ _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String(destBucketName),
+ Key: aws.String(destKey),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &types.CompletedMultipartUpload{
+ Parts: parts,
+ },
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, destBucketName, destKey)
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ resp.Body.Close()
+
+ require.NotNil(t, resp.ContentLength)
+ assert.Equal(t, int64(size), *resp.ContentLength)
+ assert.Equal(t, sourceContent[:size], body)
+ })
+ }
+}
+
+// TestCopyObjectIfMatchGood tests copying with matching ETag condition
+func TestCopyObjectIfMatchGood(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo"
+ sourceContent := "bar"
+ putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object with matching ETag
+ destKey := "bar"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ CopySourceIfMatch: putResp.ETag,
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, bucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+}
+
+// TestCopyObjectIfNoneMatchFailed tests copying with non-matching ETag condition
+func TestCopyObjectIfNoneMatchFailed(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo"
+ sourceContent := "bar"
+ putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object with non-matching ETag (should succeed)
+ destKey := "bar"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ CopySourceIfNoneMatch: aws.String("ABCORZ"),
+ })
+ require.NoError(t, err)
+
+ // Verify the copied object
+ resp := getObject(t, client, bucketName, destKey)
+ body := getObjectBody(t, resp)
+ assert.Equal(t, sourceContent, body)
+}
+
+// TestCopyObjectIfMatchFailed tests copying with non-matching ETag condition (should fail)
+func TestCopyObjectIfMatchFailed(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo"
+ sourceContent := "bar"
+ putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object with non-matching ETag (should fail)
+ destKey := "bar"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ CopySourceIfMatch: aws.String("ABCORZ"),
+ })
+
+ // Should fail with precondition failed
+ require.Error(t, err)
+ // Note: We could check for specific error types, but SeaweedFS might return different error codes
+}
+
+// TestCopyObjectIfNoneMatchGood tests copying with matching ETag condition (should fail)
+func TestCopyObjectIfNoneMatchGood(t *testing.T) {
+ client := getS3Client(t)
+ bucketName := getNewBucketName()
+
+ // Create bucket
+ createBucket(t, client, bucketName)
+ defer deleteBucket(t, client, bucketName)
+
+ // Put source object
+ sourceKey := "foo"
+ sourceContent := "bar"
+ putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
+
+ // Copy object with matching ETag for IfNoneMatch (should fail)
+ destKey := "bar"
+ copySource := createCopySource(bucketName, sourceKey)
+ _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
+ Bucket: aws.String(bucketName),
+ Key: aws.String(destKey),
+ CopySource: aws.String(copySource),
+ CopySourceIfNoneMatch: putResp.ETag,
+ })
+
+ // Should fail with precondition failed
+ require.Error(t, err)
+}
diff --git a/test/s3/copying/test_config.json b/test/s3/copying/test_config.json
new file mode 100644
index 000000000..0453f8501
--- /dev/null
+++ b/test/s3/copying/test_config.json
@@ -0,0 +1,9 @@
+{
+ "endpoint": "http://localhost:8333",
+ "access_key": "some_access_key1",
+ "secret_key": "some_secret_key1",
+ "region": "us-east-1",
+ "bucket_prefix": "test-copying-",
+ "use_ssl": false,
+ "skip_verify_ssl": true
+}
\ No newline at end of file