From 51c2ab0107ce8bc38d6d4f1d3ef3190d4be94161 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 13 Dec 2025 19:04:12 -0800 Subject: fix: admin UI bucket deletion with filer group configured (#7735) --- .github/workflows/s3-filer-group-tests.yml | 85 +++++++ test/s3/filer_group/Makefile | 166 +++++++++++++ test/s3/filer_group/README.md | 99 ++++++++ test/s3/filer_group/s3_filer_group_test.go | 332 ++++++++++++++++++++++++++ test/s3/filer_group/test_config.json | 8 + weed/admin/dash/admin_server.go | 111 +++++---- weed/s3api/s3_constants/buckets.go | 6 + weed/s3api/s3_objectlock/object_lock_check.go | 1 - 8 files changed, 767 insertions(+), 41 deletions(-) create mode 100644 .github/workflows/s3-filer-group-tests.yml create mode 100644 test/s3/filer_group/Makefile create mode 100644 test/s3/filer_group/README.md create mode 100644 test/s3/filer_group/s3_filer_group_test.go create mode 100644 test/s3/filer_group/test_config.json create mode 100644 weed/s3api/s3_constants/buckets.go diff --git a/.github/workflows/s3-filer-group-tests.yml b/.github/workflows/s3-filer-group-tests.yml new file mode 100644 index 000000000..a4711005f --- /dev/null +++ b/.github/workflows/s3-filer-group-tests.yml @@ -0,0 +1,85 @@ +name: "S3 Filer Group Tests" + +on: + pull_request: + +concurrency: + group: ${{ github.head_ref }}/s3-filer-group-tests + cancel-in-progress: true + +permissions: + contents: read + +defaults: + run: + working-directory: weed + +jobs: + s3-filer-group-tests: + name: S3 Filer Group Integration Tests + runs-on: ubuntu-22.04 + timeout-minutes: 20 + + steps: + - name: Check out code + uses: actions/checkout@v6 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + id: go + + - name: Install SeaweedFS + run: | + go install -buildvcs=false + + - name: Run S3 Filer Group Tests + timeout-minutes: 15 + working-directory: test/s3/filer_group + run: | + set -x + echo "=== System Information ===" + uname -a + free -h + df -h + echo "=== Starting Filer Group Tests ===" + + # Run tests with automatic server management + # The test-with-server target handles server startup/shutdown automatically + # Server is started with -filer.group=testgroup + make test-with-server || { + echo "❌ Filer group tests failed, checking logs..." + if [ -f weed-test.log ]; then + echo "=== Server logs ===" + tail -100 weed-test.log + fi + echo "=== Process information ===" + ps aux | grep -E "(weed|test)" || true + exit 1 + } + + - name: Show server logs on failure + if: failure() + working-directory: test/s3/filer_group + run: | + echo "=== Server Logs ===" + if [ -f weed-test.log ]; then + echo "Last 100 lines of server logs:" + tail -100 weed-test.log + else + echo "No server log file found" + fi + + echo "=== Test Environment ===" + ps aux | grep -E "(weed|test)" || true + netstat -tlnp | grep -E "(8333|9333|8080)" || true + + - name: Upload test logs on failure + if: failure() + uses: actions/upload-artifact@v5 + with: + name: s3-filer-group-test-logs + path: test/s3/filer_group/weed-test*.log + retention-days: 3 + diff --git a/test/s3/filer_group/Makefile b/test/s3/filer_group/Makefile new file mode 100644 index 000000000..df243d2b0 --- /dev/null +++ b/test/s3/filer_group/Makefile @@ -0,0 +1,166 @@ +# S3 Filer Group Test Makefile +# This Makefile provides targets for running S3 filer group integration tests +# These tests verify that S3 bucket operations work correctly when a filer group is configured. 
+ +.PHONY: help build-weed check-deps start-server stop-server test test-with-server clean logs health-check + +# Configuration +WEED_BINARY := ../../../weed/weed_binary +S3_PORT := 8333 +MASTER_PORT := 9333 +MASTER_GRPC_PORT := 19333 +VOLUME_PORT := 8080 +FILER_PORT := 8888 +TEST_TIMEOUT := 10m +TEST_PATTERN := Test + +# Filer group configuration +FILER_GROUP := testgroup + +# Default target +help: + @echo "S3 Filer Group Test Makefile" + @echo "" + @echo "Available targets:" + @echo " help - Show this help message" + @echo " build-weed - Build the SeaweedFS binary" + @echo " check-deps - Check dependencies and build binary if needed" + @echo " start-server - Start SeaweedFS server with filer group" + @echo " stop-server - Stop SeaweedFS server" + @echo " test - Run filer group tests (server must be running)" + @echo " test-with-server - Start server, run tests, stop server" + @echo " logs - Show server logs" + @echo " clean - Clean up test artifacts and stop server" + @echo " health-check - Check if server is accessible" + @echo "" + @echo "Configuration:" + @echo " S3_PORT=${S3_PORT}" + @echo " MASTER_GRPC_PORT=${MASTER_GRPC_PORT}" + @echo " FILER_GROUP=${FILER_GROUP}" + @echo " TEST_TIMEOUT=${TEST_TIMEOUT}" + +# Build the SeaweedFS binary +build-weed: + @echo "Building SeaweedFS binary..." + @cd ../../../weed && go build -o weed_binary . + @chmod +x $(WEED_BINARY) + @echo "✅ SeaweedFS binary built at $(WEED_BINARY)" + +check-deps: build-weed + @echo "Checking dependencies..." + @command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1) + @echo "Go version: $$(go version)" + @test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1) + @go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1) + @go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1) + @echo "✅ All dependencies are available" + +# Start SeaweedFS server with filer group configured +start-server: check-deps + @echo "Starting SeaweedFS server with filer group: $(FILER_GROUP)..." + @rm -f weed-server.pid + @mkdir -p ./test-volume-data + @if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \ + echo "⚠️ Port $(S3_PORT) is already in use"; \ + exit 1; \ + fi + @echo "Launching SeaweedFS server with filer group $(FILER_GROUP)..." + @$(WEED_BINARY) server \ + -debug \ + -s3 \ + -s3.port=$(S3_PORT) \ + -s3.allowDeleteBucketNotEmpty=true \ + -s3.config=../../../docker/compose/s3.json \ + -filer \ + -filer.maxMB=64 \ + -filer.filerGroup=$(FILER_GROUP) \ + -master.volumeSizeLimitMB=50 \ + -volume.max=100 \ + -dir=./test-volume-data \ + -volume.preStopSeconds=1 \ + -metricsPort=9325 \ + > weed-test.log 2>&1 & echo $$! > weed-server.pid + @echo "Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')" + @echo "Waiting for server to start (up to 90 seconds)..." 
+ @for i in $$(seq 1 90); do \ + if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ + echo "✅ SeaweedFS server started successfully on port $(S3_PORT) with filer group $(FILER_GROUP)"; \ + exit 0; \ + fi; \ + if [ $$i -eq 30 ]; then \ + echo "⚠️ Server taking longer than expected (30s), checking logs..."; \ + if [ -f weed-test.log ]; then \ + tail -20 weed-test.log; \ + fi; \ + fi; \ + sleep 1; \ + done; \ + echo "❌ Server failed to start within 90 seconds"; \ + if [ -f weed-test.log ]; then \ + cat weed-test.log; \ + fi; \ + exit 1 + +# Stop SeaweedFS server +stop-server: + @echo "Stopping SeaweedFS server..." + @if [ -f weed-server.pid ]; then \ + SERVER_PID=$$(cat weed-server.pid); \ + echo "Killing server PID $$SERVER_PID"; \ + if ps -p $$SERVER_PID >/dev/null 2>&1; then \ + kill -TERM $$SERVER_PID 2>/dev/null || true; \ + sleep 2; \ + if ps -p $$SERVER_PID >/dev/null 2>&1; then \ + echo "Process still running, sending KILL signal..."; \ + kill -KILL $$SERVER_PID 2>/dev/null || true; \ + sleep 1; \ + fi; \ + else \ + echo "Process $$SERVER_PID not found (already stopped)"; \ + fi; \ + rm -f weed-server.pid; \ + else \ + echo "No PID file found"; \ + fi + @echo "✅ SeaweedFS server stopped" + +# Show server logs +logs: + @if test -f weed-test.log; then \ + echo "=== SeaweedFS Server Logs ==="; \ + tail -f weed-test.log; \ + else \ + echo "No log file found. Server may not be running."; \ + fi + +# Run filer group tests (assumes server is already running) +test: check-deps + @echo "Running filer group S3 tests..." + @FILER_GROUP=$(FILER_GROUP) S3_ENDPOINT=http://localhost:$(S3_PORT) MASTER_ADDRESS=localhost:$(MASTER_GRPC_PORT) \ + go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . + @echo "✅ Filer group tests completed" + +# Run tests with automatic server management +test-with-server: start-server + @echo "Server started successfully, running filer group tests..." + @echo "Test pattern: $(TEST_PATTERN)" + @echo "Test timeout: $(TEST_TIMEOUT)" + @trap "$(MAKE) stop-server" EXIT; \ + $(MAKE) test || (echo "❌ Tests failed, showing server logs:" && echo "=== Last 50 lines of server logs ===" && tail -50 weed-test.log && echo "=== End of server logs ===" && exit 1) + @$(MAKE) stop-server + @echo "✅ Tests completed and server stopped" + +# Clean up test artifacts +clean: + @echo "Cleaning up test artifacts..." + @$(MAKE) stop-server + @rm -f weed-test*.log weed-server.pid + @rm -rf test-volume-data/ + @go clean -testcache + @echo "✅ Cleanup completed" + +# Quick health check +health-check: + @echo "Running health check..." + @curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1 && echo "✅ S3 API is accessible" || echo "❌ S3 API is not accessible" + @curl -s http://localhost:9325/metrics >/dev/null 2>&1 && echo "✅ Metrics endpoint is accessible" || echo "❌ Metrics endpoint is not accessible" diff --git a/test/s3/filer_group/README.md b/test/s3/filer_group/README.md new file mode 100644 index 000000000..a0d724afc --- /dev/null +++ b/test/s3/filer_group/README.md @@ -0,0 +1,99 @@ +# Filer Group S3 Tests + +These tests verify that S3 bucket operations work correctly when a filer group is configured. + +## Background + +When SeaweedFS is configured with a filer group (via `-filer.group` option), collections are named with the filer group prefix: + +```text +Collection name = {filerGroup}_{bucketName} +``` + +For example, with filer group `mygroup` and bucket `mybucket`, the collection will be named `mygroup_mybucket`. 
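+
+For illustration, the naming rule boils down to the short Go sketch below (the admin server applies the same rule in its `getCollectionName` helper; the function name here is only for the example):
+
+```go
+// collectionNameFor derives the collection name for a bucket: when a filer
+// group is configured, the group name is prepended with an underscore.
+func collectionNameFor(filerGroup, bucketName string) string {
+	if filerGroup != "" {
+		return filerGroup + "_" + bucketName
+	}
+	return bucketName
+}
+```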
+
+## Issue Being Tested
+
+This test suite was created to verify the fix for a bug where:
+- The admin UI was using just the bucket name when deleting collections
+- This caused collection deletion to fail when a filer group was configured
+- After bucket deletion via admin UI, the collection data would be orphaned
+
+## Running the Tests
+
+### Prerequisites
+
+1. SeaweedFS servers must be running with a filer group configured
+2. The S3 gateway must be accessible
+3. The master server must be accessible for collection verification
+
+### Quick Start
+
+```bash
+# Set environment variables
+export FILER_GROUP=testgroup
+export S3_ENDPOINT=http://localhost:8333
+export MASTER_ADDRESS=localhost:19333   # master gRPC address (HTTP port + 10000)
+
+# Run tests
+go test -v ./...
+```
+
+### Using the Makefile
+
+```bash
+# Start the test server with a filer group configured
+make start-server FILER_GROUP=testgroup
+
+# Run tests
+make test
+
+# Stop the server
+make stop-server
+
+# Or run the full cycle (start server, run tests, stop server)
+make test-with-server
+```
+
+### Configuration
+
+Tests can be configured via:
+
+1. Environment variables:
+   - `FILER_GROUP`: The filer group name (required for tests to run)
+   - `S3_ENDPOINT`: S3 API endpoint (default: `http://localhost:8333`)
+   - `MASTER_ADDRESS`: Master server gRPC address (default: `localhost:19333`)
+
+2. `test_config.json` file
+
+## Test Cases
+
+### TestFilerGroupCollectionNaming
+Verifies that when a bucket is created and objects are uploaded:
+1. The collection is created with the correct filer group prefix
+2. Bucket deletion removes the correctly-named collection
+
+### TestBucketDeletionWithFilerGroup
+Specifically tests that bucket deletion via the S3 API correctly deletes
+the collection when a filer group is configured.
+
+### TestMultipleBucketsWithFilerGroup
+Tests creating and deleting multiple buckets to ensure the filer group
+prefix is correctly applied and removed for all buckets.
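+
+Each test waits for the master to report the collection change by polling rather than sleeping for a fixed time. A simplified sketch of that pattern, assuming the `collectionExists` helper and imports from `s3_filer_group_test.go`:
+
+```go
+// waitForCollection polls the master until the collection shows up or the
+// timeout expires; waitForCollectionExists/Deleted in the test file use the
+// same require.Eventually pattern.
+func waitForCollection(t *testing.T, masterClient master_pb.SeaweedClient, name string) {
+	require.Eventually(t, func() bool {
+		return collectionExists(t, masterClient, name)
+	}, 10*time.Second, 200*time.Millisecond, "collection %s should exist", name)
+}
+```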
+ +## Expected Behavior + +With filer group `testgroup`: + +| Bucket Name | Expected Collection Name | +|-------------|-------------------------| +| mybucket | testgroup_mybucket | +| test-123 | testgroup_test-123 | + +Without filer group: + +| Bucket Name | Expected Collection Name | +|-------------|-------------------------| +| mybucket | mybucket | +| test-123 | test-123 | + diff --git a/test/s3/filer_group/s3_filer_group_test.go b/test/s3/filer_group/s3_filer_group_test.go new file mode 100644 index 000000000..31d581ed9 --- /dev/null +++ b/test/s3/filer_group/s3_filer_group_test.go @@ -0,0 +1,332 @@ +package filer_group + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +// TestConfig holds configuration for filer group S3 tests +type TestConfig struct { + S3Endpoint string `json:"s3_endpoint"` + MasterAddress string `json:"master_address"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + Region string `json:"region"` + FilerGroup string `json:"filer_group"` +} + +var testConfig = &TestConfig{ + S3Endpoint: "http://localhost:8333", + MasterAddress: "localhost:19333", // gRPC port = 10000 + master HTTP port (9333) + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + Region: "us-east-1", + FilerGroup: "testgroup", // Expected filer group for these tests +} + +func init() { + // Load config from file if exists + if data, err := os.ReadFile("test_config.json"); err == nil { + if err := json.Unmarshal(data, testConfig); err != nil { + // Log but don't fail - env vars can still override + fmt.Fprintf(os.Stderr, "Warning: failed to parse test_config.json: %v\n", err) + } + } + + // Override from environment variables + if v := os.Getenv("S3_ENDPOINT"); v != "" { + testConfig.S3Endpoint = v + } + if v := os.Getenv("MASTER_ADDRESS"); v != "" { + testConfig.MasterAddress = v + } + if v := os.Getenv("FILER_GROUP"); v != "" { + testConfig.FilerGroup = v + } +} + +func getS3Client(t *testing.T) *s3.Client { + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion(testConfig.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + testConfig.AccessKey, + testConfig.SecretKey, + "", + )), + ) + require.NoError(t, err) + + return s3.NewFromConfig(cfg, func(o *s3.Options) { + o.BaseEndpoint = aws.String(testConfig.S3Endpoint) + o.UsePathStyle = true + }) +} + +func getMasterClient(t *testing.T) master_pb.SeaweedClient { + conn, err := grpc.NewClient(testConfig.MasterAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + return master_pb.NewSeaweedClient(conn) +} + +func getNewBucketName() string { + return fmt.Sprintf("filergroup-test-%d", time.Now().UnixNano()) +} + +// getExpectedCollectionName returns the expected collection name for a bucket +// When filer group is configured, collections are named: {filerGroup}_{bucketName} +func getExpectedCollectionName(bucketName string) string { + if testConfig.FilerGroup != "" { + return fmt.Sprintf("%s_%s", testConfig.FilerGroup, bucketName) + } + return bucketName +} + +// 
listAllCollections returns a list of all collection names from the master +func listAllCollections(t *testing.T, masterClient master_pb.SeaweedClient) []string { + collectionResp, err := masterClient.CollectionList(context.Background(), &master_pb.CollectionListRequest{ + IncludeNormalVolumes: true, + IncludeEcVolumes: true, + }) + if err != nil { + t.Logf("Warning: failed to list collections: %v", err) + return nil + } + var names []string + for _, c := range collectionResp.Collections { + names = append(names, c.Name) + } + return names +} + +// collectionExists checks if a collection exists in the master +func collectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) bool { + for _, name := range listAllCollections(t, masterClient) { + if name == collectionName { + return true + } + } + return false +} + +// waitForCollectionExists waits for a collection to exist using polling +func waitForCollectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) { + var lastCollections []string + success := assert.Eventually(t, func() bool { + lastCollections = listAllCollections(t, masterClient) + for _, name := range lastCollections { + if name == collectionName { + return true + } + } + return false + }, 10*time.Second, 200*time.Millisecond) + if !success { + t.Fatalf("collection %s should be created; existing collections: %v", collectionName, lastCollections) + } +} + +// waitForCollectionDeleted waits for a collection to be deleted using polling +func waitForCollectionDeleted(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) { + require.Eventually(t, func() bool { + return !collectionExists(t, masterClient, collectionName) + }, 10*time.Second, 200*time.Millisecond, "collection %s should be deleted", collectionName) +} + +// TestFilerGroupCollectionNaming verifies that when a filer group is configured, +// collections are created with the correct prefix ({filerGroup}_{bucketName}) +func TestFilerGroupCollectionNaming(t *testing.T) { + if testConfig.FilerGroup == "" { + t.Skip("Skipping test: FILER_GROUP not configured. 
Set FILER_GROUP environment variable or configure in test_config.json") + } + + s3Client := getS3Client(t) + masterClient := getMasterClient(t) + bucketName := getNewBucketName() + expectedCollection := getExpectedCollectionName(bucketName) + + t.Logf("Testing with filer group: %s", testConfig.FilerGroup) + t.Logf("Bucket name: %s", bucketName) + t.Logf("Expected collection name: %s", expectedCollection) + + // Create bucket + _, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{ + Bucket: aws.String(bucketName), + }) + require.NoError(t, err, "CreateBucket should succeed") + + // Upload an object to trigger collection creation + _, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String("test-object"), + Body: strings.NewReader("test content"), + }) + require.NoError(t, err, "PutObject should succeed") + + // Wait for collection to be visible using polling + waitForCollectionExists(t, masterClient, expectedCollection) + + // Verify collection exists with correct name + require.True(t, collectionExists(t, masterClient, expectedCollection), + "Collection %s should exist (filer group prefix applied)", expectedCollection) + + // Cleanup: delete object and bucket + _, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String("test-object"), + }) + require.NoError(t, err, "DeleteObject should succeed") + + _, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{ + Bucket: aws.String(bucketName), + }) + require.NoError(t, err, "DeleteBucket should succeed") + + // Wait for collection to be deleted using polling + waitForCollectionDeleted(t, masterClient, expectedCollection) + + t.Log("SUCCESS: Collection naming with filer group works correctly") +} + +// TestBucketDeletionWithFilerGroup verifies that bucket deletion correctly +// deletes the collection when filer group is configured +func TestBucketDeletionWithFilerGroup(t *testing.T) { + if testConfig.FilerGroup == "" { + t.Skip("Skipping test: FILER_GROUP not configured") + } + + s3Client := getS3Client(t) + masterClient := getMasterClient(t) + bucketName := getNewBucketName() + expectedCollection := getExpectedCollectionName(bucketName) + + // Create bucket and add an object + _, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{ + Bucket: aws.String(bucketName), + }) + require.NoError(t, err) + + _, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String("test-object"), + Body: strings.NewReader("test content"), + }) + require.NoError(t, err) + + // Wait for collection to be created using polling + waitForCollectionExists(t, masterClient, expectedCollection) + + // Verify collection exists before deletion + require.True(t, collectionExists(t, masterClient, expectedCollection), + "Collection should exist before bucket deletion") + + // Delete object first + _, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String("test-object"), + }) + require.NoError(t, err) + + // Delete bucket + _, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{ + Bucket: aws.String(bucketName), + }) + require.NoError(t, err, "DeleteBucket should succeed") + + // Wait for collection to be deleted using polling + waitForCollectionDeleted(t, masterClient, expectedCollection) + + // Verify collection was deleted + 
require.False(t, collectionExists(t, masterClient, expectedCollection), + "Collection %s should be deleted after bucket deletion", expectedCollection) + + t.Log("SUCCESS: Bucket deletion with filer group correctly deletes collection") +} + +// TestMultipleBucketsWithFilerGroup tests creating and deleting multiple buckets +func TestMultipleBucketsWithFilerGroup(t *testing.T) { + if testConfig.FilerGroup == "" { + t.Skip("Skipping test: FILER_GROUP not configured") + } + + s3Client := getS3Client(t) + masterClient := getMasterClient(t) + + buckets := make([]string, 3) + for i := 0; i < 3; i++ { + buckets[i] = getNewBucketName() + } + + // Create all buckets and add objects + for _, bucket := range buckets { + _, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{ + Bucket: aws.String(bucket), + }) + require.NoError(t, err) + + _, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String("test-object"), + Body: strings.NewReader("test content"), + }) + require.NoError(t, err) + } + + // Wait for all collections to be created using polling + for _, bucket := range buckets { + expectedCollection := getExpectedCollectionName(bucket) + waitForCollectionExists(t, masterClient, expectedCollection) + } + + // Verify all collections exist with correct naming + for _, bucket := range buckets { + expectedCollection := getExpectedCollectionName(bucket) + require.True(t, collectionExists(t, masterClient, expectedCollection), + "Collection %s should exist for bucket %s", expectedCollection, bucket) + } + + // Delete all buckets + for _, bucket := range buckets { + _, err := s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String("test-object"), + }) + require.NoError(t, err) + + _, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{ + Bucket: aws.String(bucket), + }) + require.NoError(t, err) + } + + // Wait for all collections to be deleted using polling + for _, bucket := range buckets { + expectedCollection := getExpectedCollectionName(bucket) + waitForCollectionDeleted(t, masterClient, expectedCollection) + } + + // Verify all collections are deleted + for _, bucket := range buckets { + expectedCollection := getExpectedCollectionName(bucket) + require.False(t, collectionExists(t, masterClient, expectedCollection), + "Collection %s should be deleted for bucket %s", expectedCollection, bucket) + } + + t.Log("SUCCESS: Multiple bucket operations with filer group work correctly") +} diff --git a/test/s3/filer_group/test_config.json b/test/s3/filer_group/test_config.json new file mode 100644 index 000000000..3e0f8f17c --- /dev/null +++ b/test/s3/filer_group/test_config.json @@ -0,0 +1,8 @@ +{ + "s3_endpoint": "http://localhost:8333", + "master_address": "localhost:9333", + "access_key": "some_access_key1", + "secret_key": "some_secret_key1", + "region": "us-east-1", + "filer_group": "testgroup" +} \ No newline at end of file diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go index c62b9da07..0e5886cfa 100644 --- a/weed/admin/dash/admin_server.go +++ b/weed/admin/dash/admin_server.go @@ -30,10 +30,41 @@ import ( "github.com/seaweedfs/seaweedfs/weed/worker/tasks" ) -const ( - // DefaultBucketsPath is the default path for S3 buckets in the filer - DefaultBucketsPath = "/buckets" -) +// FilerConfig holds filer configuration needed for bucket operations +type FilerConfig struct { + BucketsPath string + FilerGroup string +} + +// 
getFilerConfig retrieves the filer configuration (buckets path and filer group) +func (s *AdminServer) getFilerConfig() (*FilerConfig, error) { + config := &FilerConfig{ + BucketsPath: s3_constants.DefaultBucketsPath, + FilerGroup: "", + } + + err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer configuration: %w", err) + } + if resp.DirBuckets != "" { + config.BucketsPath = resp.DirBuckets + } + config.FilerGroup = resp.FilerGroup + return nil + }) + + return config, err +} + +// getCollectionName returns the collection name for a bucket, prefixed with filer group if configured +func getCollectionName(filerGroup, bucketName string) string { + if filerGroup != "" { + return fmt.Sprintf("%s_%s", filerGroup, bucketName) + } + return bucketName +} type AdminServer struct { masterClient *wdclient.MasterClient @@ -255,28 +286,17 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) { return nil, fmt.Errorf("failed to get volume information: %w", err) } - // Get filer configuration to determine FilerGroup - var filerGroup string - err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - configResp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - glog.Warningf("Failed to get filer configuration: %v", err) - // Continue without filer group - return nil - } - filerGroup = configResp.FilerGroup - return nil - }) - + // Get filer configuration (buckets path and filer group) + filerConfig, err := s.getFilerConfig() if err != nil { - return nil, fmt.Errorf("failed to get filer configuration: %w", err) + glog.Warningf("Failed to get filer configuration, using defaults: %v", err) } // Now list buckets from the filer and match with collection data err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // List buckets by looking at the /buckets directory + // List buckets by looking at the buckets directory stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: DefaultBucketsPath, + Directory: filerConfig.BucketsPath, Prefix: "", StartFromFileName: "", InclusiveStartFrom: false, @@ -299,12 +319,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) { bucketName := resp.Entry.Name // Determine collection name for this bucket - var collectionName string - if filerGroup != "" { - collectionName = fmt.Sprintf("%s_%s", filerGroup, bucketName) - } else { - collectionName = bucketName - } + collectionName := getCollectionName(filerConfig.FilerGroup, bucketName) // Get size and object count from collection data var size int64 @@ -373,7 +388,13 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) { // GetBucketDetails retrieves detailed information about a specific bucket func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error) { - bucketPath := fmt.Sprintf("/buckets/%s", bucketName) + // Get filer configuration (buckets path) + filerConfig, err := s.getFilerConfig() + if err != nil { + glog.Warningf("Failed to get filer configuration, using defaults: %v", err) + } + + bucketPath := fmt.Sprintf("%s/%s", filerConfig.BucketsPath, bucketName) details := &BucketDetails{ Bucket: S3Bucket{ @@ -383,10 +404,10 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error UpdatedAt: time.Now(), } - err := 
s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // Get bucket info bucketResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: DefaultBucketsPath, + Directory: filerConfig.BucketsPath, Name: bucketName, }) if err != nil { @@ -434,7 +455,7 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error details.Bucket.Owner = owner // List objects in bucket (recursively) - return s.listBucketObjects(client, bucketPath, "", details) + return s.listBucketObjects(client, bucketPath, bucketPath, "", details) }) if err != nil { @@ -445,7 +466,8 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error } // listBucketObjects recursively lists all objects in a bucket -func (s *AdminServer) listBucketObjects(client filer_pb.SeaweedFilerClient, directory, prefix string, details *BucketDetails) error { +// bucketBasePath is the full path to the bucket (e.g., /buckets/mybucket) +func (s *AdminServer) listBucketObjects(client filer_pb.SeaweedFilerClient, bucketBasePath, directory, prefix string, details *BucketDetails) error { stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ Directory: directory, Prefix: prefix, @@ -470,16 +492,16 @@ func (s *AdminServer) listBucketObjects(client filer_pb.SeaweedFilerClient, dire if entry.IsDirectory { // Recursively list subdirectories subDir := fmt.Sprintf("%s/%s", directory, entry.Name) - err := s.listBucketObjects(client, subDir, "", details) + err := s.listBucketObjects(client, bucketBasePath, subDir, "", details) if err != nil { return err } } else { // Add file object objectKey := entry.Name - if directory != fmt.Sprintf("/buckets/%s", details.Bucket.Name) { + if directory != bucketBasePath { // Remove bucket prefix to get relative path - relativePath := directory[len(fmt.Sprintf("/buckets/%s", details.Bucket.Name))+1:] + relativePath := directory[len(bucketBasePath)+1:] objectKey = fmt.Sprintf("%s/%s", relativePath, entry.Name) } @@ -511,10 +533,17 @@ func (s *AdminServer) CreateS3Bucket(bucketName string) error { // DeleteS3Bucket deletes an S3 bucket and all its contents func (s *AdminServer) DeleteS3Bucket(bucketName string) error { - // First, check if bucket has Object Lock enabled and if there are locked objects ctx := context.Background() - err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - return s3api.CheckBucketForLockedObjects(ctx, client, DefaultBucketsPath, bucketName) + + // Get filer configuration (buckets path and filer group) + filerConfig, err := s.getFilerConfig() + if err != nil { + return fmt.Errorf("failed to get filer configuration: %w", err) + } + + // Check if bucket has Object Lock enabled and if there are locked objects + err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return s3api.CheckBucketForLockedObjects(ctx, client, filerConfig.BucketsPath, bucketName) }) if err != nil { return err @@ -522,21 +551,23 @@ func (s *AdminServer) DeleteS3Bucket(bucketName string) error { // Delete the collection first (same as s3.bucket.delete shell command) // This ensures volume data is cleaned up properly + // Collection name must be prefixed with filer group if configured + collectionName := getCollectionName(filerConfig.FilerGroup, bucketName) err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { _, err := client.CollectionDelete(ctx, 
&master_pb.CollectionDeleteRequest{ - Name: bucketName, + Name: collectionName, }) return err }) if err != nil { - return fmt.Errorf("failed to delete collection: %w", err) + return fmt.Errorf("failed to delete collection %s: %w", collectionName, err) } // Then delete bucket directory recursively from filer // Use same parameters as s3.bucket.delete shell command and S3 API return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { _, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{ - Directory: DefaultBucketsPath, + Directory: filerConfig.BucketsPath, Name: bucketName, IsDeleteData: false, // Collection already deleted, just remove metadata IsRecursive: true, diff --git a/weed/s3api/s3_constants/buckets.go b/weed/s3api/s3_constants/buckets.go new file mode 100644 index 000000000..a10cf0f50 --- /dev/null +++ b/weed/s3api/s3_constants/buckets.go @@ -0,0 +1,6 @@ +package s3_constants + +const ( + // DefaultBucketsPath is the default path for S3 buckets in the filer + DefaultBucketsPath = "/buckets" +) diff --git a/weed/s3api/s3_objectlock/object_lock_check.go b/weed/s3api/s3_objectlock/object_lock_check.go index a66e587c5..2cd79b340 100644 --- a/weed/s3api/s3_objectlock/object_lock_check.go +++ b/weed/s3api/s3_objectlock/object_lock_check.go @@ -229,4 +229,3 @@ func CheckBucketForLockedObjects(ctx context.Context, client filer_pb.SeaweedFil return nil } - -- cgit v1.2.3