path: root/test/s3/filer_group/s3_filer_group_test.go
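// Package filer_group contains integration tests that exercise SeaweedFS S3
// bucket and collection handling when a filer group is configured.
//
// These tests assume a running SeaweedFS cluster (master, filer with a filer
// group, and S3 gateway). A typical invocation might look like the following;
// the endpoint and group values are illustrative, not required defaults:
//
//	FILER_GROUP=testgroup S3_ENDPOINT=http://localhost:8333 \
//	MASTER_ADDRESS=localhost:19333 go test -v ./test/s3/filer_group/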
package filer_group

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

// TestConfig holds configuration for filer group S3 tests
type TestConfig struct {
	S3Endpoint    string `json:"s3_endpoint"`
	MasterAddress string `json:"master_address"`
	AccessKey     string `json:"access_key"`
	SecretKey     string `json:"secret_key"`
	Region        string `json:"region"`
	FilerGroup    string `json:"filer_group"`
}

var testConfig = &TestConfig{
	S3Endpoint:    "http://localhost:8333",
	MasterAddress: "localhost:19333", // gRPC port = 10000 + master HTTP port (9333)
	AccessKey:     "some_access_key1",
	SecretKey:     "some_secret_key1",
	Region:        "us-east-1",
	FilerGroup:    "testgroup", // Expected filer group for these tests
}

func init() {
	// Load config from file if exists
	if data, err := os.ReadFile("test_config.json"); err == nil {
		if err := json.Unmarshal(data, testConfig); err != nil {
			// Log but don't fail - env vars can still override
			fmt.Fprintf(os.Stderr, "Warning: failed to parse test_config.json: %v\n", err)
		}
	}

	// Override from environment variables
	if v := os.Getenv("S3_ENDPOINT"); v != "" {
		testConfig.S3Endpoint = v
	}
	if v := os.Getenv("MASTER_ADDRESS"); v != "" {
		testConfig.MasterAddress = v
	}
	if v := os.Getenv("FILER_GROUP"); v != "" {
		testConfig.FilerGroup = v
	}
}
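
// For reference, a test_config.json matching the fields above might look like
// this (the values shown are the illustrative defaults used in this package,
// not a required configuration):
//
//	{
//	  "s3_endpoint": "http://localhost:8333",
//	  "master_address": "localhost:19333",
//	  "access_key": "some_access_key1",
//	  "secret_key": "some_secret_key1",
//	  "region": "us-east-1",
//	  "filer_group": "testgroup"
//	}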

func getS3Client(t *testing.T) *s3.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(testConfig.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			testConfig.AccessKey,
			testConfig.SecretKey,
			"",
		)),
	)
	require.NoError(t, err)

	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(testConfig.S3Endpoint)
		o.UsePathStyle = true
	})
}

func getMasterClient(t *testing.T) master_pb.SeaweedClient {
	conn, err := grpc.NewClient(testConfig.MasterAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
	require.NoError(t, err)
	t.Cleanup(func() { conn.Close() })
	return master_pb.NewSeaweedClient(conn)
}

func getNewBucketName() string {
	return fmt.Sprintf("filergroup-test-%d", time.Now().UnixNano())
}

// getExpectedCollectionName returns the expected collection name for a bucket
// When a filer group is configured, collections are named {filerGroup}_{bucketName}.
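// For example, with filer group "testgroup", a bucket named "mybucket" maps to
// the collection "testgroup_mybucket" (bucket name here is illustrative);
// without a filer group, the collection name is just the bucket name.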
func getExpectedCollectionName(bucketName string) string {
	if testConfig.FilerGroup != "" {
		return fmt.Sprintf("%s_%s", testConfig.FilerGroup, bucketName)
	}
	return bucketName
}

// listAllCollections returns the names of all collections known to the master.
// On error it logs a warning and returns nil.
func listAllCollections(t *testing.T, masterClient master_pb.SeaweedClient) []string {
	collectionResp, err := masterClient.CollectionList(context.Background(), &master_pb.CollectionListRequest{
		IncludeNormalVolumes: true,
		IncludeEcVolumes:     true,
	})
	if err != nil {
		t.Logf("Warning: failed to list collections: %v", err)
		return nil
	}
	var names []string
	for _, c := range collectionResp.Collections {
		names = append(names, c.Name)
	}
	return names
}

// collectionExists checks if a collection exists in the master
func collectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) bool {
	for _, name := range listAllCollections(t, masterClient) {
		if name == collectionName {
			return true
		}
	}
	return false
}

// waitForCollectionExists waits for a collection to exist using polling
func waitForCollectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) {
	var lastCollections []string
	success := assert.Eventually(t, func() bool {
		lastCollections = listAllCollections(t, masterClient)
		for _, name := range lastCollections {
			if name == collectionName {
				return true
			}
		}
		return false
	}, 10*time.Second, 200*time.Millisecond)
	if !success {
		t.Fatalf("collection %s should be created; existing collections: %v", collectionName, lastCollections)
	}
}

// waitForCollectionDeleted waits for a collection to be deleted using polling
func waitForCollectionDeleted(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) {
	require.Eventually(t, func() bool {
		return !collectionExists(t, masterClient, collectionName)
	}, 10*time.Second, 200*time.Millisecond, "collection %s should be deleted", collectionName)
}

// TestFilerGroupCollectionNaming verifies that when a filer group is configured,
// collections are created with the correct prefix ({filerGroup}_{bucketName})
func TestFilerGroupCollectionNaming(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured. Set FILER_GROUP environment variable or configure in test_config.json")
	}

	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)
	bucketName := getNewBucketName()
	expectedCollection := getExpectedCollectionName(bucketName)

	t.Logf("Testing with filer group: %s", testConfig.FilerGroup)
	t.Logf("Bucket name: %s", bucketName)
	t.Logf("Expected collection name: %s", expectedCollection)

	// Create bucket
	_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "CreateBucket should succeed")

	// Upload an object to trigger collection creation
	_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
		Body:   strings.NewReader("test content"),
	})
	require.NoError(t, err, "PutObject should succeed")

	// Wait for collection to be visible using polling
	waitForCollectionExists(t, masterClient, expectedCollection)

	// Verify collection exists with correct name
	require.True(t, collectionExists(t, masterClient, expectedCollection),
		"Collection %s should exist (filer group prefix applied)", expectedCollection)

	// Cleanup: delete object and bucket
	_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
	})
	require.NoError(t, err, "DeleteObject should succeed")

	_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "DeleteBucket should succeed")

	// Wait for collection to be deleted using polling
	waitForCollectionDeleted(t, masterClient, expectedCollection)

	t.Log("SUCCESS: Collection naming with filer group works correctly")
}

// TestBucketDeletionWithFilerGroup verifies that bucket deletion correctly
// deletes the collection when filer group is configured
func TestBucketDeletionWithFilerGroup(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured")
	}

	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)
	bucketName := getNewBucketName()
	expectedCollection := getExpectedCollectionName(bucketName)

	// Create bucket and add an object
	_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err)

	_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
		Body:   strings.NewReader("test content"),
	})
	require.NoError(t, err)

	// Wait for collection to be created using polling
	waitForCollectionExists(t, masterClient, expectedCollection)

	// Verify collection exists before deletion
	require.True(t, collectionExists(t, masterClient, expectedCollection),
		"Collection should exist before bucket deletion")

	// Delete object first
	_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
	})
	require.NoError(t, err)

	// Delete bucket
	_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "DeleteBucket should succeed")

	// Wait for collection to be deleted using polling
	waitForCollectionDeleted(t, masterClient, expectedCollection)

	// Verify collection was deleted
	require.False(t, collectionExists(t, masterClient, expectedCollection),
		"Collection %s should be deleted after bucket deletion", expectedCollection)

	t.Log("SUCCESS: Bucket deletion with filer group correctly deletes collection")
}

// TestMultipleBucketsWithFilerGroup tests creating and deleting multiple buckets
func TestMultipleBucketsWithFilerGroup(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured")
	}

	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)

	buckets := make([]string, 3)
	for i := 0; i < 3; i++ {
		buckets[i] = getNewBucketName()
	}

	// Create all buckets and add objects
	for _, bucket := range buckets {
		_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
			Bucket: aws.String(bucket),
		})
		require.NoError(t, err)

		_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String("test-object"),
			Body:   strings.NewReader("test content"),
		})
		require.NoError(t, err)
	}

	// Wait for all collections to be created using polling
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		waitForCollectionExists(t, masterClient, expectedCollection)
	}

	// Verify all collections exist with correct naming
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		require.True(t, collectionExists(t, masterClient, expectedCollection),
			"Collection %s should exist for bucket %s", expectedCollection, bucket)
	}

	// Delete all buckets
	for _, bucket := range buckets {
		_, err := s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String("test-object"),
		})
		require.NoError(t, err)

		_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
			Bucket: aws.String(bucket),
		})
		require.NoError(t, err)
	}

	// Wait for all collections to be deleted using polling
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		waitForCollectionDeleted(t, masterClient, expectedCollection)
	}

	// Verify all collections are deleted
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		require.False(t, collectionExists(t, masterClient, expectedCollection),
			"Collection %s should be deleted for bucket %s", expectedCollection, bucket)
	}

	t.Log("SUCCESS: Multiple bucket operations with filer group work correctly")
}