| field | value | date |
|---|---|---|
| author | LHHDZ <changlin.shi@ly.com> | 2022-09-30 03:29:01 +0800 |
| committer | GitHub <noreply@github.com> | 2022-09-29 12:29:01 -0700 |
| commit | 3de1e1978091e9675c9d54655dcde0c7dea9d222 (patch) | |
| tree | 86d5b68876c9e964e9ff3ae6b05cef66aa62abf5 /weed/s3api/bucket_metadata_test.go | |
| parent | 5e9039d728221b69dc30010e73c3a0a4e7c7e7e8 (diff) | |
s3: sync bucket info from filer (#3759)
Diffstat (limited to 'weed/s3api/bucket_metadata_test.go')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | weed/s3api/bucket_metadata_test.go | 236 |
1 file changed, 236 insertions, 0 deletions
diff --git a/weed/s3api/bucket_metadata_test.go b/weed/s3api/bucket_metadata_test.go
new file mode 100644
index 000000000..f3c3610cc
--- /dev/null
+++ b/weed/s3api/bucket_metadata_test.go
@@ -0,0 +1,236 @@
+package s3api
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+)
+
+type BucketMetadataTestCase struct {
+	filerEntry           *filer_pb.Entry
+	expectBucketMetadata *BucketMetaData
+}
+
+var (
+	//bad entry
+	badEntry = &filer_pb.Entry{
+		Name: "badEntry",
+	}
+
+	//good entry
+	goodEntryAcp, _ = jsonutil.BuildJSON(&s3.AccessControlPolicy{
+		Owner: &s3.Owner{
+			DisplayName: &AccountAdmin.Name,
+			ID:          &AccountAdmin.Id,
+		},
+		Grants: s3_constants.PublicRead,
+	})
+	goodEntry = &filer_pb.Entry{
+		Name: "entryWithValidAcp",
+		Extended: map[string][]byte{
+			s3_constants.ExtOwnershipKey: []byte(s3_constants.OwnershipBucketOwnerEnforced),
+			s3_constants.ExtAcpKey:       goodEntryAcp,
+		},
+	}
+
+	//ownership is ""
+	ownershipEmptyStr = &filer_pb.Entry{
+		Name: "ownershipEmptyStr",
+		Extended: map[string][]byte{
+			s3_constants.ExtOwnershipKey: []byte(""),
+		},
+	}
+
+	//ownership valid
+	ownershipValid = &filer_pb.Entry{
+		Name: "ownershipValid",
+		Extended: map[string][]byte{
+			s3_constants.ExtOwnershipKey: []byte(s3_constants.OwnershipBucketOwnerEnforced),
+		},
+	}
+
+	//acp is ""
+	acpEmptyStr = &filer_pb.Entry{
+		Name: "acpEmptyStr",
+		Extended: map[string][]byte{
+			s3_constants.ExtAcpKey: []byte(""),
+		},
+	}
+
+	//acp is empty object
+	acpEmptyObjectAcp, _ = jsonutil.BuildJSON(&s3.AccessControlPolicy{
+		Owner:  nil,
+		Grants: nil,
+	})
+	acpEmptyObject = &filer_pb.Entry{
+		Name: "acpEmptyObject",
+		Extended: map[string][]byte{
+			s3_constants.ExtAcpKey: acpEmptyObjectAcp,
+		},
+	}
+
+	//acp owner is nil
+	acpOwnerNilAcp, _ = jsonutil.BuildJSON(&s3.AccessControlPolicy{
+		Owner:  nil,
+		Grants: make([]*s3.Grant, 1),
+	})
+	acpOwnerNil = &filer_pb.Entry{
+		Name: "acpOwnerNil",
+		Extended: map[string][]byte{
+			s3_constants.ExtAcpKey: acpOwnerNilAcp,
+		},
+	}
+
+	//records how many times each bucket is loaded from the filer
+	loadFilerBucket = make(map[string]int, 1)
+	//`loadBucketMetadataFromFiler` is overridden in TestGetBucketMetadata to avoid actually loading from the filer
+)
+
+var tcs = []*BucketMetadataTestCase{
+	{
+		badEntry, &BucketMetaData{
+			Name:            badEntry.Name,
+			ObjectOwnership: s3_constants.DefaultOwnershipForExists,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: nil,
+		},
+	},
+	{
+		goodEntry, &BucketMetaData{
+			Name:            goodEntry.Name,
+			ObjectOwnership: s3_constants.OwnershipBucketOwnerEnforced,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: s3_constants.PublicRead,
+		},
+	},
+	{
+		ownershipEmptyStr, &BucketMetaData{
+			Name:            ownershipEmptyStr.Name,
+			ObjectOwnership: s3_constants.DefaultOwnershipForExists,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: nil,
+		},
+	},
+	{
+		ownershipValid, &BucketMetaData{
+			Name:            ownershipValid.Name,
+			ObjectOwnership: s3_constants.OwnershipBucketOwnerEnforced,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: nil,
+		},
+	},
+	{
+		acpEmptyStr, &BucketMetaData{
+			Name:            acpEmptyStr.Name,
+			ObjectOwnership: s3_constants.DefaultOwnershipForExists,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: nil,
+		},
+	},
+	{
+		acpEmptyObject, &BucketMetaData{
+			Name:            acpEmptyObject.Name,
+			ObjectOwnership: s3_constants.DefaultOwnershipForExists,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: nil,
+		},
+	},
+	{
+		acpOwnerNil, &BucketMetaData{
+			Name:            acpOwnerNil.Name,
+			ObjectOwnership: s3_constants.DefaultOwnershipForExists,
+			Owner: &s3.Owner{
+				DisplayName: &AccountAdmin.Name,
+				ID:          &AccountAdmin.Id,
+			},
+			Acl: make([]*s3.Grant, 0),
+		},
+	},
+}
+
+func TestBuildBucketMetadata(t *testing.T) {
+	for _, tc := range tcs {
+		resultBucketMetadata := buildBucketMetadata(tc.filerEntry)
+		if !reflect.DeepEqual(resultBucketMetadata, tc.expectBucketMetadata) {
+			t.Fatalf("unexpected result: \nresult: %v, \nexpected: %v", resultBucketMetadata, tc.expectBucketMetadata)
+		}
+	}
+}
+
+func TestGetBucketMetadata(t *testing.T) {
+	loadBucketMetadataFromFiler = func(r *BucketRegistry, bucketName string) (*BucketMetaData, error) {
+		time.Sleep(time.Second)
+		loadFilerBucket[bucketName] = loadFilerBucket[bucketName] + 1
+		return &BucketMetaData{
+			Name: bucketName,
+		}, nil
+	}
+
+	br := &BucketRegistry{
+		metadataCache: make(map[string]*BucketMetaData),
+		notFound:      make(map[string]struct{}),
+		s3a:           nil,
+	}
+
+	//start 40 goroutines that concurrently request bucket metadata
+	var wg sync.WaitGroup
+	closeCh := make(chan struct{})
+	for i := 0; i < 40; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+		outLoop:
+			for {
+				for j := 0; j < 5; j++ {
+					select {
+					case <-closeCh:
+						break outLoop
+					default:
+						reqBucket := fmt.Sprintf("%c", 67+j)
+						_, errCode := br.GetBucketMetadata(reqBucket)
+						if errCode != s3err.ErrNone {
+							close(closeCh)
+							t.Error("unexpected error code")
+						}
+					}
+				}
+				time.Sleep(10 * time.Microsecond)
+			}
+		}()
+	}
+	time.Sleep(time.Second)
+	close(closeCh)
+	wg.Wait()
+
+	//Each bucket is loaded from the filer only once
+	for bucketName, loadCount := range loadFilerBucket {
+		if loadCount != 1 {
+			t.Fatalf("lock is ineffective: %s, %d", bucketName, loadCount)
+		}
+	}
+}
