author     ustuzhanin <55892859+ustuzhanin@users.noreply.github.com>  2020-10-02 22:47:25 +0500
committer  GitHub <noreply@github.com>  2020-10-02 22:47:25 +0500
commit     3e0a79ef050dba9e5347d20537ef562cc4b30b62 (patch)
tree       e0b42e531d18136d9e272258187a305690ee2b4d /test/s3
parent     cbd80253e33688f55c02dd29c994a3ee6eac3d6c (diff)
parent     9ab98fa912814686b3035a97b5173c1628fbc0fc (diff)
Merge pull request #1 from chrislusf/master
Merge upstream
Diffstat (limited to 'test/s3')
-rw-r--r--  test/s3/basic/basic_test.go      21
-rw-r--r--  test/s3/multipart/aws_upload.go  175
2 files changed, 194 insertions(+), 2 deletions(-)
diff --git a/test/s3/basic/basic_test.go b/test/s3/basic/basic_test.go
index 1f9e74fc1..653fa1237 100644
--- a/test/s3/basic/basic_test.go
+++ b/test/s3/basic/basic_test.go
@@ -61,7 +61,7 @@ func TestCreateBucket(t *testing.T) {
}
-func TestListBuckets(t *testing.T) {
+func TestPutObject(t *testing.T) {
input := &s3.PutObjectInput{
ACL: aws.String("authenticated-read"),
@@ -89,7 +89,7 @@ func TestListBuckets(t *testing.T) {
}
-func TestPutObject(t *testing.T) {
+func TestListBucket(t *testing.T) {
result, err := svc.ListBuckets(nil)
if err != nil {
@@ -105,6 +105,23 @@ func TestPutObject(t *testing.T) {
}
+func TestListObjectV2(t *testing.T) {
+
+ listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+ Bucket: aws.String(Bucket),
+ Prefix: aws.String("foo"),
+ Delimiter: aws.String("/"),
+ })
+ if err != nil {
+ exitErrorf("Unable to list objects, %v", err)
+ }
+ for _, content := range listObj.Contents {
+ fmt.Println(aws.StringValue(content.Key))
+ }
+ fmt.Printf("list: %s\n", listObj)
+
+}
+
func exitErrorf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
os.Exit(1)
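
The renamed TestPutObject and TestListBucket cases and the new TestListObjectV2 case above rely on a package-level svc client and Bucket name that are declared earlier in basic_test.go and so do not appear in this hunk. As a rough sketch only (the package name, bucket name, region, and credentials here are placeholders, and the localhost:8333 endpoint is assumed from the multipart example added below), that shared setup could look something like:

// Hypothetical setup sketch, not part of this commit: a package-level S3
// client pointed at a local SeaweedFS S3 gateway, as the tests above expect.
package basic

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// Bucket is the bucket the tests operate on (placeholder name).
const Bucket = "theBucket"

// svc is the shared S3 client used by all test cases in this file.
var svc = s3.New(session.Must(session.NewSession(&aws.Config{
	Region:           aws.String("us-east-1"),
	Endpoint:         aws.String("localhost:8333"), // SeaweedFS S3 gateway (assumed)
	DisableSSL:       aws.Bool(true),
	S3ForcePathStyle: aws.Bool(true),
	Credentials:      credentials.NewStaticCredentials("any", "any", ""),
})))
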
diff --git a/test/s3/multipart/aws_upload.go b/test/s3/multipart/aws_upload.go
new file mode 100644
index 000000000..8c15cf6ed
--- /dev/null
+++ b/test/s3/multipart/aws_upload.go
@@ -0,0 +1,175 @@
+package main
+
+// copied from https://github.com/apoorvam/aws-s3-multipart-upload
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+const (
+ maxPartSize = int64(5 * 1024 * 1024)
+ maxRetries = 3
+ awsAccessKeyID = "Your access key"
+ awsSecretAccessKey = "Your secret key"
+ awsBucketRegion = "S3 bucket region"
+ awsBucketName = "newBucket"
+)
+
+var (
+ filename = flag.String("f", "", "the file name")
+)
+
+func main() {
+ flag.Parse()
+
+ creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "")
+ _, err := creds.Get()
+ if err != nil {
+ fmt.Printf("bad credentials: %s", err)
+ }
+ cfg := aws.NewConfig().WithRegion(awsBucketRegion).WithCredentials(creds).WithDisableSSL(true).WithEndpoint("localhost:8333")
+ svc := s3.New(session.New(), cfg)
+
+ file, err := os.Open(*filename)
+ if err != nil {
+ fmt.Printf("err opening file: %s", err)
+ return
+ }
+ defer file.Close()
+ fileInfo, _ := file.Stat()
+ size := fileInfo.Size()
+ buffer := make([]byte, size)
+ file.Read(buffer)
+ fileType := http.DetectContentType(buffer) // sniff the content type after the file has been read
+
+ path := "/media/" + file.Name()
+ input := &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(awsBucketName),
+ Key: aws.String(path),
+ ContentType: aws.String(fileType),
+ }
+
+ resp, err := svc.CreateMultipartUpload(input)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ fmt.Println("Created multipart upload request")
+
+ var curr, partLength int64
+ var remaining = size
+ var completedParts []*s3.CompletedPart
+ partNumber := 1
+ for curr = 0; remaining != 0; curr += partLength {
+ if remaining < maxPartSize {
+ partLength = remaining
+ } else {
+ partLength = maxPartSize
+ }
+ completedPart, err := uploadPart(svc, resp, buffer[curr:curr+partLength], partNumber)
+ if err != nil {
+ fmt.Println(err.Error())
+ err := abortMultipartUpload(svc, resp)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ return
+ }
+ remaining -= partLength
+ partNumber++
+ completedParts = append(completedParts, completedPart)
+ }
+
+ // list parts
+ parts, err := svc.ListParts(&s3.ListPartsInput{
+ Bucket: input.Bucket,
+ Key: input.Key,
+ MaxParts: nil,
+ PartNumberMarker: nil,
+ RequestPayer: nil,
+ UploadId: resp.UploadId,
+ })
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ fmt.Printf("list parts: %d\n", len(parts.Parts))
+ for i, part := range parts.Parts {
+ fmt.Printf("part %d: %v\n", i, part)
+ }
+
+
+ completeResponse, err := completeMultipartUpload(svc, resp, completedParts)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ fmt.Printf("Successfully uploaded file: %s\n", completeResponse.String())
+}
+
+func completeMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, completedParts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) {
+ completeInput := &s3.CompleteMultipartUploadInput{
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ UploadId: resp.UploadId,
+ MultipartUpload: &s3.CompletedMultipartUpload{
+ Parts: completedParts,
+ },
+ }
+ return svc.CompleteMultipartUpload(completeInput)
+}
+
+func uploadPart(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, fileBytes []byte, partNumber int) (*s3.CompletedPart, error) {
+ tryNum := 1
+ partInput := &s3.UploadPartInput{
+ Body: bytes.NewReader(fileBytes),
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ PartNumber: aws.Int64(int64(partNumber)),
+ UploadId: resp.UploadId,
+ ContentLength: aws.Int64(int64(len(fileBytes))),
+ }
+
+ for tryNum <= maxRetries {
+ uploadResult, err := svc.UploadPart(partInput)
+ if err != nil {
+ if tryNum == maxRetries {
+ if aerr, ok := err.(awserr.Error); ok {
+ return nil, aerr
+ }
+ return nil, err
+ }
+ fmt.Printf("Retrying to upload part #%v\n", partNumber)
+ tryNum++
+ } else {
+ fmt.Printf("Uploaded part #%v\n", partNumber)
+ return &s3.CompletedPart{
+ ETag: uploadResult.ETag,
+ PartNumber: aws.Int64(int64(partNumber)),
+ }, nil
+ }
+ }
+ return nil, nil
+}
+
+func abortMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput) error {
+ fmt.Println("Aborting multipart upload for UploadId#" + *resp.UploadId)
+ abortInput := &s3.AbortMultipartUploadInput{
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ UploadId: resp.UploadId,
+ }
+ _, err := svc.AbortMultipartUpload(abortInput)
+ return err
+}
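
After completeMultipartUpload returns, the object should be reachable under the key assembled above ("/media/" + file.Name()). As a small follow-up sketch, not part of this commit, a helper placed next to aws_upload.go in the same package could confirm the result with a HeadObject call against the same client (the function name is hypothetical):

// Hypothetical verification helper, not part of this commit.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// verifyUpload checks that the completed multipart object exists and prints
// its size and ETag as reported by the S3 gateway.
func verifyUpload(svc *s3.S3, bucket, key string) error {
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	fmt.Printf("verified %s: %d bytes, ETag %s\n",
		key, aws.Int64Value(head.ContentLength), aws.StringValue(head.ETag))
	return nil
}

Calling verifyUpload(svc, awsBucketName, path) at the end of main would report the stored size, which should match the local file size read earlier.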