From 0f9ba84274d54cc69e3f592c6c2b058fca9a57e8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 27 Nov 2019 03:09:42 -0800 Subject: s3 --- weed/storage/backend/s3_backend/s3_backend.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 0ff7eca21..69360806f 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -2,6 +2,7 @@ package s3_backend import ( "fmt" + "os" "strings" "time" @@ -25,7 +26,6 @@ type S3Backend struct { conn s3iface.S3API region string bucket string - dir string vid needle.VolumeId key string } @@ -84,11 +84,11 @@ func (s3backend *S3Backend) GetName() string { return "s3" } -func (s3backend *S3Backend) GetSinkToDirectory() string { - return s3backend.dir +func (s3backend S3Backend) Instantiate(src *os.File) error { + panic("implement me") } -func (s3backend *S3Backend) Initialize(configuration util.Configuration, vid needle.VolumeId) error { +func (s3backend *S3Backend) Initialize(configuration util.Configuration, prefix string, vid needle.VolumeId) error { glog.V(0).Infof("storage.backend.s3.region: %v", configuration.GetString("region")) glog.V(0).Infof("storage.backend.s3.bucket: %v", configuration.GetString("bucket")) glog.V(0).Infof("storage.backend.s3.directory: %v", configuration.GetString("directory")) @@ -98,20 +98,19 @@ func (s3backend *S3Backend) Initialize(configuration util.Configuration, vid nee configuration.GetString("aws_secret_access_key"), configuration.GetString("region"), configuration.GetString("bucket"), - configuration.GetString("directory"), + prefix, vid, ) } -func (s3backend *S3Backend) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string, - vid needle.VolumeId) (err error) { +func (s3backend *S3Backend) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket string, + prefix string, vid needle.VolumeId) (err error) { s3backend.region = region s3backend.bucket = bucket - s3backend.dir = dir s3backend.conn, err = createSession(awsAccessKeyId, awsSecretAccessKey, region) s3backend.vid = vid - s3backend.key = fmt.Sprintf("%s/%d.dat", dir, vid) + s3backend.key = fmt.Sprintf("%s_%d.dat", prefix, vid) if strings.HasPrefix(s3backend.key, "/") { s3backend.key = s3backend.key[1:] } -- cgit v1.2.3 From f60154f330a81354c433da37d612f235d1c0e4e9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 28 Nov 2019 18:33:18 -0800 Subject: master load backend storage config from master.toml --- weed/storage/backend/s3_backend/s3_backend.go | 124 ++++++++++++++------------ 1 file changed, 68 insertions(+), 56 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 69360806f..980e9e9d7 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -10,36 +10,77 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) -var ( - _ backend.DataStorageBackend = &S3Backend{} -) - func init() { - backend.StorageBackends = append(backend.StorageBackends, &S3Backend{}) + backend.BackendStorageFactories["s3"] = 
&S3BackendFactory{} +} + +type S3BackendFactory struct { } -type S3Backend struct { +func (factory *S3BackendFactory) StorageType() backend.StorageType { + return backend.StorageType("s3") +} +func (factory *S3BackendFactory) BuildStorage(configuration util.Configuration, id string) (backend.BackendStorage, error) { + return newS3BackendStorage(configuration, id) +} + +type S3BackendStorage struct { + id string conn s3iface.S3API region string bucket string - vid needle.VolumeId - key string } -func (s3backend S3Backend) ReadAt(p []byte, off int64) (n int, err error) { +func newS3BackendStorage(configuration util.Configuration, id string) (s *S3BackendStorage, err error) { + s = &S3BackendStorage{} + s.id = id + s.conn, err = createSession( + configuration.GetString("aws_access_key_id"), + configuration.GetString("aws_secret_access_key"), + configuration.GetString("region")) + s.region = configuration.GetString("region") + s.bucket = configuration.GetString("bucket") + + glog.V(0).Infof("created s3 backend storage %s for region %s bucket %s", s.Name(), s.region, s.bucket) + return +} + +func (s *S3BackendStorage) Name() string { + return "s3." + s.id +} + +func (s *S3BackendStorage) NewStorageFile(key string) backend.BackendStorageFile { + if strings.HasPrefix(key, "/") { + key = key[1:] + } + + f := &S3BackendStorageFile{ + backendStorage: s, + key: key, + } + + return f +} + +type S3BackendStorageFile struct { + backendStorage *S3BackendStorage + key string +} + +func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) { bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1) - getObjectOutput, getObjectErr := s3backend.conn.GetObject(&s3.GetObjectInput{ - Bucket: &s3backend.bucket, - Key: &s3backend.key, + getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{ + Bucket: &s3backendStorageFile.backendStorage.bucket, + Key: &s3backendStorageFile.key, Range: &bytesRange, }) if getObjectErr != nil { - return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backend.bucket, s3backend.key, getObjectErr) + return 0, fmt.Errorf("bucket %s GetObject %s: %v", + s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr) } defer getObjectOutput.Body.Close() @@ -47,27 +88,28 @@ func (s3backend S3Backend) ReadAt(p []byte, off int64) (n int, err error) { } -func (s3backend S3Backend) WriteAt(p []byte, off int64) (n int, err error) { +func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) { panic("implement me") } -func (s3backend S3Backend) Truncate(off int64) error { +func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error { panic("implement me") } -func (s3backend S3Backend) Close() error { +func (s3backendStorageFile S3BackendStorageFile) Close() error { return nil } -func (s3backend S3Backend) GetStat() (datSize int64, modTime time.Time, err error) { +func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) { - headObjectOutput, headObjectErr := s3backend.conn.HeadObject(&s3.HeadObjectInput{ - Bucket: &s3backend.bucket, - Key: &s3backend.key, + headObjectOutput, headObjectErr := s3backendStorageFile.backendStorage.conn.HeadObject(&s3.HeadObjectInput{ + Bucket: &s3backendStorageFile.backendStorage.bucket, + Key: &s3backendStorageFile.key, }) if headObjectErr != nil { - return 0, time.Now(), fmt.Errorf("bucket %s HeadObject %s: %v", s3backend.bucket, s3backend.key, 
headObjectErr) + return 0, time.Now(), fmt.Errorf("bucket %s HeadObject %s: %v", + s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, headObjectErr) } datSize = int64(*headObjectOutput.ContentLength) @@ -76,44 +118,14 @@ func (s3backend S3Backend) GetStat() (datSize int64, modTime time.Time, err erro return } -func (s3backend S3Backend) String() string { - return fmt.Sprintf("%s/%s", s3backend.bucket, s3backend.key) +func (s3backendStorageFile S3BackendStorageFile) String() string { + return s3backendStorageFile.key } -func (s3backend *S3Backend) GetName() string { +func (s3backendStorageFile *S3BackendStorageFile) GetName() string { return "s3" } -func (s3backend S3Backend) Instantiate(src *os.File) error { +func (s3backendStorageFile S3BackendStorageFile) Instantiate(src *os.File) error { panic("implement me") } - -func (s3backend *S3Backend) Initialize(configuration util.Configuration, prefix string, vid needle.VolumeId) error { - glog.V(0).Infof("storage.backend.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("storage.backend.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("storage.backend.s3.directory: %v", configuration.GetString("directory")) - - return s3backend.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - prefix, - vid, - ) -} - -func (s3backend *S3Backend) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket string, - prefix string, vid needle.VolumeId) (err error) { - s3backend.region = region - s3backend.bucket = bucket - s3backend.conn, err = createSession(awsAccessKeyId, awsSecretAccessKey, region) - - s3backend.vid = vid - s3backend.key = fmt.Sprintf("%s_%d.dat", prefix, vid) - if strings.HasPrefix(s3backend.key, "/") { - s3backend.key = s3backend.key[1:] - } - - return err -} -- cgit v1.2.3 From 0da7b894ccee449eed942b9ac6e5dbf775ca1d21 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 29 Nov 2019 01:05:09 -0800 Subject: pass backend config from master to volume servers --- weed/storage/backend/s3_backend/s3_backend.go | 33 +++++++++++++++------------ 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 980e9e9d7..7db8f2337 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -23,33 +22,39 @@ type S3BackendFactory struct { func (factory *S3BackendFactory) StorageType() backend.StorageType { return backend.StorageType("s3") } -func (factory *S3BackendFactory) BuildStorage(configuration util.Configuration, id string) (backend.BackendStorage, error) { +func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, id string) (backend.BackendStorage, error) { return newS3BackendStorage(configuration, id) } type S3BackendStorage struct { - id string - conn s3iface.S3API - region string - bucket string + id string + aws_access_key_id string + aws_secret_access_key string + region string + bucket string + conn s3iface.S3API } -func newS3BackendStorage(configuration util.Configuration, id 
string) (s *S3BackendStorage, err error) { +func newS3BackendStorage(configuration backend.StringProperties, id string) (s *S3BackendStorage, err error) { s = &S3BackendStorage{} s.id = id - s.conn, err = createSession( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region")) + s.aws_access_key_id = configuration.GetString("aws_access_key_id") + s.aws_secret_access_key = configuration.GetString("aws_secret_access_key") s.region = configuration.GetString("region") s.bucket = configuration.GetString("bucket") + s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) - glog.V(0).Infof("created s3 backend storage %s for region %s bucket %s", s.Name(), s.region, s.bucket) + glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) return } -func (s *S3BackendStorage) Name() string { - return "s3." + s.id +func (s *S3BackendStorage) ToProperties() map[string]string { + m := make(map[string]string) + m["aws_access_key_id"] = s.aws_access_key_id + m["aws_secret_access_key"] = s.aws_secret_access_key + m["region"] = s.region + m["bucket"] = s.bucket + return m } func (s *S3BackendStorage) NewStorageFile(key string) backend.BackendStorageFile { -- cgit v1.2.3 From ec8de250e28b3356bb29b273baa557852f3c000b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 2 Dec 2019 15:08:28 -0800 Subject: tiered storage: can copy to s3, read from s3 master not aware tiered volume yet, file assigning is not working yet --- weed/storage/backend/s3_backend/s3_backend.go | 56 ++++++++++++++++++++------- 1 file changed, 43 insertions(+), 13 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 7db8f2337..66299be48 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -2,6 +2,7 @@ package s3_backend import ( "fmt" + "io" "os" "strings" "time" @@ -9,7 +10,9 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/google/uuid" ) func init() { @@ -57,7 +60,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string { return m } -func (s *S3BackendStorage) NewStorageFile(key string) backend.BackendStorageFile { +func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeTierInfo) backend.BackendStorageFile { if strings.HasPrefix(key, "/") { key = key[1:] } @@ -65,18 +68,35 @@ func (s *S3BackendStorage) NewStorageFile(key string) backend.BackendStorageFile f := &S3BackendStorageFile{ backendStorage: s, key: key, + tierInfo: tierInfo, } return f } +func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) { + randomUuid, _ := uuid.NewRandom() + key = randomUuid.String() + + glog.V(1).Infof("copying dat file of", f.Name(), "to remote s3", s.id, "as", key) + + size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn) + + return +} + type S3BackendStorageFile struct { backendStorage *S3BackendStorage key string + tierInfo *volume_server_pb.VolumeTierInfo } func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) { + bytesRange := 
fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1) + + // glog.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{ Bucket: &s3backendStorageFile.backendStorage.bucket, Key: &s3backendStorageFile.key, @@ -84,13 +104,26 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n }) if getObjectErr != nil { - return 0, fmt.Errorf("bucket %s GetObject %s: %v", - s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr) + return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr) } defer getObjectOutput.Body.Close() - return getObjectOutput.Body.Read(p) + glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) + for { + if n, err = getObjectOutput.Body.Read(p); err == nil && n < len(p) { + p = p[n:] + } else { + break + } + } + + if err == io.EOF { + err = nil + } + + return } func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) { @@ -107,18 +140,15 @@ func (s3backendStorageFile S3BackendStorageFile) Close() error { func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) { - headObjectOutput, headObjectErr := s3backendStorageFile.backendStorage.conn.HeadObject(&s3.HeadObjectInput{ - Bucket: &s3backendStorageFile.backendStorage.bucket, - Key: &s3backendStorageFile.key, - }) + files := s3backendStorageFile.tierInfo.GetFiles() - if headObjectErr != nil { - return 0, time.Now(), fmt.Errorf("bucket %s HeadObject %s: %v", - s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, headObjectErr) + if len(files)==0 { + err = fmt.Errorf("remote file info not found") + return } - datSize = int64(*headObjectOutput.ContentLength) - modTime = *headObjectOutput.LastModified + datSize = int64(files[0].FileSize) + modTime = time.Unix(int64(files[0].ModifiedTime),0) return } -- cgit v1.2.3 From caae543a9f7e45aa674d664970875b5c4cf12185 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 2 Dec 2019 15:54:24 -0800 Subject: fix test --- weed/storage/backend/s3_backend/s3_backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 66299be48..f2d080b62 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -78,7 +78,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen randomUuid, _ := uuid.NewRandom() key = randomUuid.String() - glog.V(1).Infof("copying dat file of", f.Name(), "to remote s3", s.id, "as", key) + glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn) -- cgit v1.2.3 From 70648d35ad1a8321e2e1e51a48f781865907f015 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 2 Dec 2019 20:49:58 -0800 Subject: go fmt --- weed/storage/backend/s3_backend/s3_backend.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 
f2d080b62..0f384aa9c 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -142,13 +142,13 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi files := s3backendStorageFile.tierInfo.GetFiles() - if len(files)==0 { + if len(files) == 0 { err = fmt.Errorf("remote file info not found") return } datSize = int64(files[0].FileSize) - modTime = time.Unix(int64(files[0].ModifiedTime),0) + modTime = time.Unix(int64(files[0].ModifiedTime), 0) return } -- cgit v1.2.3 From 2b8e20f1227648b977fd2a95feba5d6b55e4f5c1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Dec 2019 18:33:25 -0800 Subject: remove unused functions --- weed/storage/backend/s3_backend/s3_backend.go | 7 ------- 1 file changed, 7 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 0f384aa9c..458a9ca72 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -157,10 +157,3 @@ func (s3backendStorageFile S3BackendStorageFile) String() string { return s3backendStorageFile.key } -func (s3backendStorageFile *S3BackendStorageFile) GetName() string { - return "s3" -} - -func (s3backendStorageFile S3BackendStorageFile) Instantiate(src *os.File) error { - panic("implement me") -} -- cgit v1.2.3 From 10bd3c6b4b49ca94dacbd2e065544f1df259fe3c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Dec 2019 19:44:16 -0800 Subject: refactoring --- weed/storage/backend/s3_backend/s3_backend.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 458a9ca72..665f0b9b9 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -153,7 +153,6 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi return } -func (s3backendStorageFile S3BackendStorageFile) String() string { +func (s3backendStorageFile S3BackendStorageFile) Name() string { return s3backendStorageFile.key } - -- cgit v1.2.3 From 356bd1b6290439a5fa7cc2db5802257c7f9b1f36 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Dec 2019 21:44:47 -0800 Subject: adjust text --- weed/storage/backend/s3_backend/s3_backend.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 665f0b9b9..458111f09 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -127,11 +127,11 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n } func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) { - panic("implement me") + panic("not implemented") } func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error { - panic("implement me") + panic("not implemented") } func (s3backendStorageFile S3BackendStorageFile) Close() error { -- cgit v1.2.3 From d960b3474af6956ffcf59e782789001169531db8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 25 Dec 2019 09:53:13 -0800 Subject: tier storage: support downloading the remote dat files --- 
weed/storage/backend/s3_backend/s3_backend.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 458111f09..30f95c319 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -9,10 +9,11 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/google/uuid" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/google/uuid" ) func init() { @@ -85,6 +86,24 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen return } +func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { + + glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) + + size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn) + + return +} + +func (s *S3BackendStorage) DeleteFile(key string) (err error) { + + glog.V(1).Infof("delete dat file %s from remote", key) + + err = deleteFromS3(s.conn, s.bucket, key) + + return +} + type S3BackendStorageFile struct { backendStorage *S3BackendStorage key string -- cgit v1.2.3 From 48d28d3eb2ceabe558ec0c0abf4f66c76175a96e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 25 Dec 2019 21:37:21 -0800 Subject: tier: support remote file attributes and remember the file extension --- weed/storage/backend/s3_backend/s3_backend.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 30f95c319..96a85e504 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -75,13 +75,13 @@ func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb return f } -func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) { +func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) { randomUuid, _ := uuid.NewRandom() key = randomUuid.String() glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) - size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn) + size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn) return } -- cgit v1.2.3 From 2000284435a7f3958cab6e4ed88128b12000da0b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 28 Dec 2019 11:21:49 -0800 Subject: rename volume tier info to volume info --- weed/storage/backend/s3_backend/s3_backend.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 96a85e504..9f03cfa81 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -61,7 +61,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string { return m } -func (s *S3BackendStorage) NewStorageFile(key string, 
tierInfo *volume_server_pb.VolumeTierInfo) backend.BackendStorageFile { +func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile { if strings.HasPrefix(key, "/") { key = key[1:] } @@ -107,7 +107,7 @@ func (s *S3BackendStorage) DeleteFile(key string) (err error) { type S3BackendStorageFile struct { backendStorage *S3BackendStorage key string - tierInfo *volume_server_pb.VolumeTierInfo + tierInfo *volume_server_pb.VolumeInfo } func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) { -- cgit v1.2.3 From d335f04de6861b571190c13bd7d65e9a0c02f187 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 29 Jan 2020 09:09:55 -0800 Subject: support env variables to overwrite toml file --- weed/storage/backend/s3_backend/s3_backend.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'weed/storage/backend/s3_backend/s3_backend.go') diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 9f03cfa81..8d71861c2 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -26,8 +26,8 @@ type S3BackendFactory struct { func (factory *S3BackendFactory) StorageType() backend.StorageType { return backend.StorageType("s3") } -func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, id string) (backend.BackendStorage, error) { - return newS3BackendStorage(configuration, id) +func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) { + return newS3BackendStorage(configuration, configPrefix, id) } type S3BackendStorage struct { @@ -39,13 +39,13 @@ type S3BackendStorage struct { conn s3iface.S3API } -func newS3BackendStorage(configuration backend.StringProperties, id string) (s *S3BackendStorage, err error) { +func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) { s = &S3BackendStorage{} s.id = id - s.aws_access_key_id = configuration.GetString("aws_access_key_id") - s.aws_secret_access_key = configuration.GetString("aws_secret_access_key") - s.region = configuration.GetString("region") - s.bucket = configuration.GetString("bucket") + s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id") + s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key") + s.region = configuration.GetString(configPrefix + "region") + s.bucket = configuration.GetString(configPrefix + "bucket") s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) -- cgit v1.2.3