Diffstat (limited to 'weed/replication')
-rw-r--r--  weed/replication/replicator.go                      19
-rw-r--r--  weed/replication/sink/azuresink/azure_sink.go       12
-rw-r--r--  weed/replication/sink/b2sink/b2_sink.go             12
-rw-r--r--  weed/replication/sink/filersink/fetch_write.go      43
-rw-r--r--  weed/replication/sink/filersink/filer_sink.go       66
-rw-r--r--  weed/replication/sink/gcssink/gcs_sink.go           12
-rw-r--r--  weed/replication/sink/replication_sink.go            7
-rw-r--r--  weed/replication/sink/s3sink/s3_sink.go             11
-rw-r--r--  weed/replication/sink/s3sink/s3_write.go            11
-rw-r--r--  weed/replication/source/filer_source.go             33
-rw-r--r--  weed/replication/sub/notification_gocdk_pub_sub.go  50
11 files changed, 156 insertions(+), 120 deletions(-)
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index ac8235fd5..7353cdc91 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -1,6 +1,8 @@
package replication
import (
+ "context"
+ "fmt"
"path/filepath"
"strings"
@@ -29,33 +31,38 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin
}
}
-func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error {
+func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error {
if !strings.HasPrefix(key, r.source.Dir) {
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
return nil
}
- newKey := filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
+ newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]))
glog.V(3).Infof("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(4).Infof("deleting %v", key)
- return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
+ return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
}
if message.OldEntry == nil && message.NewEntry != nil {
glog.V(4).Infof("creating %v", key)
- return r.sink.CreateEntry(key, message.NewEntry)
+ return r.sink.CreateEntry(ctx, key, message.NewEntry)
}
if message.OldEntry == nil && message.NewEntry == nil {
glog.V(0).Infof("weird message %+v", message)
return nil
}
- foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks)
+ foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
if foundExisting {
glog.V(4).Infof("updated %v", key)
return err
}
+ err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+ if err != nil {
+ return fmt.Errorf("delete old entry %v: %v", key, err)
+ }
+
glog.V(4).Infof("creating missing %v", key)
- return r.sink.CreateEntry(key, message.NewEntry)
+ return r.sink.CreateEntry(ctx, key, message.NewEntry)
}
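
With the hunks above applied, Replicate dispatches four ways on the old/new entry pair, with a delete-then-create fallback when an in-place update finds nothing at the sink. A minimal sketch of that control flow; Entry and Sink here are reduced stand-ins for filer_pb.Entry and sink.ReplicationSink, not types introduced by this change:

package sketch

import (
	"context"
	"fmt"
)

// Entry and Sink stand in for filer_pb.Entry and sink.ReplicationSink,
// reduced to the fields and methods used below.
type Entry struct{ IsDirectory bool }

type Sink interface {
	DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
	CreateEntry(ctx context.Context, key string, entry *Entry) error
	UpdateEntry(ctx context.Context, key string, oldEntry *Entry, newParentPath string, newEntry *Entry, deleteIncludeChunks bool) (bool, error)
}

// replicate mirrors the dispatch in Replicator.Replicate after this change.
func replicate(ctx context.Context, s Sink, key, newParentPath string, oldEntry, newEntry *Entry, deleteChunks bool) error {
	switch {
	case oldEntry != nil && newEntry == nil: // source deleted the entry
		return s.DeleteEntry(ctx, key, oldEntry.IsDirectory, deleteChunks)
	case oldEntry == nil && newEntry != nil: // source created the entry
		return s.CreateEntry(ctx, key, newEntry)
	case oldEntry == nil && newEntry == nil: // nothing to do
		return nil
	}
	// Update: try in place; if the sink has no existing entry,
	// remove the stale one and recreate from the new entry.
	if found, err := s.UpdateEntry(ctx, key, oldEntry, newParentPath, newEntry, deleteChunks); found {
		return err
	}
	if err := s.DeleteEntry(ctx, key, oldEntry.IsDirectory, false); err != nil {
		return fmt.Errorf("delete old entry %v: %v", key, err)
	}
	return s.CreateEntry(ctx, key, newEntry)
}
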
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 7acf37fa5..6381908a1 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
return nil
}
-func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -78,8 +78,6 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo
key = key + "/"
}
- ctx := context.Background()
-
if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
@@ -89,7 +87,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo
}
-func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -100,8 +98,6 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
totalSize := filer2.TotalSize(entry.Chunks)
chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
- ctx := context.Background()
-
// Create a URL that references a to-be-created blob in your
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
@@ -113,7 +109,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
if err != nil {
return err
}
@@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 17f5e39b2..35c2230fa 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
return nil
}
-func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -66,8 +66,6 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
key = key + "/"
}
- ctx := context.Background()
-
bucket, err := g.client.Bucket(ctx, g.bucket)
if err != nil {
return err
@@ -79,7 +77,7 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
}
-func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -90,8 +88,6 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
totalSize := filer2.TotalSize(entry.Chunks)
chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
- ctx := context.Background()
-
bucket, err := g.client.Bucket(ctx, g.bucket)
if err != nil {
return err
@@ -102,7 +98,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
if err != nil {
return err
}
@@ -128,7 +124,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index c14566723..97e9671a3 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -3,16 +3,18 @@ package filersink
import (
"context"
"fmt"
+ "google.golang.org/grpc"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
if len(sourceChunks) == 0 {
return
}
@@ -21,7 +23,7 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic
wg.Add(1)
go func(chunk *filer_pb.FileChunk) {
defer wg.Done()
- replicatedChunk, e := fs.replicateOneChunk(chunk)
+ replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
if e != nil {
err = e
}
@@ -33,11 +35,11 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic
return
}
-func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
- fileId, err := fs.fetchAndWrite(sourceChunk)
+ fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
if err != nil {
- return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err)
+ return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
}
return &filer_pb.FileChunk{
@@ -46,21 +48,22 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_
Size: sourceChunk.Size,
Mtime: sourceChunk.Mtime,
ETag: sourceChunk.ETag,
- SourceFileId: sourceChunk.FileId,
+ SourceFileId: sourceChunk.GetFileIdString(),
}, nil
}
-func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
- filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId)
+ filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
if err != nil {
- return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err)
+ return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
defer readCloser.Close()
var host string
+ var auth security.EncodedJwt
- if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -70,13 +73,13 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
DataCenter: fs.dataCenter,
}
- resp, err := client.AssignVolume(context.Background(), request)
+ resp, err := client.AssignVolume(ctx, request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
- fileId, host = resp.FileId, resp.Url
+ fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
return nil
}); err != nil {
@@ -88,7 +91,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
- "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, "")
+ "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
@@ -101,17 +104,13 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
return
}
-func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
- grpcConnection, err := util.GrpcDial(fs.grpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
- }
- defer grpcConnection.Close()
-
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, fs.grpcAddress, fs.grpcDialOption)
- return fn(client)
}
func volumeId(fileId string) string {
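
The hunk above swaps a per-call GrpcDial/Close for util.WithCachedGrpcClient; the same pattern appears in filer_source.go below. Only the call shape is shown in the diff, so the caching body in this sketch is an assumption for illustration, not the actual util implementation:

package sketch

import (
	"context"
	"sync"

	"google.golang.org/grpc"
)

var (
	grpcClientsLock sync.Mutex
	grpcClients     = make(map[string]*grpc.ClientConn)
)

// withCachedGrpcClient reuses one ClientConn per address instead of paying a
// dial and teardown on every call. Illustrative only; the real
// util.WithCachedGrpcClient may differ in caching and error handling.
func withCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opt grpc.DialOption) error {
	grpcClientsLock.Lock()
	conn, found := grpcClients[address]
	if !found {
		var err error
		conn, err = grpc.DialContext(ctx, address, opt)
		if err != nil {
			grpcClientsLock.Unlock()
			return err
		}
		grpcClients[address] = conn
	}
	grpcClientsLock.Unlock()
	return fn(conn)
}
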
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index 2e9cc86d1..f99c7fdf6 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -3,6 +3,9 @@ package filersink
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -13,13 +16,14 @@ import (
)
type FilerSink struct {
- filerSource *source.FilerSource
- grpcAddress string
- dir string
- replication string
- collection string
- ttlSec int32
- dataCenter string
+ filerSource *source.FilerSource
+ grpcAddress string
+ dir string
+ replication string
+ collection string
+ ttlSec int32
+ dataCenter string
+ grpcDialOption grpc.DialOption
}
func init() {
@@ -55,11 +59,12 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
fs.replication = replication
fs.collection = collection
fs.ttlSec = int32(ttlSec)
+ fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
return nil
}
-func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
- return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+ return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
dir, name := filer2.FullPath(key).DirAndName()
@@ -70,7 +75,7 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
}
glog.V(1).Infof("delete entry: %v", request)
- _, err := client.DeleteEntry(context.Background(), request)
+ _, err := client.DeleteEntry(ctx, request)
if err != nil {
glog.V(0).Infof("delete entry %s: %v", key, err)
return fmt.Errorf("delete entry %s: %v", key, err)
@@ -80,12 +85,11 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
})
}
-func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
- return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
dir, name := filer2.FullPath(key).DirAndName()
- ctx := context.Background()
// look up existing entry
lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
@@ -100,7 +104,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
}
- replicatedChunks, err := fs.replicateChunks(entry.Chunks)
+ replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks)
if err != nil {
glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -129,15 +133,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
})
}
-func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
-
- ctx := context.Background()
+func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
dir, name := filer2.FullPath(key).DirAndName()
// read existing entry
var existingEntry *filer_pb.Entry
- err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
@@ -177,11 +179,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
// delete the chunks that are deleted from the source
if deleteIncludeChunks {
// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
- existingEntry.Chunks = minusChunks(existingEntry.Chunks, deletedChunks)
+ existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks)
}
// replicate the chunks that are new in the source
- replicatedChunks, err := fs.replicateChunks(newChunks)
+ replicatedChunks, err := fs.replicateChunks(ctx, newChunks)
if err != nil {
return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
}
@@ -189,10 +191,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
}
// save updated meta data
- return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
- Directory: dir,
+ Directory: newParentPath,
Entry: existingEntry,
}
@@ -205,23 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
}
func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) {
- deletedChunks = minusChunks(oldEntry.Chunks, newEntry.Chunks)
- newChunks = minusChunks(newEntry.Chunks, oldEntry.Chunks)
- return
-}
-
-func minusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
- for _, a := range as {
- found := false
- for _, b := range bs {
- if a.FileId == b.FileId {
- found = true
- break
- }
- }
- if !found {
- delta = append(delta, a)
- }
- }
+ deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
+ newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks)
return
}
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index c1beefc33..abd7c49b9 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
return nil
}
-func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
if isDirectory {
key = key + "/"
}
- if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil {
+ if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil {
return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err)
}
@@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
}
-func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
if entry.IsDirectory {
return nil
@@ -92,13 +92,11 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
totalSize := filer2.TotalSize(entry.Chunks)
chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
- ctx := context.Background()
-
wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
if err != nil {
return err
}
@@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
// TODO improve efficiency
return false, nil
}
diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go
index 0a86139d3..dd54f0005 100644
--- a/weed/replication/sink/replication_sink.go
+++ b/weed/replication/sink/replication_sink.go
@@ -1,6 +1,7 @@
package sink
import (
+ "context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -9,9 +10,9 @@ import (
type ReplicationSink interface {
GetName() string
Initialize(configuration util.Configuration) error
- DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
- CreateEntry(key string, entry *filer_pb.Entry) error
- UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+ DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
+ CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error
+ UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
GetSinkToDirectory() string
SetSourceFiler(s *source.FilerSource)
}
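
Every mutating method now takes a context, and UpdateEntry receives the old entry and the new parent path separately so sinks can handle renames. For orientation, a minimal no-op sink satisfying the updated interface; the NoopSink name and struct are hypothetical, only the method set comes from the interface above:

package sketch

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// NoopSink accepts every event and does nothing; it exists only to show
// the shape an implementation must now have.
type NoopSink struct {
	dir string
}

func (s *NoopSink) GetName() string                                  { return "noop" }
func (s *NoopSink) Initialize(configuration util.Configuration) error { return nil }
func (s *NoopSink) GetSinkToDirectory() string                       { return s.dir }
func (s *NoopSink) SetSourceFiler(src *source.FilerSource)           {}

func (s *NoopSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
	return nil
}

func (s *NoopSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
	return nil
}

func (s *NoopSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
	// Returning false tells the replicator to fall back to delete + create.
	return false, nil
}
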
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 0a4e78318..d5cad3541 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -1,6 +1,7 @@
package S3Sink
import (
+ "context"
"fmt"
"strings"
"sync"
@@ -76,7 +77,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, buc
return nil
}
-func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -88,7 +89,7 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b
}
-func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -111,7 +112,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
wg.Add(1)
go func(chunk *filer2.ChunkView) {
defer wg.Done()
- if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
+ if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil {
err = uploadErr
} else {
parts = append(parts, part)
@@ -125,11 +126,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
return err
}
- return s3sink.completeMultipartUpload(key, uploadId, parts)
+ return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
}
-func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index 5c4be7aee..0a190b27d 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -2,6 +2,7 @@ package S3Sink
import (
"bytes"
+ "context"
"fmt"
"io"
@@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
}
// To complete multipart upload
-func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error {
+func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error {
input := &s3.CompleteMultipartUploadInput{
Bucket: aws.String(s3sink.bucket),
Key: aws.String(key),
@@ -102,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.
}
// To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
var readSeeker io.ReadSeeker
- readSeeker, err := s3sink.buildReadSeeker(chunk)
+ readSeeker, err := s3sink.buildReadSeeker(ctx, chunk)
if err != nil {
glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
@@ -155,8 +156,8 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
return err
}
-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
- fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
+func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+ fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId)
if err != nil {
return nil, err
}
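
The context now flows through the S3 sink's multipart path: CreateEntry drives uploadPart per chunk and finishes with completeMultipartUpload. A sketch of the underlying three-call S3 sequence, sequential for clarity where the real CreateEntry runs uploadPart in goroutines; the multipartPut helper and its signature are assumptions for illustration:

package sketch

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// multipartPut shows the S3 multipart sequence the sink drives:
// create the upload, push each chunk as a numbered part, then complete.
// Parts must carry ordered PartNumbers when completing.
func multipartPut(svc s3iface.S3API, bucket, key string, chunks [][]byte) error {
	created, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	var parts []*s3.CompletedPart
	for i, chunk := range chunks {
		partNumber := int64(i + 1)
		uploaded, err := svc.UploadPart(&s3.UploadPartInput{
			Bucket:     aws.String(bucket),
			Key:        aws.String(key),
			UploadId:   created.UploadId,
			PartNumber: aws.Int64(partNumber),
			Body:       bytes.NewReader(chunk),
		})
		if err != nil {
			return err
		}
		parts = append(parts, &s3.CompletedPart{
			ETag:       uploaded.ETag,
			PartNumber: aws.Int64(partNumber),
		})
	}
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        created.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	return err
}
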
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index efe71e706..d7b5ebc4d 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -3,6 +3,9 @@ package source
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc"
"io"
"net/http"
"strings"
@@ -17,8 +20,9 @@ type ReplicationSource interface {
}
type FilerSource struct {
- grpcAddress string
- Dir string
+ grpcAddress string
+ grpcDialOption grpc.DialOption
+ Dir string
}
func (fs *FilerSource) Initialize(configuration util.Configuration) error {
@@ -31,19 +35,20 @@ func (fs *FilerSource) Initialize(configuration util.Configuration) error {
func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
fs.grpcAddress = grpcAddress
fs.Dir = dir
+ fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
return nil
}
-func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
+func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) {
vid2Locations := make(map[string]*filer_pb.Locations)
vid := volumeId(part)
- err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
- resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
@@ -72,9 +77,9 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
return
}
-func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
+func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
- fileUrl, err := fs.LookupFileId(part)
+ fileUrl, err := fs.LookupFileId(ctx, part)
if err != nil {
return "", nil, nil, err
}
@@ -84,17 +89,13 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
return filename, header, readCloser, err
}
-func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
- grpcConnection, err := util.GrpcDial(fs.grpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
- }
- defer grpcConnection.Close()
-
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, fs.grpcAddress, fs.grpcDialOption)
- return fn(client)
}
func volumeId(fileId string) string {
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
new file mode 100644
index 000000000..9c76e6918
--- /dev/null
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -0,0 +1,50 @@
+package sub
+
+import (
+ "context"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/golang/protobuf/proto"
+ "gocloud.dev/pubsub"
+ _ "gocloud.dev/pubsub/awssnssqs"
+ _ "gocloud.dev/pubsub/azuresb"
+ _ "gocloud.dev/pubsub/gcppubsub"
+ _ "gocloud.dev/pubsub/natspubsub"
+ _ "gocloud.dev/pubsub/rabbitpubsub"
+)
+
+func init() {
+ NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{})
+}
+
+type GoCDKPubSubInput struct {
+ sub *pubsub.Subscription
+}
+
+func (k *GoCDKPubSubInput) GetName() string {
+ return "gocdk_pub_sub"
+}
+
+func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error {
+ subURL := config.GetString("sub_url")
+ glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
+ sub, err := pubsub.OpenSubscription(context.Background(), subURL)
+ if err != nil {
+ return err
+ }
+ k.sub = sub
+ return nil
+}
+
+func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+	msg, err := k.sub.Receive(context.Background())
+	if err != nil {
+		// msg is nil when Receive fails; it must not be touched here.
+		return "", nil, err
+	}
+	key = msg.Metadata["key"]
+	message = &filer_pb.EventNotification{}
+	err = proto.Unmarshal(msg.Body, message)
+	if err != nil {
+		return "", nil, err
+	}
+	// Acknowledge so the broker does not redeliver this message.
+	msg.Ack()
+	return key, message, nil
+}
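
For context, a sketch of how such an input would be consumed, pairing ReceiveMessage with the context-aware Replicator.Replicate from this change. The NotificationInput interface shape (GetName/ReceiveMessage) is inferred from the NotificationInputs registration above, and the consumeLoop driver is illustrative, not code from this commit:

package sketch

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication"
)

// NotificationInput is the assumed shape of entries in sub.NotificationInputs.
type NotificationInput interface {
	GetName() string
	ReceiveMessage() (key string, message *filer_pb.EventNotification, err error)
}

// consumeLoop drains one notification input and applies each event to the
// replicator until the context is cancelled.
func consumeLoop(ctx context.Context, input NotificationInput, replicator *replication.Replicator) {
	for ctx.Err() == nil {
		key, event, err := input.ReceiveMessage()
		if err != nil {
			glog.Errorf("receive from %s: %v", input.GetName(), err)
			continue
		}
		if err := replicator.Replicate(ctx, key, event); err != nil {
			glog.Errorf("replicate %s: %v", key, err)
		}
	}
}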