| author | hilimd <68371223+hilimd@users.noreply.github.com> | 2020-09-03 10:06:09 +0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2020-09-03 10:06:09 +0800 |
| commit | 23baa3c36ce468a36d89abae59f4411cdc446043 | |
| tree | d2e9d7e90530cd14078cc459c7dd1d4ec1ad3e01 /weed/replication | |
| parent | 44a56b158e4637bd70d3fcf8ddc9107973b60558 | |
| parent | 645a4af3db6fa97440fcaca632bb1ddd939f8268 | |
Merge pull request #14 from chrislusf/master
sync
Diffstat (limited to 'weed/replication')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/replication/sink/azuresink/azure_sink.go | 6 |
| -rw-r--r-- | weed/replication/sink/b2sink/b2_sink.go | 6 |
| -rw-r--r-- | weed/replication/sink/filersink/filer_sink.go | 24 |
| -rw-r--r-- | weed/replication/sink/gcssink/gcs_sink.go | 6 |
| -rw-r--r-- | weed/replication/sink/s3sink/s3_sink.go | 8 |
| -rw-r--r-- | weed/replication/sink/s3sink/s3_write.go | 6 |
6 files changed, 28 insertions, 28 deletions
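
Every hunk below makes the same mechanical change: the `filer2` package was renamed to `filer`, so each sink updates one import path plus the `filer2.` qualifiers at its call sites. As a minimal sketch of what that means for a caller (assuming, as the hunks suggest, that only the package path and identifier changed and that `filer.FileSize` returns an unsigned size):

```go
// Hypothetical caller, before this commit:
//
//	import "github.com/chrislusf/seaweedfs/weed/filer2"
//	totalSize := filer2.FileSize(entry)
//
// and after, the same helper under the renamed package:
package example

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func entrySize(entry *filer_pb.Entry) uint64 {
	return filer.FileSize(entry)
}
```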
```diff
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 6419509be..64c3d46ea 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -95,8 +95,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 041cee952..d0b3e7a34 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -84,8 +84,8 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	bucket, err := g.client.Bucket(context.Background(), g.bucket)
 	if err != nil {
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index b90a642c9..7ba1670e0 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/chrislusf/seaweedfs/weed/security"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -92,7 +92,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		}
 		glog.V(1).Infof("lookup: %v", lookupRequest)
 		if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
-			if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
+			if filer.ETag(resp.Entry) == filer.ETag(entry) {
 				glog.V(0).Infof("already replicated %s", key)
 				return nil
 			}
@@ -164,13 +164,13 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 			// skip if already changed
 			// this usually happens when the messages are not ordered
 			glog.V(0).Infof("late updates %s", key)
-		} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
+		} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
 			// skip if no change
 			// this usually happens when retrying the replication
 			glog.V(0).Infof("already replicated %s", key)
 		} else {
 			// find out what changed
-			deletedChunks, newChunks, err := compareChunks(filer2.LookupFn(fs), oldEntry, newEntry)
+			deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
 			if err != nil {
 				return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
 			}
@@ -178,7 +178,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 			// delete the chunks that are deleted from the source
 			if deleteIncludeChunks {
 				// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-				existingEntry.Chunks = filer2.DoMinusChunks(existingEntry.Chunks, deletedChunks)
+				existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
 			}
 
 			// replicate the chunks that are new in the source
@@ -207,21 +207,21 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 	})
 }
 
-func compareChunks(lookupFileIdFn filer2.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-	aData, aMeta, aErr := filer2.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
+func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
 	if aErr != nil {
 		return nil, nil, aErr
 	}
-	bData, bMeta, bErr := filer2.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
+	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
 	if bErr != nil {
 		return nil, nil, bErr
 	}
 
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aData, bData)...)
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aMeta, bMeta)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
 
-	newChunks = append(newChunks, filer2.DoMinusChunks(bData, aData)...)
-	newChunks = append(newChunks, filer2.DoMinusChunks(bMeta, aMeta)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
 
 	return
 }
```
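
The `compareChunks` hunks above are easier to read with the underlying set operation in mind. The following is a self-contained sketch of that idea, not the `filer` package's actual code: it assumes `DoMinusChunks(a, b)` means "chunks in a but not in b", matched by file id, and uses a local `chunk` type as a stand-in for `*filer_pb.FileChunk`.

```go
package main

import "fmt"

// chunk is a stand-in for filer_pb.FileChunk; only the file id matters here.
type chunk struct{ FileId string }

// minusChunks returns the chunks in as that are absent from bs, matched
// by FileId -- the set difference that compareChunks applies in both
// directions to derive deletedChunks and newChunks.
func minusChunks(as, bs []chunk) (delta []chunk) {
	present := make(map[string]bool, len(bs))
	for _, b := range bs {
		present[b.FileId] = true
	}
	for _, a := range as {
		if !present[a.FileId] {
			delta = append(delta, a)
		}
	}
	return
}

func main() {
	oldChunks := []chunk{{"3,01637037d6"}, {"3,02a7f9c1b2"}}
	newChunks := []chunk{{"3,02a7f9c1b2"}, {"3,0349d8e5aa"}}

	fmt.Println("deleted:", minusChunks(oldChunks, newChunks)) // in old, not new
	fmt.Println("new:    ", minusChunks(newChunks, oldChunks)) // in new, not old
}
```

`compareChunks` runs this difference in both directions, over both the resolved data chunks and the manifest chunks, which is why it makes four `DoMinusChunks` calls.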
```diff
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index 82f4d72cf..2e09a87f9 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -8,7 +8,7 @@ import (
 	"cloud.google.com/go/storage"
 	"google.golang.org/api/option"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -89,8 +89,8 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 56fc1930d..4e7df8ff2 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -12,7 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -107,8 +107,8 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return err
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	parts := make([]*s3.CompletedPart, len(chunkViews))
 
@@ -116,7 +116,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	for chunkIndex, chunk := range chunkViews {
 		partId := chunkIndex + 1
 		wg.Add(1)
-		go func(chunk *filer2.ChunkView, index int) {
+		go func(chunk *filer.ChunkView, index int) {
 			defer wg.Done()
 			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
 				err = uploadErr
```
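
The only change inside the goroutine above is the `*filer.ChunkView` parameter type, but the surrounding fan-out is worth seeing in one piece. Below is a hedged, self-contained sketch of the same pattern -- one goroutine per chunk view, each uploading one multipart part, joined by a `sync.WaitGroup`. `chunkView` and `uploadPart` are stand-ins for the real types and method, and the error handling is simplified to a mutex-guarded first error.

```go
package main

import (
	"fmt"
	"sync"
)

// chunkView is a stand-in for filer.ChunkView.
type chunkView struct{ FileId string }

// uploadPart is a stand-in for s3sink.uploadPart; the real method calls
// S3 UploadPart and returns the completed part.
func uploadPart(partId int, c chunkView) (string, error) {
	return fmt.Sprintf("etag-%02d-%s", partId, c.FileId), nil
}

func main() {
	chunks := []chunkView{{"3,01"}, {"3,02"}, {"3,03"}}
	parts := make([]string, len(chunks)) // one slot per part, no locking needed

	var wg sync.WaitGroup
	var mu sync.Mutex
	var firstErr error

	for index, c := range chunks {
		wg.Add(1)
		go func(c chunkView, index int) {
			defer wg.Done()
			partId := index + 1 // S3 part numbers are 1-based
			etag, err := uploadPart(partId, c)
			if err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
				return
			}
			parts[index] = etag // each goroutine owns a distinct index
		}(c, index)
	}
	wg.Wait()

	if firstErr != nil {
		fmt.Println("abort multipart upload:", firstErr)
		return
	}
	fmt.Println("complete multipart upload with parts:", parts)
}
```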
```diff
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index c5c65ed5c..8a8e7a92b 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -9,7 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -103,7 +103,7 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
 }
 
 // To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) {
 	var readSeeker io.ReadSeeker
 
 	readSeeker, err := s3sink.buildReadSeeker(chunk)
@@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
 	return err
 }
 
-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
 	fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
 	if err != nil {
 		return nil, err
```
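
`uploadPart` needs an `io.ReadSeeker` because the AWS SDK's `UploadPart` takes one as the part body (it may rewind it on retry). Below is a hedged sketch of what a `buildReadSeeker`-style helper has to produce, with a plain HTTP fetch standing in for the `filerSource.LookupFileId` lookup plus chunk read; the real helper's behavior may differ.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// buildReadSeeker fetches a chunk from its (hypothetical) volume-server
// URL into memory and wraps it in a *bytes.Reader, which implements
// io.ReadSeeker and can therefore be rewound for upload retries.
func buildReadSeeker(fileUrl string) (io.ReadSeeker, error) {
	resp, err := http.Get(fileUrl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(data), nil
}

func main() {
	// Example URL shape only; a real one comes from LookupFileId.
	rs, err := buildReadSeeker("http://localhost:8080/3,01637037d6")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	size, _ := rs.Seek(0, io.SeekEnd)
	fmt.Println("chunk size in bytes:", size)
}
```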
