author     chrislu <chris.lu@gmail.com>    2022-11-15 06:33:36 -0800
committer  chrislu <chris.lu@gmail.com>    2022-11-15 06:33:36 -0800
commit     70a4c98b00d811c01289f26d3602992a0d3f45e1 (patch)
tree       8d650eb24623e503ec03ccf45a73f3e61afdbe65 /weed/replication
parent     371972a1c212205b51391c17c064e9ea452fee17 (diff)
download   seaweedfs-70a4c98b00d811c01289f26d3602992a0d3f45e1.tar.xz
           seaweedfs-70a4c98b00d811c01289f26d3602992a0d3f45e1.zip
refactor filer_pb.Entry and filer.Entry to use GetChunks()
for later locking on reading chunks
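
Note: GetChunks() is the usual nil-safe protobuf-style getter, and routing every reader through a single accessor gives one choke point where a read lock can later be added without touching call sites. The sketch below only illustrates that pattern; the mutex, the stand-in types, and main() are assumptions for the example, not the generated filer_pb code.

// Sketch of the accessor pattern this commit moves callers onto.
// The real GetChunks() on filer_pb.Entry is protobuf-generated; this
// hand-written version only shows the shape: a nil-safe getter that
// gives one place to hang the "later locking on reading chunks".
package main

import (
	"fmt"
	"sync"
)

// FileChunk stands in for filer_pb.FileChunk (illustrative only).
type FileChunk struct {
	FileId string
	Size   uint64
}

// Entry stands in for filer.Entry / filer_pb.Entry (illustrative only).
type Entry struct {
	chunksLock sync.RWMutex // hypothetical: the future lock the commit message refers to
	Chunks     []*FileChunk
}

// GetChunks returns the chunk list; because every reader goes through
// this one method, a lock can be added here without changing callers.
func (e *Entry) GetChunks() []*FileChunk {
	if e == nil {
		return nil
	}
	e.chunksLock.RLock()
	defer e.chunksLock.RUnlock()
	return e.Chunks
}

func main() {
	var nilEntry *Entry
	fmt.Println(len(nilEntry.GetChunks())) // 0: safe on a nil receiver

	e := &Entry{Chunks: []*FileChunk{{FileId: "1,0123456789", Size: 1024}}}
	for _, c := range e.GetChunks() {
		fmt.Println(c.FileId, c.Size)
	}
}

With an accessor like this in place, call sites such as the replication sinks below only need the one-line change from entry.Chunks to entry.GetChunks().
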
Diffstat (limited to 'weed/replication')
-rw-r--r--  weed/replication/sink/azuresink/azure_sink.go     2
-rw-r--r--  weed/replication/sink/b2sink/b2_sink.go            2
-rw-r--r--  weed/replication/sink/filersink/filer_sink.go     12
-rw-r--r--  weed/replication/sink/gcssink/gcs_sink.go          2
-rw-r--r--  weed/replication/sink/localsink/local_sink.go      2
5 files changed, 10 insertions, 10 deletions
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 87540383d..9bbd7b8eb 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -103,7 +103,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
}
totalSize := filer.FileSize(entry)
- chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
// Create a URL that references a to-be-created blob in your
// Azure Storage account's container.
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 52883644b..de7899c60 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -92,7 +92,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
}
totalSize := filer.FileSize(entry)
- chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index b922be568..de5ff55cc 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -120,14 +120,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
}
}
- replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
+ replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
if err != nil {
// only warning here since the source chunk may have been deleted already
glog.Warningf("replicate entry chunks %s: %v", key, err)
}
- glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+ glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
request := &filer_pb.CreateEntryRequest{
Directory: dir,
@@ -199,7 +199,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
// delete the chunks that are deleted from the source
if deleteIncludeChunks {
// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
- existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks)
+ existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks)
}
// replicate the chunks that are new in the source
@@ -207,7 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
if err != nil {
return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
}
- existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
+ existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
existingEntry.Attributes = newEntry.Attributes
existingEntry.Extended = newEntry.Extended
existingEntry.HardLinkId = newEntry.HardLinkId
@@ -235,11 +235,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
}
func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
- aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
+ aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64)
if aErr != nil {
return nil, nil, aErr
}
- bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
+ bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64)
if bErr != nil {
return nil, nil, bErr
}
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index 8c9fd5b15..db6ea4aec 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -97,7 +97,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
}
totalSize := filer.FileSize(entry)
- chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
defer wc.Close()
diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go
index e69045336..70f5cfc9d 100644
--- a/weed/replication/sink/localsink/local_sink.go
+++ b/weed/replication/sink/localsink/local_sink.go
@@ -75,7 +75,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa
glog.V(4).Infof("Create Entry key: %s", key)
totalSize := filer.FileSize(entry)
- chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+ chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
dir := filepath.Dir(key)