package filersink

import (
	"context"
	"fmt"
	"sync"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log"
)
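
// replicateChunks copies all sourceChunks to this filer sink in parallel,
// preserving slice order. Each replicated chunk records the id of the source
// chunk it was copied from. A hypothetical caller replicating the chunks of
// an entry received from the source filer might do:
//
//	newChunks, err := fs.replicateChunks(entry.Chunks, path)
//
// If any chunk fails to replicate, one of the errors encountered is returned.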
func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
	if len(sourceChunks) == 0 {
		return
	}

	replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))

	var wg sync.WaitGroup
	var errMu sync.Mutex
	for chunkIndex, sourceChunk := range sourceChunks {
		wg.Add(1)
		go func(chunk *filer_pb.FileChunk, index int) {
			defer wg.Done()
			replicatedChunk, e := fs.replicateOneChunk(chunk, path)
			if e != nil {
				// guard the shared named return value against concurrent writes
				errMu.Lock()
				err = e
				errMu.Unlock()
			}
			// each goroutine writes to a distinct index, so no lock is needed here
			replicatedChunks[index] = replicatedChunk
		}(sourceChunk, chunkIndex)
	}
	wg.Wait()

	return
}
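
// replicateOneChunk fetches one chunk from the source filer, writes it to a
// newly assigned volume on this cluster, and returns a FileChunk that keeps
// the original offset, size, modification time, ETag, and cipher metadata.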
func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path string) (*filer_pb.FileChunk, error) {

	fileId, err := fs.fetchAndWrite(sourceChunk, path)
	if err != nil {
		return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
	}

	return &filer_pb.FileChunk{
		FileId:       fileId,
		Offset:       sourceChunk.Offset,
		Size:         sourceChunk.Size,
		Mtime:        sourceChunk.Mtime,
		ETag:         sourceChunk.ETag,
		SourceFileId: sourceChunk.GetFileIdString(),
		CipherKey:    sourceChunk.CipherKey,
		IsCompressed: sourceChunk.IsCompressed,
	}, nil
}
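
// fetchAndWrite reads the chunk data from the source filer, asks the
// destination filer to assign a volume, and uploads the data to the assigned
// volume server. It returns the new file id on success.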
func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) (fileId string, err error) {

	filename, header, resp, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
	if err != nil {
		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
	}
	defer util.CloseResponse(resp)

	var host string
	var auth security.EncodedJwt

	if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: fs.replication,
			Collection:  fs.collection,
			TtlSec:      fs.ttlSec,
			DataCenter:  fs.dataCenter,
			Path:        path,
		}

		// note: resp here shadows the outer *http.Response; it is the gRPC assign response
		resp, err := client.AssignVolume(context.Background(), request)
		if err != nil {
			log.Infof("assign volume failure %v: %v", request, err)
			return err
		}
		if resp.Error != "" {
			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
		}

		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)

		return nil
	}); err != nil {
		return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
	}
	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)

	log.Tracef("replicating %s to %s header:%+v", filename, fileUrl, header)

	// fetch the data as-is, regardless of whether it is encrypted or not
	uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, header.Get("Content-Encoding") == "gzip", header.Get("Content-Type"), nil, auth)
	if err != nil {
		log.Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
		return "", fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		log.Infof("upload failure %v to %s: %v", filename, fileUrl, uploadResult.Error)
		return "", fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	return
}
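
// ensure *FilerSink satisfies the filer_pb.FilerClient interface at compile time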
var _ filer_pb.FilerClient = (*FilerSink)(nil)
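
// WithFilerClient runs fn with a cached gRPC client connected to this sink's filer.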
func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
	return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
		return fn(client)
	}, fs.grpcAddress, fs.grpcDialOption)
}
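
// AdjustedUrl returns the volume server url unchanged; the filer sink does
// not rewrite locations.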
func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string {
	return location.Url
}