Diffstat (limited to 'weed/filer/stream.go')
-rw-r--r--  weed/filer/stream.go  34
1 file changed, 30 insertions, 4 deletions
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 075204b79..661a210ea 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -3,6 +3,7 @@ package filer
import (
"bytes"
"fmt"
+ "golang.org/x/sync/errgroup"
"io"
"math"
"strings"
@@ -13,9 +14,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error {
- // fmt.Printf("start to stream content for chunks: %+v\n", chunks)
+ glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
fileId2Url := make(map[string][]string)
@@ -26,19 +27,33 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
+ } else if len(urlStrings) == 0 {
+ glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
+ return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
}
fileId2Url[chunkView.FileId] = urlStrings
}
+ if isCheck {
+ // Pre-check all chunkViews urls
+ gErr := new(errgroup.Group)
+ CheckAllChunkViews(chunkViews, &fileId2Url, gErr)
+ if err := gErr.Wait(); err != nil {
+ glog.Errorf("check all chunks: %v", err)
+ return fmt.Errorf("check all chunks: %v", err)
+ }
+ return nil
+ }
+
for _, chunkView := range chunkViews {
urlStrings := fileId2Url[chunkView.FileId]
-
data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
if err != nil {
glog.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %v", err)
}
+
_, err = w.Write(data)
if err != nil {
glog.Errorf("write chunk: %v", err)
@@ -50,6 +65,17 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
}
+func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
+ for _, chunkView := range chunkViews {
+ urlStrings := (*fileId2Url)[chunkView.FileId]
+ glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings)
+ gErr.Go(func() error {
+ _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+ return err
+ })
+ }
+}
+
// ---------------- ReadAllReader ----------------------------------
func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
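CheckAllChunkViews fans the per-chunk fetches out on a golang.org/x/sync/errgroup.Group, so the pre-check runs concurrently and gErr.Wait surfaces the first failure. A self-contained sketch of that fan-out pattern follows; checkAll and fetch are illustrative stand-ins for CheckAllChunkViews and retriedFetchChunkData, and the per-iteration rebinding of u is the usual precaution for closures launched from a loop on Go toolchains before 1.22:

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	// checkAll probes every URL concurrently and returns the first error, if any.
	// fetch stands in for the real chunk fetcher.
	func checkAll(urls []string, fetch func(string) error) error {
		g := new(errgroup.Group)
		for _, u := range urls {
			u := u // rebind so each goroutine captures its own URL (pre-Go 1.22)
			g.Go(func() error {
				return fetch(u)
			})
		}
		return g.Wait() // blocks until all goroutines finish; returns the first non-nil error
	}

	func main() {
		urls := []string{"http://127.0.0.1:8080/3,0144", "http://127.0.0.1:8081/3,0145"}
		err := checkAll(urls, func(u string) error {
			fmt.Println("checking", u)
			return nil // a real fetcher would issue the HTTP read here
		})
		fmt.Println("pre-check result:", err)
	}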
@@ -181,7 +207,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
var buffer bytes.Buffer
var shouldRetry bool
for _, urlString := range urlStrings {
- shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {
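The last hunk switches fetchChunkToBuffer from FastReadUrlAsStream (with the readDeleted=true query) to util.ReadUrlAsStream while keeping the same replica-retry loop: each URL is tried in turn, and the shouldRetry result decides whether the next replica is worth attempting. A reduced sketch of that loop, with readURL standing in for util.ReadUrlAsStream:

	// tryReplicas attempts each replica URL until one read succeeds or a
	// non-retryable error is reported; readURL stands in for util.ReadUrlAsStream.
	func tryReplicas(urls []string, readURL func(string) (shouldRetry bool, err error)) error {
		var lastErr error
		for _, u := range urls {
			shouldRetry, err := readURL(u)
			if err == nil {
				return nil // this replica served the chunk
			}
			lastErr = err
			if !shouldRetry {
				break // permanent failure, no point in asking other replicas
			}
		}
		return lastErr
	}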