Diffstat (limited to 'weed/filer')
 weed/filer/filechunk_manifest.go | 41
 weed/filer/filer.go              |  2
 weed/filer/filer_notify.go       | 10
 weed/filer/meta_aggregator.go    |  2
 weed/filer/reader_cache.go       |  3
 weed/filer/stream.go             |  2
 6 files changed, 11 insertions(+), 49 deletions(-)
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index d9d0331be..60a5c538b 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"net/url"
-	"strings"
 	"sync"
 	"time"
 
@@ -122,44 +120,7 @@ func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunction
 		glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
 		return 0, err
 	}
-	return retriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
-}
-
-func retriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64) (n int, err error) {
-
-	var shouldRetry bool
-
-	for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
-		for _, urlString := range urlStrings {
-			n = 0
-			if strings.Contains(urlString, "%") {
-				urlString = url.PathEscape(urlString)
-			}
-			shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, len(buffer), func(data []byte) {
-				if n < len(buffer) {
-					x := copy(buffer[n:], data)
-					n += x
-				}
-			})
-			if !shouldRetry {
-				break
-			}
-			if err != nil {
-				glog.V(0).Infof("read %s failed, err: %v", urlString, err)
-			} else {
-				break
-			}
-		}
-		if err != nil && shouldRetry {
-			glog.V(0).Infof("retry reading in %v", waitTime)
-			time.Sleep(waitTime)
-		} else {
-			break
-		}
-	}
-
-	return n, err
-
+	return util.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
 }
 
 func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 1c6b3c338..518a1ba77 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -69,7 +69,7 @@ func NewFiler(masters pb.ServerDiscovery, grpcDialOption grpc.DialOption, filerH
 		f.UniqueFilerId = -f.UniqueFilerId
 	}
 
-	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
+	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, nil, notifyFn)
 	f.metaLogCollection = collection
 	f.metaLogReplication = replication
 
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index f8a1dd603..8fa3eec2d 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -3,6 +3,7 @@ package filer
 import (
 	"context"
 	"fmt"
+	"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
 	"io"
 	"math"
 	"regexp"
@@ -113,11 +114,10 @@ var (
 	VolumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`)
 )
 
-func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {
+func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {
 
-	startTime = startTime.UTC()
-	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
-	startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())
+	startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
+	startHourMinute := fmt.Sprintf("%02d-%02d", startPosition.Hour(), startPosition.Minute())
 	var stopDate, stopHourMinute string
 	if stopTsNs != 0 {
 		stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
@@ -126,7 +126,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, each
 	}
 
 	sizeBuf := make([]byte, 4)
-	startTsNs := startTime.UnixNano()
+	startTsNs := startPosition.UnixNano()
 
 	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
 	if listDayErr != nil {
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 0433a63a0..d013d5a19 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -43,7 +43,7 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.
 		peerStatues: make(map[pb.ServerAddress]int),
 	}
 	t.ListenersCond = sync.NewCond(&t.ListenersLock)
-	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {
+	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, nil, func() {
 		t.ListenersCond.Broadcast()
 	})
 	return t
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index 27d40a78b..f40bb1285 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -2,6 +2,7 @@ package filer
 
 import (
 	"fmt"
+	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -170,7 +171,7 @@ func (s *SingleChunkCacher) startCaching() {
 
 	s.data = mem.Allocate(s.chunkSize)
 
-	_, s.err = retriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
+	_, s.err = util.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
 	if s.err != nil {
 		mem.Free(s.data)
 		s.data = nil
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 73a2a219c..51a82fb2e 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -176,7 +176,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer
 			return err
 		}
 
-		n, err := retriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
+		n, err := util.RetriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
 		if err != nil {
 			return err
 		}
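For context: the core of this change is that the retry loop deleted from filechunk_manifest.go now lives in weed/util as RetriedFetchChunkData, so reader_cache.go and stream.go call the shared helper instead of a package-local copy. Below is a simplified, standalone sketch of that retry-with-backoff pattern, based on the deleted function body shown above: the wait time starts at one second and grows by 50% per round until a limit is reached, trying each replica URL in turn. The fetchOnce type, retriedFetch function, and retryWaitLimit parameter are illustrative stand-ins, not the real SeaweedFS API.

// Simplified sketch of the retry loop this commit moves into weed/util.
// Names here (fetchOnce, retriedFetch, retryWaitLimit) are illustrative only.
package main

import (
	"fmt"
	"time"
)

// fetchOnce models a single read attempt against one replica URL. It reports
// whether the failure is worth retrying (e.g. a transient network error).
type fetchOnce func(urlString string) (shouldRetry bool, err error)

// retriedFetch tries each URL in turn; if every replica fails with a retryable
// error, it sleeps and retries, growing the wait by 50% per round (1s, 1.5s,
// 2.25s, ...) until retryWaitLimit is reached.
func retriedFetch(urls []string, retryWaitLimit time.Duration, do fetchOnce) (err error) {
	for waitTime := time.Second; waitTime < retryWaitLimit; waitTime += waitTime / 2 {
		for _, u := range urls {
			var shouldRetry bool
			shouldRetry, err = do(u)
			if err == nil || !shouldRetry {
				return err // success, or a permanent failure: stop here
			}
		}
		time.Sleep(waitTime) // all replicas failed with retryable errors; back off
	}
	return err
}

func main() {
	attempts := 0
	err := retriedFetch([]string{"replica-a", "replica-b"}, 4*time.Second,
		func(u string) (bool, error) {
			attempts++
			if attempts < 3 {
				return true, fmt.Errorf("transient read error from %s", u)
			}
			return false, nil
		})
	fmt.Println("attempts:", attempts, "err:", err)
}

The real helper additionally escapes URLs containing "%", appends "?readDeleted=true", copies the streamed bytes into the caller-supplied buffer, and logs each failed attempt, as visible in the removed code above.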
