Diffstat (limited to 'weed/filer')
-rw-r--r--    weed/filer/configuration.go      5
-rw-r--r--    weed/filer/filer.go             45
-rw-r--r--    weed/filer/filer_notify.go      38
-rw-r--r--    weed/filer/meta_aggregator.go   30
4 files changed, 79 insertions, 39 deletions
diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go
index 9ef2f3e0f..85fc65d13 100644
--- a/weed/filer/configuration.go
+++ b/weed/filer/configuration.go
@@ -12,7 +12,7 @@ var (
     Stores []FilerStore
 )
 
-func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
+func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) {
 
     validateOneEnabledStore(config)
 
@@ -24,7 +24,7 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
             if err := store.Initialize(config, store.GetName()+"."); err != nil {
                 glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
             }
-            f.SetStore(store)
+            isFresh = f.SetStore(store)
             glog.V(0).Infof("configured filer store to %s", store.GetName())
             hasDefaultStoreConfigured = true
             break
@@ -77,6 +77,7 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
         glog.V(0).Infof("configure filer %s for %s", store.GetName(), location)
     }
 
+    return
 }
 
 func validateOneEnabledStore(config *util.ViperProxy) {
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 59cbf4d75..81d2aa158 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -7,6 +7,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/pb"
     "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
     "os"
+    "sort"
     "strings"
     "time"
 
@@ -68,13 +69,33 @@ func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOptio
     return f
 }
 
-func (f *Filer) AggregateFromPeers(self pb.ServerAddress) {
+func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) {
+    if len(existingNodes) == 0 {
+        return
+    }
+    sort.Slice(existingNodes, func(i, j int) bool {
+        return existingNodes[i].CreatedAtNs < existingNodes[j].CreatedAtNs
+    })
+    earliestNode := existingNodes[0]
+    if earliestNode.Address == string(self) {
+        return
+    }
+
+    glog.V(0).Infof("bootstrap from %v", earliestNode.Address)
+    err = pb.FollowMetadata(pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, "bootstrap", int32(f.UniqueFileId), "/", nil,
+        0, snapshotTime.UnixNano(), f.Signature, func(resp *filer_pb.SubscribeMetadataResponse) error {
+            return Replay(f.Store, resp)
+        }, true)
+    return
+}
+
+func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, startFrom time.Time) {
 
     f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
     f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate
 
-    for _, peerUpdate := range f.ListExistingPeerUpdates() {
-        f.MetaAggregator.OnPeerUpdate(peerUpdate)
+    for _, peerUpdate := range existingNodes {
+        f.MetaAggregator.OnPeerUpdate(peerUpdate, startFrom)
     }
 }
 
@@ -90,10 +111,11 @@ func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNod
         glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes))
         for _, node := range resp.ClusterNodes {
             existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
-                NodeType: cluster.FilerType,
-                Address:  node.Address,
-                IsLeader: node.IsLeader,
-                IsAdd:    true,
+                NodeType:    cluster.FilerType,
+                Address:     node.Address,
+                IsLeader:    node.IsLeader,
+                IsAdd:       true,
+                CreatedAtNs: node.CreatedAtNs,
             })
         }
         return err
@@ -103,14 +125,13 @@ func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNod
     return
 }
 
-func (f *Filer) SetStore(store FilerStore) {
+func (f *Filer) SetStore(store FilerStore) (isFresh bool) {
     f.Store = NewFilerStoreWrapper(store)
 
-    f.setOrLoadFilerStoreSignature(store)
-
+    return f.setOrLoadFilerStoreSignature(store)
 }
 
-func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
+func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) (isFresh bool) {
     storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId))
     if err == ErrKvNotFound || err == nil && len(storeIdBytes) == 0 {
         f.Signature = util.RandomInt32()
@@ -120,12 +141,14 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
             glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
         }
         glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+        return true
     } else if err == nil && len(storeIdBytes) == 4 {
         f.Signature = int32(util.BytesToUint32(storeIdBytes))
         glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
     } else {
         glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
    }
+    return false
 }
 
 func (f *Filer) GetStore() (store FilerStore) {
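
The configuration.go and filer.go changes combine into one startup flow: LoadConfiguration now reports, through SetStore and setOrLoadFilerStoreSignature, whether the store is brand new (no "filer.store.id" key yet), and MaybeBootstrapFromPeers lets such a fresh filer clone a metadata snapshot from the earliest-created peer before tailing live updates. Below is a minimal sketch of how a caller might wire these together; the actual call site lives outside weed/filer, and startFiler and startupTime are illustrative names only, not part of this diff.

package example

import (
    "fmt"
    "time"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/util"
)

// startFiler is a hypothetical caller showing the intended call order.
func startFiler(f *filer.Filer, self pb.ServerAddress, config *util.ViperProxy) error {
    startupTime := time.Now()

    // isFresh is true only when setOrLoadFilerStoreSignature had to create a
    // new "filer.store.id", i.e. this store has never held any metadata.
    isFresh := f.LoadConfiguration(config)

    existingNodes := f.ListExistingPeerUpdates()
    if isFresh {
        // Clone a snapshot from the earliest-created peer (smallest
        // CreatedAtNs), so the new filer does not join the cluster empty.
        if err := f.MaybeBootstrapFromPeers(self, existingNodes, startupTime); err != nil {
            return fmt.Errorf("bootstrap from peers: %v", err)
        }
    }

    // Tail ongoing metadata changes from the same point in time, so nothing
    // falls between the bootstrap snapshot and the live subscription.
    f.AggregateFromPeers(self, existingNodes, startupTime)
    return nil
}
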
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 7857d79af..4d26a695c 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -108,24 +108,35 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
     }
 }
 
-func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {
 
     startTime = startTime.UTC()
     startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
     startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())
+    var stopDate, stopHourMinute string
+    if stopTsNs != 0 {
+        stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
+        stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
+        stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
+    }
 
     sizeBuf := make([]byte, 4)
     startTsNs := startTime.UnixNano()
 
-    dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
+    dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
     if listDayErr != nil {
-        return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
+        return lastTsNs, isDone, fmt.Errorf("fail to list log by day: %v", listDayErr)
     }
     for _, dayEntry := range dayEntries {
+        if stopDate != "" {
+            if strings.Compare(dayEntry.Name(), stopDate) > 0 {
+                break
+            }
+        }
         // println("checking day", dayEntry.FullPath)
         hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
         if listHourMinuteErr != nil {
-            return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+            return lastTsNs, isDone, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
         }
         for _, hourMinuteEntry := range hourMinuteEntries {
             // println("checking hh-mm", hourMinuteEntry.FullPath)
@@ -135,23 +146,29 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
                     continue
                 }
             }
+            if dayEntry.Name() == stopDate {
+                hourMinute := util.FileNameBase(hourMinuteEntry.Name())
+                if strings.Compare(hourMinute, stopHourMinute) > 0 {
+                    break
+                }
+            }
             // println("processing", hourMinuteEntry.FullPath)
             chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
-            if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+            if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
                 chunkedFileReader.Close()
                 if err == io.EOF {
                     continue
                 }
-                return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+                return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
             }
             chunkedFileReader.Close()
         }
     }
 
-    return lastTsNs, nil
+    return lastTsNs, isDone, nil
 }
 
-func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
     for {
         n, err := r.Read(sizeBuf)
         if err != nil {
@@ -174,9 +191,12 @@ func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func
         if err = proto.Unmarshal(entryData, logEntry); err != nil {
             return lastTsNs, err
         }
-        if logEntry.TsNs <= ns {
+        if logEntry.TsNs <= startTsNs {
             continue
         }
+        if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
+            return lastTsNs, err
+        }
         // println("each log: ", logEntry.TsNs)
         if err := eachLogEntryFn(logEntry); err != nil {
             return lastTsNs, err
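
The filer_notify.go changes give log replay an upper bound: entries at or before startTsNs are skipped, and when stopTsNs is non-zero the first entry past it ends the scan, so the delivered window is (startTsNs, stopTsNs]. A small self-contained sketch of that contract against an in-memory log follows; the encode helper and the sample timestamps are illustrative, and it assumes ReadEachLogEntry advances lastTsNs after each delivered entry (the body between the shown hunks is not part of this diff).

package main

import (
    "bytes"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
    "github.com/golang/protobuf/proto"
)

// encode appends one length-prefixed LogEntry, the same
// 4-byte-size-then-data layout that ReadEachLogEntry consumes.
func encode(buf *bytes.Buffer, tsNs int64) {
    data, _ := proto.Marshal(&filer_pb.LogEntry{TsNs: tsNs})
    sizeBuf := make([]byte, 4)
    util.Uint32toBytes(sizeBuf, uint32(len(data)))
    buf.Write(sizeBuf)
    buf.Write(data)
}

func main() {
    var buf bytes.Buffer
    for _, ts := range []int64{10, 20, 30, 40} {
        encode(&buf, ts)
    }
    // startTsNs=10 is excluded, stopTsNs=30 is included, and the entry at 40
    // ends the scan: the callback sees only 20 and 30.
    lastTsNs, err := filer.ReadEachLogEntry(&buf, make([]byte, 4), 10, 30,
        func(e *filer_pb.LogEntry) error {
            fmt.Println("replayed", e.TsNs)
            return nil
        })
    fmt.Println(lastTsNs, err) // expected: 30 <nil>
}
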
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 863f5c3e9..fb96ee01b 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -48,7 +48,7 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.
     return t
 }
 
-func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate) {
+func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) {
     if update.NodeType != cluster.FilerType {
         return
     }
@@ -57,7 +57,7 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate) {
     if update.IsAdd {
         // every filer should subscribe to a new filer
         if ma.setActive(address, true) {
-            go ma.loopSubscribeToOnefiler(ma.filer, ma.self, address)
+            go ma.loopSubscribeToOnefiler(ma.filer, ma.self, address, startFrom)
         }
     } else {
         ma.setActive(address, false)
@@ -89,21 +89,25 @@ func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
     return count > 0 && isActive
 }
 
-func (ma *MetaAggregator) loopSubscribeToOnefiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {
+func (ma *MetaAggregator) loopSubscribeToOnefiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) {
+    lastTsNs := startFrom.UnixNano()
     for {
-        err := ma.doSubscribeToOneFiler(f, self, peer)
+        glog.V(0).Infof("loopSubscribeToOnefiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
+        nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)
         if !ma.isActive(peer) {
             glog.V(0).Infof("stop subscribing remote %s meta change", peer)
             return
         }
         if err != nil {
             glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+        } else if lastTsNs < nextLastTsNs {
+            lastTsNs = nextLastTsNs
         }
         time.Sleep(1733 * time.Millisecond)
     }
 }
 
-func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) error {
+func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom int64) (int64, error) {
 
     /*
         Each filer reads the "filer.store.id", which is the store's signature when filer starts.
@@ -117,18 +121,15 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 
     var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
     lastPersistTime := time.Now()
-    lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano()
+    lastTsNs := startFrom
 
     peerSignature, err := ma.readFilerStoreSignature(peer)
-    for err != nil {
-        glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
-        time.Sleep(1357 * time.Millisecond)
-        peerSignature, err = ma.readFilerStoreSignature(peer)
+    if err != nil {
+        return lastTsNs, fmt.Errorf("connecting to peer filer %s: %v", peer, err)
     }
 
     // when filer store is not shared by multiple filers
     if peerSignature != f.Signature {
-        lastTsNs = 0
         if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil {
             lastTsNs = prevTsNs
             defer func(prevTsNs int64) {
@@ -215,7 +216,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
         }
     })
 
-    return err
+    return lastTsNs, err
 }
 
 func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) {
@@ -241,11 +242,6 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat
 
     value, err := f.Store.KvGet(context.Background(), key)
 
-    if err == ErrKvNotFound {
-        glog.Warningf("readOffset %s not found", peer)
-        return 0, nil
-    }
-
     if err != nil {
         return 0, fmt.Errorf("readOffset %s : %v", peer, err)
     }
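
In meta_aggregator.go the resume position now survives reconnects: loopSubscribeToOnefiler seeds lastTsNs from startFrom, doSubscribeToOneFiler returns how far it got, and the loop advances only on success, instead of restarting every attempt at time.Now().Add(-LogFlushInterval). An unreachable peer now fails fast with an error instead of blocking in an internal retry loop, and a missing stored offset (ErrKvNotFound) now surfaces to the caller rather than being silently treated as offset 0. The retry-with-resume pattern in isolation, as a sketch rather than the SeaweedFS API; subscribe stands in for doSubscribeToOneFiler:

package example

import (
    "log"
    "time"
)

// tail keeps one subscription alive, resuming from the last timestamp that
// was successfully processed rather than re-reading from a fixed window.
func tail(subscribe func(startTsNs int64) (int64, error), startFrom int64) {
    lastTsNs := startFrom
    for {
        nextTsNs, err := subscribe(lastTsNs)
        if err != nil {
            log.Printf("subscribe: %v", err) // keep lastTsNs; retry from the same offset
        } else if lastTsNs < nextTsNs {
            lastTsNs = nextTsNs // advance only after a clean disconnect
        }
        time.Sleep(1733 * time.Millisecond) // same pause as loopSubscribeToOnefiler
    }
}
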
