Diffstat (limited to 'weed/topology')
weed/topology/cluster_commands.go        |  4
weed/topology/data_node.go               |  4
weed/topology/node.go                    |  8
weed/topology/store_replicate.go         | 18
weed/topology/topology.go                | 22
weed/topology/topology_ec.go             |  4
weed/topology/topology_event_handling.go |  4
weed/topology/topology_vacuum.go         | 36
weed/topology/volume_growth.go           | 14
weed/topology/volume_layout.go           | 28
10 files changed, 71 insertions, 71 deletions
diff --git a/weed/topology/cluster_commands.go b/weed/topology/cluster_commands.go
index 6432828e8..10737732e 100644
--- a/weed/topology/cluster_commands.go
+++ b/weed/topology/cluster_commands.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	hashicorpRaft "github.com/hashicorp/raft"
 	"github.com/seaweedfs/raft"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 )
@@ -29,7 +29,7 @@ func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
 	before := topo.GetMaxVolumeId()
 	topo.UpAdjustMaxVolumeId(c.MaxVolumeId)
-	glog.V(1).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())
+	log.V(2).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())
 	return nil, nil
 }
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 3103dc207..0dd52ea96 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"sync/atomic"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
@@ -78,7 +78,7 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
 	for _, v := range existingVolumes {
 		vid := v.Id
 		if _, ok := actualVolumeMap[vid]; !ok {
-			glog.V(0).Infoln("Deleting volume id:", vid)
+			log.V(3).Infoln("Deleting volume id:", vid)
 			disk := dn.getOrCreateDisk(v.DiskType)
 			disk.DeleteVolumeById(vid)
 			deletedVolumes = append(deletedVolumes, v)
diff --git a/weed/topology/node.go b/weed/topology/node.go
index aa178b561..898b13e48 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -8,7 +8,7 @@ import (
 	"sync/atomic"
 	"time"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -76,7 +76,7 @@ func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, option *VolumeGrowOption
 	}
 	n.RUnlock()
 	if len(candidates) < numberOfNodes {
-		glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates")
+		log.V(3).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates")
 		return nil, nil, errors.New("Not enough data nodes found!")
 	}
@@ -247,7 +247,7 @@ func (n *NodeImpl) doLinkChildNode(node Node) {
 		}
 		n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
 		node.SetParent(n)
-		glog.V(0).Infoln(n, "adds child", node.Id())
+		log.V(3).Infoln(n, "adds child", node.Id())
 	}
 }
@@ -261,7 +261,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
 		for dt, du := range node.GetDiskUsages().negative().usages {
 			n.UpAdjustDiskUsageDelta(dt, du)
 		}
-		glog.V(0).Infoln(n, "removes", node.Id())
+		log.V(3).Infoln(n, "removes", node.Id())
 	}
 }
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index a2be991fa..a2b4cc6c2 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -11,7 +11,7 @@ import (
 	"strings"
 	"time"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
@@ -34,7 +34,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt
 		// this is the initial request
 		remoteLocations, err = GetWritableRemoteReplications(s, grpcDialOption, volumeId, masterFn)
 		if err != nil {
-			glog.V(0).Infoln(err)
+			log.V(3).Infoln(err)
 			return
 		}
 	}
@@ -57,7 +57,7 @@
 		if err != nil {
 			stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorWriteToLocalDisk).Inc()
 			err = fmt.Errorf("failed to write to local disk: %v", err)
-			glog.V(0).Infoln(err)
+			log.V(3).Infoln(err)
 			return
 		}
 	}
@@ -93,7 +93,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt
 			err := json.Unmarshal(n.Pairs, &tmpMap)
 			if err != nil {
 				stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorUnmarshalPairs).Inc()
-				glog.V(0).Infoln("Unmarshal pairs error:", err)
+				log.V(3).Infoln("Unmarshal pairs error:", err)
 			}
 			for k, v := range tmpMap {
 				pairMap[needle.PairNamePrefix+k] = v
@@ -118,12 +118,12 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt
 			uploader, err := operation.NewUploader()
 			if err != nil {
-				glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
+				log.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
 				return err
 			}
 			_, err = uploader.UploadData(n.Data, uploadOption)
 			if err != nil {
-				glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
+				log.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
 			}
 			return err
 		})
@@ -131,7 +131,7 @@
 		if err != nil {
 			stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorWriteToReplicas).Inc()
 			err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err)
-			glog.V(0).Infoln(err)
+			log.V(3).Infoln(err)
 			return false, err
 		}
 	}
@@ -147,14 +147,14 @@ func ReplicatedDelete(masterFn operation.GetMasterFn, grpcDialOp
 	if r.FormValue("type") != "replicate" {
 		remoteLocations, err = GetWritableRemoteReplications(store, grpcDialOption, volumeId, masterFn)
 		if err != nil {
-			glog.V(0).Infoln(err)
+			log.V(3).Infoln(err)
 			return
 		}
 	}
 	size, err = store.DeleteVolumeNeedle(volumeId, n)
 	if err != nil {
-		glog.V(0).Infoln("delete error:", err)
+		log.V(3).Infoln("delete error:", err)
 		return
 	}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 750c00ea2..c2a483290 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -17,7 +17,7 @@ import (
 	hashicorpRaft "github.com/hashicorp/raft"
 	"github.com/seaweedfs/raft"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/sequence"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
@@ -143,16 +143,16 @@ func (t *Topology) DoBarrier() bool {
 		return true
 	}
-	glog.V(0).Infof("raft do barrier")
+	log.V(3).Infof("raft do barrier")
 	barrier := t.HashicorpRaft.Barrier(2 * time.Minute)
 	if err := barrier.Error(); err != nil {
-		glog.Errorf("failed to wait for barrier, error %s", err)
+		log.Errorf("failed to wait for barrier, error %s", err)
 		return false
 	}
 	t.barrierDone = true
-	glog.V(0).Infof("raft do barrier success")
+	log.V(3).Infof("raft do barrier success")
 	return true
 }
@@ -326,7 +326,7 @@ func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
 }
 
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-	glog.Infof("removing volume info: %+v from %v", v, dn.id)
+	log.Infof("removing volume info: %+v from %v", v, dn.id)
 	if v.ReplicaPlacement.GetCopyCount() > 1 {
 		stats.MasterReplicaPlacementMismatch.WithLabelValues(v.Collection, v.Id.String()).Set(0)
 	}
@@ -397,7 +397,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
 		if vi, err := storage.NewVolumeInfo(v); err == nil {
 			volumeInfos = append(volumeInfos, vi)
 		} else {
-			glog.V(0).Infof("Fail to convert joined volume information: %v", err)
+			log.V(3).Infof("Fail to convert joined volume information: %v", err)
 		}
 	}
 	// find out the delta volumes
@@ -422,7 +422,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume
 	for _, v := range newVolumes {
 		vi, err := storage.NewVolumeInfoFromShort(v)
 		if err != nil {
-			glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+			log.V(3).Infof("NewVolumeInfoFromShort %v: %v", v, err)
 			continue
 		}
 		newVis = append(newVis, vi)
@@ -430,7 +430,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume
 	for _, v := range deletedVolumes {
 		vi, err := storage.NewVolumeInfoFromShort(v)
 		if err != nil {
-			glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+			log.V(3).Infof("NewVolumeInfoFromShort %v: %v", v, err)
 			continue
 		}
 		oldVis = append(oldVis, vi)
@@ -455,15 +455,15 @@ func (t *Topology) DataNodeRegistration(dcName, rackName string, dn *DataNode) {
 	dc := t.GetOrCreateDataCenter(dcName)
 	rack := dc.GetOrCreateRack(rackName)
 	rack.LinkChildNode(dn)
-	glog.Infof("[%s] reLink To topo ", dn.Id())
+	log.Infof("[%s] reLink To topo ", dn.Id())
 }
 
 func (t *Topology) DisableVacuum() {
-	glog.V(0).Infof("DisableVacuum")
+	log.V(3).Infof("DisableVacuum")
 	t.isDisableVacuum = true
 }
 
 func (t *Topology) EnableVacuum() {
-	glog.V(0).Infof("EnableVacuum")
+	log.V(3).Infof("EnableVacuum")
 	t.isDisableVacuum = false
 }
diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go
index 53762b49a..9339998d8 100644
--- a/weed/topology/topology_ec.go
+++ b/weed/topology/topology_ec.go
@@ -1,7 +1,7 @@
 package topology
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
@@ -115,7 +115,7 @@ func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, d
 }
 
 func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
-	glog.Infof("removing ec shard info:%+v", ecShardInfos)
+	log.Infof("removing ec shard info:%+v", ecShardInfos)
 	t.ecShardMapLock.Lock()
 	defer t.ecShardMapLock.Unlock()
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index e3ad8f2dc..e5de2321e 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -9,7 +9,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"google.golang.org/grpc"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 )
@@ -83,7 +83,7 @@ func (t *Topology) SetVolumeCrowded(volumeInfo storage.VolumeInfo) {
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
 	dn.IsTerminating = true
 	for _, v := range dn.GetVolumes() {
-		glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
+		log.V(3).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
 		diskType := types.ToDiskType(v.DiskType)
 		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 		vl.SetVolumeUnavailable(dn, v.Id)
diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go
index 83be65d7c..8671a8945 100644
--- a/weed/topology/topology_vacuum.go
+++ b/weed/topology/topology_vacuum.go
@@ -15,7 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 )
@@ -43,7 +43,7 @@
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
+			log.V(3).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
 		}
 	}(index, dn.ServerAddress(), vid)
 }
@@ -74,7 +74,7 @@ func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *
 	ch := make(chan bool, locationlist.Length())
 	for index, dn := range locationlist.list {
 		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
-			glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
+			log.V(3).Infoln(index, "Start vacuuming", vid, "on", url)
 			err := operation.WithVolumeServerClient(true, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 				stream, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
 					VolumeId: uint32(vid),
@@ -93,16 +93,16 @@
 						return recvErr
 					}
 				}
-				glog.V(0).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%",
+				log.V(3).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%",
 					index, vid, url, resp.ProcessedBytes, resp.LoadAvg_1M*100)
 				}
 				return nil
 			})
 			if err != nil {
-				glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
+				log.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
 				ch <- false
 			} else {
-				glog.V(0).Infof("Complete vacuuming %d on %s", vid, url)
+				log.V(3).Infof("Complete vacuuming %d on %s", vid, url)
 				ch <- true
 			}
 		}(index, dn.ServerAddress(), vid)
@@ -128,7 +128,7 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V
 	isReadOnly := false
 	isFullCapacity := false
 	for _, dn := range vacuumLocationList.list {
-		glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
+		log.V(3).Infoln("Start Committing vacuum", vid, "on", dn.Url())
 		err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 			resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
 				VolumeId: uint32(vid),
@@ -144,10 +144,10 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V
 			return err
 		})
 		if err != nil {
-			glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
+			log.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
 			isCommitSuccess = false
 		} else {
-			glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
+			log.V(3).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
 		}
 	}
@@ -177,7 +177,7 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V
 				return err
 			})
 			if err != nil {
-				glog.Errorf("Error when checking volume %d status on %s: %v", vid, dn.Url(), err)
+				log.Errorf("Error when checking volume %d status on %s: %v", vid, dn.Url(), err)
 				//we mark volume read-only, since the volume state is unknown
 				isReadOnly = true
 			}
@@ -201,7 +201,7 @@
 
 func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
 	for _, dn := range locationlist.list {
-		glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
+		log.V(3).Infoln("Start cleaning up", vid, "on", dn.Url())
 		err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 			_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
 				VolumeId: uint32(vid),
@@ -209,9 +209,9 @@ func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *
 			return err
 		})
 		if err != nil {
-			glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
+			log.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
 		} else {
-			glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
+			log.V(3).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
 		}
 	}
 }
@@ -221,14 +221,14 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float
 	// if there is vacuum going on, return immediately
 	swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1)
 	if !swapped {
-		glog.V(0).Infof("Vacuum is already running")
+		log.V(3).Infof("Vacuum is already running")
 		return
 	}
 	defer atomic.StoreInt64(&t.vacuumLockCounter, 0)
 
 	// now only one vacuum process going on
 
-	glog.V(1).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d",
+	log.V(2).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d",
 		garbageThreshold, collection, volumeId)
 	for _, col := range t.collectionMap.Items() {
 		c := col.(*Collection)
@@ -255,7 +255,7 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float
 			}
 		}
 		if automatic && t.isDisableVacuum {
-			glog.V(0).Infof("Vacuum is disabled")
+			log.V(3).Infof("Vacuum is disabled")
 			break
 		}
 	}
@@ -348,11 +348,11 @@ func (t *Topology) vacuumOneVolumeId(grpcDialOption grpc.DialOption, volumeLayou
 		return
 	}
 	if !isEnoughCopies {
-		glog.Warningf("skip vacuuming: not enough copies for volume:%d", vid)
+		log.Warningf("skip vacuuming: not enough copies for volume:%d", vid)
 		return
 	}
-	glog.V(1).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
+	log.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
 	if vacuumLocationList, needVacuum := t.batchVacuumVolumeCheck(
 		grpcDialOption, vid, locationList, garbageThreshold); needVacuum {
 		if t.batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) {
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 23e1d5fd6..c028b6236 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -12,7 +12,7 @@ import (
 	"google.golang.org/grpc"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -115,7 +115,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe
 		if res, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
 			result = append(result, res...)
 		} else {
-			glog.V(0).Infof("create %d volume, created %d: %v", targetCount, len(result), e)
+			log.V(3).Infof("create %d volume, created %d: %v", targetCount, len(result), e)
 			return result, e
 		}
 	}
@@ -128,7 +128,7 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo
 		return nil, e
 	}
 	for !topo.LastLeaderChangeTime.Add(constants.VolumePulseSeconds * 2).Before(time.Now()) {
-		glog.V(0).Infof("wait for volume servers to join back")
+		log.V(3).Infof("wait for volume servers to join back")
 		time.Sleep(constants.VolumePulseSeconds / 2)
 	}
 	vid, raftErr := topo.NextVolumeId()
@@ -266,9 +266,9 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid
 				DiskType:         option.DiskType.String(),
 				ModifiedAtSecond: time.Now().Unix(),
 			})
-			glog.V(0).Infof("Created Volume %d on %s", vid, server.NodeImpl.String())
+			log.V(3).Infof("Created Volume %d on %s", vid, server.NodeImpl.String())
 		} else {
-			glog.Warningf("Failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err)
+			log.Warningf("Failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err)
 			growErr = fmt.Errorf("failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err)
 			break
 		}
@@ -279,14 +279,14 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid
 			server := servers[i]
 			server.AddOrUpdateVolume(vi)
 			topo.RegisterVolumeLayout(vi, server)
-			glog.V(0).Infof("Registered Volume %d on %s", vid, server.NodeImpl.String())
+			log.V(3).Infof("Registered Volume %d on %s", vid, server.NodeImpl.String())
 		}
 	} else {
 		// cleaning up created volume replicas
 		for i, vi := range createdVolumes {
 			server := servers[i]
 			if err := DeleteVolume(server, grpcDialOption, vi.Id); err != nil {
-				glog.Warningf("Failed to clean up volume %d on %s", vid, server.NodeImpl.String())
+				log.Warningf("Failed to clean up volume %d on %s", vid, server.NodeImpl.String())
 			}
 		}
 	}
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 852798c19..b82fefe88 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -11,7 +11,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -159,11 +159,11 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
 		vl.vid2location[v.Id] = NewVolumeLocationList()
 	}
 	vl.vid2location[v.Id].Set(dn)
-	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
+	// log.V(-1).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
 	for _, dn := range vl.vid2location[v.Id].list {
 		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
 			if vInfo.ReadOnly {
-				glog.V(1).Infof("vid %d removed from writable", v.Id)
+				log.V(2).Infof("vid %d removed from writable", v.Id)
 				vl.removeFromWritable(v.Id)
 				vl.readonlyVolumes.Add(v.Id, dn)
 				return
@@ -171,7 +171,7 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
 				vl.readonlyVolumes.Remove(v.Id, dn)
 			}
 		} else {
-			glog.V(1).Infof("vid %d removed from writable", v.Id)
+			log.V(2).Infof("vid %d removed from writable", v.Id)
 			vl.removeFromWritable(v.Id)
 			vl.readonlyVolumes.Remove(v.Id, dn)
 			return
@@ -226,15 +226,15 @@ func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
 		vl.setVolumeWritable(vid)
 	} else {
 		if !isEnoughCopies {
-			glog.V(0).Infof("volume %d does not have enough copies", vid)
+			log.V(3).Infof("volume %d does not have enough copies", vid)
 		}
 		if !isAllWritable {
-			glog.V(0).Infof("volume %d are not all writable", vid)
+			log.V(3).Infof("volume %d are not all writable", vid)
 		}
 		if isOversizedVolume {
-			glog.V(1).Infof("volume %d are oversized", vid)
+			log.V(2).Infof("volume %d are oversized", vid)
 		}
-		glog.V(0).Infof("volume %d remove from writable", vid)
+		log.V(3).Infof("volume %d remove from writable", vid)
 		vl.removeFromWritable(vid)
 	}
 }
@@ -402,7 +402,7 @@ func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
 	}
 	vl.removeFromCrowded(vid)
 	if toDeleteIndex >= 0 {
-		glog.V(0).Infoln("Volume", vid, "becomes unwritable")
+		log.V(3).Infoln("Volume", vid, "becomes unwritable")
 		vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
 		return true
 	}
@@ -414,7 +414,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
 			return false
 		}
 	}
-	glog.V(0).Infoln("Volume", vid, "becomes writable")
+	log.V(3).Infoln("Volume", vid, "becomes writable")
 	vl.writables = append(vl.writables, vid)
 	return true
 }
@@ -453,7 +453,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId)
 			vl.readonlyVolumes.Remove(vid, dn)
 			vl.oversizedVolumes.Remove(vid, dn)
 			if location.Length() < vl.rp.GetCopyCount() {
-				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
+				log.V(3).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
 				return vl.removeFromWritable(vid)
 			}
 		}
@@ -493,14 +493,14 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
 	wasWritable := vl.removeFromWritable(vid)
 	if wasWritable {
-		glog.V(0).Infof("Volume %d reaches full capacity.", vid)
+		log.V(3).Infof("Volume %d reaches full capacity.", vid)
 	}
 	return wasWritable
 }
 
 func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
 	if _, ok := vl.crowded[vid]; ok {
-		glog.V(0).Infoln("Volume", vid, "becomes uncrowded")
+		log.V(3).Infoln("Volume", vid, "becomes uncrowded")
 		delete(vl.crowded, vid)
 	}
 }
@@ -508,7 +508,7 @@ func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
 
 func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) {
 	if _, ok := vl.crowded[vid]; !ok {
 		vl.crowded[vid] = struct{}{}
-		glog.V(0).Infoln("Volume", vid, "becomes crowded")
+		log.V(3).Infoln("Volume", vid, "becomes crowded")
 	}
 }
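The pattern applied throughout this diff is a one-for-one swap of the logging import (weed/glog to weed/util/log) with the call shape preserved and the verbosity levels remapped: glog.V(0) becomes log.V(3), glog.V(1) becomes log.V(2), while Infoln/Infof/Errorf/Warningf keep their signatures. A minimal sketch of a migrated call site, assuming only the util/log API that these replacements imply; the volume id and collection name below are made up for illustration:

package main

import (
	"github.com/seaweedfs/seaweedfs/weed/util/log"
)

func main() {
	vid := 42 // hypothetical volume id, for illustration only

	// old: glog.V(0).Infoln("Volume", vid, "becomes writable")
	log.V(3).Infoln("Volume", vid, "becomes writable")

	// old: glog.V(1).Infof("check vacuum on collection:%s volume:%d", "pics", vid)
	log.V(2).Infof("check vacuum on collection:%s volume:%d", "pics", vid)

	// Warning/Error helpers carry over with unchanged signatures.
	log.Warningf("skip vacuuming: not enough copies for volume:%d", vid)
}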
