diff options
| author | Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com> | 2024-11-25 22:30:37 +0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-11-25 09:30:37 -0800 |
| commit | 8836fa19b697429d09359897fc2aff52e626947b (patch) | |
| tree | 4a70826bb6a4d94d71f0fcfa4b2252bb32dd4acc /weed/topology | |
| parent | 167b50be889f056b31ff2f178079541167e644ed (diff) | |
| download | seaweedfs-8836fa19b697429d09359897fc2aff52e626947b.tar.xz seaweedfs-8836fa19b697429d09359897fc2aff52e626947b.zip | |
use ShouldGrowVolumesByDcAndRack (#6280)
Diffstat (limited to 'weed/topology')
| -rw-r--r-- | weed/topology/topology.go | 13 | ||||
| -rw-r--r-- | weed/topology/volume_layout.go | 31 |
2 files changed, 25 insertions, 19 deletions
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index e436b453a..be50eecdf 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -369,6 +369,19 @@ func (t *Topology) ListDataCenters() (dcs []string) {
 	return dcs
 }
 
+func (t *Topology) ListDCAndRacks() (dcs map[NodeId][]NodeId) {
+	t.RLock()
+	defer t.RUnlock()
+	dcs = make(map[NodeId][]NodeId)
+	for _, dcNode := range t.children {
+		dcNodeId := dcNode.(*DataCenter).Id()
+		for _, rackNode := range dcNode.Children() {
+			dcs[dcNodeId] = append(dcs[dcNodeId], rackNode.(*Rack).Id())
+		}
+	}
+	return dcs
+}
+
 func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformationMessage, dn *DataNode) (newVolumes, deletedVolumes []storage.VolumeInfo) {
 	// convert into in memory struct storage.VolumeInfo
 	var volumeInfos []storage.VolumeInfo
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 3a360ff99..94493a177 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -365,25 +365,10 @@ func (vl *VolumeLayout) ShouldGrowVolumes() bool {
 	return writable <= crowded
 }
 
-func (vl *VolumeLayout) ShouldGrowVolumesByDataNode(nodeType string, dataNode string) bool {
-	vl.accessLock.RLock()
-	writables := make([]needle.VolumeId, len(vl.writables))
-	copy(writables, vl.writables)
-	vl.accessLock.RUnlock()
-
-	dataNodeId := NodeId(dataNode)
-	for _, v := range writables {
-		for _, dn := range vl.vid2location[v].list {
-			dataNodeFound := false
-			switch nodeType {
-			case "DataCenter":
-				dataNodeFound = dn.GetDataCenter().Id() == dataNodeId
-			case "Rack":
-				dataNodeFound = dn.GetRack().Id() == dataNodeId
-			case "DataNode":
-				dataNodeFound = dn.Id() == dataNodeId
-			}
-			if dataNodeFound {
+func (vl *VolumeLayout) ShouldGrowVolumesByDcAndRack(writables *[]needle.VolumeId, dcId NodeId, rackId NodeId) bool {
+	for _, v := range *writables {
+		for _, dn := range vl.Lookup(v) {
+			if dn.GetDataCenter().Id() == dcId && dn.GetRack().Id() == rackId {
 				if info, err := dn.GetVolumesById(v); err == nil && !vl.isCrowdedVolume(&info) {
 					return false
 				}
@@ -399,6 +384,14 @@ func (vl *VolumeLayout) GetWritableVolumeCount() (active, crowded int) {
 	return len(vl.writables), len(vl.crowded)
 }
 
+func (vl *VolumeLayout) CloneWritableVolumes() (writables []needle.VolumeId) {
+	vl.accessLock.RLock()
+	writables = make([]needle.VolumeId, len(vl.writables))
+	copy(writables, vl.writables)
+	vl.accessLock.RUnlock()
+	return writables
+}
+
 func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
 	toDeleteIndex := -1
 	for k, id := range vl.writables {
