author     Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>   2024-07-16 20:03:40 +0500
committer  GitHub <noreply@github.com>                                       2024-07-16 08:03:40 -0700
commit     67edf1d01413f330d6983125b94e5c3d40a845f2 (patch)
tree       03d337c508e8d66ffaa92c2916f5970cc6c5d53c /weed/topology
parent     ce61a66b651f83b851e85d68e91f62e0aea00ec7 (diff)
[master] Do Automatic Volume Grow in background (#5781)
* Do Automatic Volume Grow in background
* pass lastGrowCount to master
* fix build
* fix count to uint64
Diffstat (limited to 'weed/topology')
-rw-r--r--   weed/topology/topology_info.go    9
-rw-r--r--   weed/topology/volume_growth.go   18
-rw-r--r--   weed/topology/volume_layout.go   20
3 files changed, 37 insertions(+), 10 deletions(-)
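For orientation, the sketch below shows one way the pieces in this diff could drive a background grow pass: list every volume layout, build a grow option from it, and trigger AutomaticGrowByType when the layout looks crowded. The function name backgroundVolumeGrow, the 15-second interval, and the plain log error handling are assumptions for illustration; the actual wiring into the master server sits outside weed/topology and is not part of this diff.

package topology

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

// backgroundVolumeGrow periodically walks all volume layouts and grows the
// crowded ones. Hypothetical helper, not code from this commit.
func backgroundVolumeGrow(topo *Topology, vg *VolumeGrowth, grpcDialOption grpc.DialOption) {
	ticker := time.NewTicker(15 * time.Second) // assumed interval
	defer ticker.Stop()
	for range ticker.C {
		for _, vl := range topo.ListVolumeLyauts() {
			option := vl.ToGrowOption()
			if !vl.ShouldGrowVolumes(option) {
				continue
			}
			// A target of 0 makes AutomaticGrowByType fall back to the
			// strategy-based count from findVolumeCount.
			if _, err := vg.AutomaticGrowByType(option, grpcDialOption, topo, 0); err != nil {
				log.Printf("automatic volume grow: %v", err)
			}
		}
	}
}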
diff --git a/weed/topology/topology_info.go b/weed/topology/topology_info.go
index 120ae0d42..89f9097f6 100644
--- a/weed/topology/topology_info.go
+++ b/weed/topology/topology_info.go
@@ -42,6 +42,15 @@ func (t *Topology) ToInfo() (info TopologyInfo) {
return
}
+func (t *Topology) ListVolumeLyauts() (volumeLayouts []*VolumeLayout) {
+	for _, col := range t.collectionMap.Items() {
+		for _, volumeLayout := range col.(*Collection).storageType2VolumeLayout.Items() {
+			volumeLayouts = append(volumeLayouts, volumeLayout.(*VolumeLayout))
+		}
+	}
+	return volumeLayouts
+}
+
func (t *Topology) ToVolumeMap() interface{} {
m := make(map[string]interface{})
m["Max"] = t.diskUsages.GetMaxVolumeCount()
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index cfc31c8b1..ff516599d 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -27,14 +27,14 @@ This package is created to resolve these replica placement issues:
type VolumeGrowRequest struct {
Option *VolumeGrowOption
- Count int
+ Count uint32
}
type volumeGrowthStrategy struct {
- Copy1Count int
- Copy2Count int
- Copy3Count int
- CopyOtherCount int
+ Copy1Count uint32
+ Copy2Count uint32
+ Copy3Count uint32
+ CopyOtherCount uint32
Threshold float64
}
@@ -75,7 +75,7 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
// one replication type may need rp.GetCopyCount() actual volumes
// given copyCount, how many logical volumes to create
-func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
+func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count uint32) {
switch copyCount {
case 1:
count = VolumeGrowStrategy.Copy1Count
@@ -89,7 +89,7 @@ func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
return
}
-func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (result []*master_pb.VolumeLocation, err error) {
+func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount uint32) (result []*master_pb.VolumeLocation, err error) {
if targetCount == 0 {
targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
}
@@ -99,11 +99,11 @@ func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOp
}
return result, err
}
-func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (result []*master_pb.VolumeLocation, err error) {
+func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount uint32, option *VolumeGrowOption, topo *Topology) (result []*master_pb.VolumeLocation, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
- for i := 0; i < targetCount; i++ {
+ for i := uint32(0); i < targetCount; i++ {
if res, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
result = append(result, res...)
} else {
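As a usage sketch for the uint32 target count introduced above: the helper below (hypothetical name, assumed to live in this package because findVolumeCount is unexported) derives the count from the replica placement and passes it to GrowByCountAndType.

package topology

import (
	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

// growForPlacement picks the number of logical volumes from the replica
// placement and grows them in one locked pass. Illustrative helper only.
func growForPlacement(vg *VolumeGrowth, grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) ([]*master_pb.VolumeLocation, error) {
	// For example, placement "001" has GetCopyCount() == 2, which selects
	// VolumeGrowStrategy.Copy2Count as the target.
	target := vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
	return vg.GrowByCountAndType(grpcDialOption, target, option, topo)
}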
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 5711a6a9b..c33d0ea0c 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -107,7 +107,8 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
growRequest atomic.Bool
- rp *super_block.ReplicaPlacement
+ lastGrowCount atomic.Uint32
+ rp *super_block.ReplicaPlacement
ttl *needle.TTL
diskType types.DiskType
vid2location map[needle.VolumeId]*VolumeLocationList
@@ -354,6 +355,16 @@ func (vl *VolumeLayout) DoneGrowRequest() {
vl.growRequest.Store(false)
}
+func (vl *VolumeLayout) SetLastGrowCount(count uint32) {
+	if vl.lastGrowCount.Load() != count {
+		vl.lastGrowCount.Store(count)
+	}
+}
+
+func (vl *VolumeLayout) GetLastGrowCount() uint32 {
+	return vl.lastGrowCount.Load()
+}
+
func (vl *VolumeLayout) ShouldGrowVolumes(option *VolumeGrowOption) bool {
total, active, crowded := vl.GetActiveVolumeCount(option)
stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "total").Set(float64(total))
@@ -539,6 +550,13 @@ func (vl *VolumeLayout) ToInfo() (info VolumeLayoutInfo) {
return
}
+func (vl *VolumeLayout) ToGrowOption() (option *VolumeGrowOption) {
+	option = &VolumeGrowOption{} // allocate before assigning fields to avoid a nil dereference
+	option.ReplicaPlacement = vl.rp
+	option.Ttl = vl.ttl
+	option.DiskType = vl.diskType
+	return
+}
+
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
vl.accessLock.RLock()
defer vl.accessLock.RUnlock()
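Finally, a hedged reading of the lastGrowCount field this commit adds and, per the commit message, passes to the master: remember an explicitly requested count and reuse it for later automatic grows of the same layout. Both helper names below are hypothetical, and the policy is only one plausible interpretation of these accessors.

// Assumed to live in the topology package; names and policy are illustrative.
func rememberRequestedCount(vl *VolumeLayout, req *VolumeGrowRequest) {
	if req.Count > 0 {
		vl.SetLastGrowCount(req.Count)
	}
}

func targetForAutomaticGrow(vl *VolumeLayout) uint32 {
	// 0 tells AutomaticGrowByType to fall back to VolumeGrowStrategy.
	return vl.GetLastGrowCount()
}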