diff options
| author | Lisandro Pin <lisandro.pin@proton.ch> | 2025-01-30 18:26:45 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-01-30 09:26:45 -0800 |
| commit | 331c1f0f3f1a227b76a6752aa051c031a3655903 (patch) | |
| tree | 50ef161c8afe956da68d190fcde451dc23563891 /weed/shell/command_ec_common.go | |
| parent | 551baa44b2f0884187a582f740ace515214ea34e (diff) | |
| download | seaweedfs-331c1f0f3f1a227b76a6752aa051c031a3655903.tar.xz seaweedfs-331c1f0f3f1a227b76a6752aa051c031a3655903.zip | |
Improve EC shards balancing logic regarding replica placement settings. (#6491)
The replica placement type specifies the number of _replicas_ on the same/different rack;
that means we can have one EC shard copy on each, even if the replica setting is zero.
This PR reworks replica placement parsing for EC rebalancing, so we allow
(replica placement + 1) shard copies when selecting racks and nodes to balance EC shards into.
Diffstat (limited to 'weed/shell/command_ec_common.go')
| -rw-r--r-- | weed/shell/command_ec_common.go | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index f1d7012d8..0affc7365 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -783,8 +783,8 @@ func (ecb *ecBalancer) pickRackToBalanceShardsInto(rackToEcNodes map[RackId]*EcR
 			details += fmt.Sprintf("  Skipped %s because it has no free slots\n", rackId)
 			continue
 		}
-		if ecb.replicaPlacement != nil && shards >= ecb.replicaPlacement.DiffRackCount {
-			details += fmt.Sprintf("  Skipped %s because shards %d >= replica placement limit for other racks (%d)\n", rackId, shards, ecb.replicaPlacement.DiffRackCount)
+		if ecb.replicaPlacement != nil && shards > ecb.replicaPlacement.DiffRackCount {
+			details += fmt.Sprintf("  Skipped %s because shards %d > replica placement limit for other racks (%d)\n", rackId, shards, ecb.replicaPlacement.DiffRackCount)
 			continue
 		}
@@ -977,8 +977,8 @@ func (ecb *ecBalancer) pickEcNodeToBalanceShardsInto(vid needle.VolumeId, existi
 		}
 		shards := nodeShards[node]
-		if ecb.replicaPlacement != nil && shards >= ecb.replicaPlacement.SameRackCount {
-			details += fmt.Sprintf("  Skipped %s because shards %d >= replica placement limit for the rack (%d)\n", node.info.Id, shards, ecb.replicaPlacement.SameRackCount)
+		if ecb.replicaPlacement != nil && shards > ecb.replicaPlacement.SameRackCount {
+			details += fmt.Sprintf("  Skipped %s because shards %d > replica placement limit for the rack (%d)\n", node.info.Id, shards, ecb.replicaPlacement.SameRackCount)
 			continue
 		}
