aboutsummaryrefslogtreecommitdiff
path: root/go/replication/volume_growth.go
diff options
context:
space:
mode:
authorChris Lu <chris.lu@gmail.com>2014-03-02 22:16:54 -0800
committerChris Lu <chris.lu@gmail.com>2014-03-02 22:16:54 -0800
commit27c74a7e66558a4f9ce0d10621606dfed98a3abb (patch)
treef16eef19480fd51ccbef54c05d39c2eacf309e56 /go/replication/volume_growth.go
parentedae676913363bdd1e5a50bf0778fdcc3c6d6051 (diff)
downloadseaweedfs-27c74a7e66558a4f9ce0d10621606dfed98a3abb.tar.xz
seaweedfs-27c74a7e66558a4f9ce0d10621606dfed98a3abb.zip
Major:
change replication_type to ReplicaPlacement, hopefully cleaner code works for 9 possible ReplicaPlacement xyz x : number of copies on other data centers y : number of copies on other racks z : number of copies on current rack x y z each can be 0,1,2 Minor: weed server "-mdir" default to "-dir" if empty
Diffstat (limited to 'go/replication/volume_growth.go')
-rw-r--r--go/replication/volume_growth.go246
1 file changed, 86 insertions, 160 deletions
diff --git a/go/replication/volume_growth.go b/go/replication/volume_growth.go
index d7d1c90bd..8466b149f 100644
--- a/go/replication/volume_growth.go
+++ b/go/replication/volume_growth.go
@@ -5,7 +5,6 @@ import (
"code.google.com/p/weed-fs/go/storage"
"code.google.com/p/weed-fs/go/topology"
"errors"
- "fmt"
"math/rand"
"sync"
)
@@ -19,188 +18,115 @@ This package is created to resolve these replica placement issues:
*/
type VolumeGrowth struct {
- copy1factor int
- copy2factor int
- copy3factor int
- copyAll int
-
accessLock sync.Mutex
}
func NewDefaultVolumeGrowth() *VolumeGrowth {
- return &VolumeGrowth{copy1factor: 7, copy2factor: 6, copy3factor: 3}
+ return &VolumeGrowth{}
}
-func (vg *VolumeGrowth) AutomaticGrowByType(collection string, repType storage.ReplicationType, dataCenter string, topo *topology.Topology) (count int, err error) {
- factor := 1
- switch repType {
- case storage.Copy000:
- factor = 1
- count, err = vg.GrowByCountAndType(vg.copy1factor, collection, repType, dataCenter, topo)
- case storage.Copy001:
- factor = 2
- count, err = vg.GrowByCountAndType(vg.copy2factor, collection, repType, dataCenter, topo)
- case storage.Copy010:
- factor = 2
- count, err = vg.GrowByCountAndType(vg.copy2factor, collection, repType, dataCenter, topo)
- case storage.Copy100:
- factor = 2
- count, err = vg.GrowByCountAndType(vg.copy2factor, collection, repType, dataCenter, topo)
- case storage.Copy110:
- factor = 3
- count, err = vg.GrowByCountAndType(vg.copy3factor, collection, repType, dataCenter, topo)
- case storage.Copy200:
- factor = 3
- count, err = vg.GrowByCountAndType(vg.copy3factor, collection, repType, dataCenter, topo)
+// one replication type may need rp.GetCopyCount() actual volumes
+// given copyCount, how many logical volumes to create
+func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
+ switch copyCount {
+ case 1:
+ count = 7
+ case 2:
+ count = 6
+ case 3:
+ count = 3
default:
- err = errors.New("Unknown Replication Type!")
+ count = 1
}
- if count > 0 && count%factor == 0 {
+ return
+}
+
+func (vg *VolumeGrowth) AutomaticGrowByType(collection string, rp *storage.ReplicaPlacement, preferredDataCenter string, topo *topology.Topology) (count int, err error) {
+ count, err = vg.GrowByCountAndType(vg.findVolumeCount(rp.GetCopyCount()), collection, rp, preferredDataCenter, topo)
+ if count > 0 && count%rp.GetCopyCount() == 0 {
return count, nil
}
return count, err
}
-func (vg *VolumeGrowth) GrowByCountAndType(count int, collection string, repType storage.ReplicationType, dataCenter string, topo *topology.Topology) (counter int, err error) {
+func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, collection string, rp *storage.ReplicaPlacement, preferredDataCenter string, topo *topology.Topology) (counter int, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
- counter = 0
- switch repType {
- case storage.Copy000:
- for i := 0; i < count; i++ {
- if ok, server, vid := topo.RandomlyReserveOneVolume(dataCenter); ok {
- if err = vg.grow(topo, *vid, collection, repType, server); err == nil {
- counter++
- } else {
- return counter, err
- }
- } else {
- return counter, fmt.Errorf("Failed to grown volume for data center %s", dataCenter)
- }
- }
- case storage.Copy001:
- for i := 0; i < count; i++ {
- //randomly pick one server from the datacenter, and then choose from the same rack
- if ok, server1, vid := topo.RandomlyReserveOneVolume(dataCenter); ok {
- rack := server1.Parent()
- exclusion := make(map[string]topology.Node)
- exclusion[server1.String()] = server1
- newNodeList := topology.NewNodeList(rack.Children(), exclusion)
- if newNodeList.FreeSpace() > 0 {
- if ok2, server2 := newNodeList.ReserveOneVolume(rand.Intn(newNodeList.FreeSpace()), *vid); ok2 {
- if err = vg.grow(topo, *vid, collection, repType, server1, server2); err == nil {
- counter++
- }
- }
- }
- }
- }
- case storage.Copy010:
- for i := 0; i < count; i++ {
- //randomly pick one server from the datacenter, and then choose from the a different rack
- if ok, server1, vid := topo.RandomlyReserveOneVolume(dataCenter); ok {
- rack := server1.Parent()
- dc := rack.Parent()
- exclusion := make(map[string]topology.Node)
- exclusion[rack.String()] = rack
- newNodeList := topology.NewNodeList(dc.Children(), exclusion)
- if newNodeList.FreeSpace() > 0 {
- if ok2, server2 := newNodeList.ReserveOneVolume(rand.Intn(newNodeList.FreeSpace()), *vid); ok2 {
- if err = vg.grow(topo, *vid, collection, repType, server1, server2); err == nil {
- counter++
- }
- }
- }
- }
+ for i := 0; i < targetCount; i++ {
+ if c, e := vg.findAndGrow(topo, preferredDataCenter, collection, rp); e == nil {
+ counter += c
+ } else {
+ return counter, e
}
- case storage.Copy100:
- for i := 0; i < count; i++ {
- nl := topology.NewNodeList(topo.Children(), nil)
- picked, ret := nl.RandomlyPickN(2, 1, dataCenter)
- vid := topo.NextVolumeId()
- if ret {
- var servers []*topology.DataNode
- for _, n := range picked {
- if n.FreeSpace() > 0 {
- if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid, ""); ok {
- servers = append(servers, server)
- }
- }
- }
- if len(servers) == 2 {
- if err = vg.grow(topo, vid, collection, repType, servers...); err == nil {
- counter++
- }
- }
- } else {
- return counter, fmt.Errorf("Failed to grown volume on data center %s and another data center", dataCenter)
- }
+ }
+ return
+}
+
+func (vg *VolumeGrowth) findAndGrow(topo *topology.Topology, preferredDataCenter string, collection string, rp *storage.ReplicaPlacement) (int, error) {
+ servers, e := vg.findEmptySlotsForOneVolume(topo, preferredDataCenter, rp)
+ if e != nil {
+ return 0, e
+ }
+ vid := topo.NextVolumeId()
+ err := vg.grow(topo, vid, collection, rp, servers...)
+ return len(servers), err
+}
+
+func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *topology.Topology, preferredDataCenter string, rp *storage.ReplicaPlacement) (servers []*topology.DataNode, err error) {
+ //find main datacenter and other data centers
+ mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node topology.Node) bool {
+ if preferredDataCenter != "" && node.IsDataCenter() && node.Id() != topology.NodeId(preferredDataCenter) {
+ return false
}
- case storage.Copy110:
- for i := 0; i < count; i++ {
- nl := topology.NewNodeList(topo.Children(), nil)
- picked, ret := nl.RandomlyPickN(2, 2, dataCenter)
- vid := topo.NextVolumeId()
- if ret {
- var servers []*topology.DataNode
- dc1, dc2 := picked[0], picked[1]
- if dc2.FreeSpace() > dc1.FreeSpace() {
- dc1, dc2 = dc2, dc1
- }
- if dc1.FreeSpace() > 0 {
- if ok, server1 := dc1.ReserveOneVolume(rand.Intn(dc1.FreeSpace()), vid, ""); ok {
- servers = append(servers, server1)
- rack := server1.Parent()
- exclusion := make(map[string]topology.Node)
- exclusion[rack.String()] = rack
- newNodeList := topology.NewNodeList(dc1.Children(), exclusion)
- if newNodeList.FreeSpace() > 0 {
- if ok2, server2 := newNodeList.ReserveOneVolume(rand.Intn(newNodeList.FreeSpace()), vid); ok2 {
- servers = append(servers, server2)
- }
- }
- }
- }
- if dc2.FreeSpace() > 0 {
- if ok, server := dc2.ReserveOneVolume(rand.Intn(dc2.FreeSpace()), vid, ""); ok {
- servers = append(servers, server)
- }
- }
- if len(servers) == 3 {
- if err = vg.grow(topo, vid, collection, repType, servers...); err == nil {
- counter++
- }
- }
- }
+ return node.FreeSpace() > rp.DiffRackCount+rp.SameRackCount+1
+ })
+ if dc_err != nil {
+ return nil, dc_err
+ }
+
+ //find main rack and other racks
+ mainRack, otherRacks, rack_err := mainDataCenter.(*topology.DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node topology.Node) bool {
+ return node.FreeSpace() > rp.SameRackCount+1
+ })
+ if rack_err != nil {
+ return nil, rack_err
+ }
+
+	//find main server and other servers
+ mainServer, otherServers, server_err := mainRack.(*topology.Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node topology.Node) bool {
+ return node.FreeSpace() > 1
+ })
+ if server_err != nil {
+ return nil, server_err
+ }
+
+ servers = append(servers, mainServer.(*topology.DataNode))
+ for _, server := range otherServers {
+ servers = append(servers, server.(*topology.DataNode))
+ }
+ for _, rack := range otherRacks {
+ r := rand.Intn(rack.FreeSpace())
+ if server, e := rack.ReserveOneVolume(r); e == nil {
+ servers = append(servers, server)
+ } else {
+ return servers, e
}
- case storage.Copy200:
- for i := 0; i < count; i++ {
- nl := topology.NewNodeList(topo.Children(), nil)
- picked, ret := nl.RandomlyPickN(3, 1, dataCenter)
- vid := topo.NextVolumeId()
- if ret {
- var servers []*topology.DataNode
- for _, n := range picked {
- if n.FreeSpace() > 0 {
- if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid, ""); ok {
- servers = append(servers, server)
- }
- }
- }
- if len(servers) == 3 {
- if err = vg.grow(topo, vid, collection, repType, servers...); err == nil {
- counter++
- }
- }
- }
+ }
+ for _, datacenter := range otherDataCenters {
+ r := rand.Intn(datacenter.FreeSpace())
+ if server, e := datacenter.ReserveOneVolume(r); e == nil {
+ servers = append(servers, server)
+ } else {
+ return servers, e
}
}
return
}
-func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, collection string, repType storage.ReplicationType, servers ...*topology.DataNode) error {
+
+func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, collection string, rp *storage.ReplicaPlacement, servers ...*topology.DataNode) error {
for _, server := range servers {
- if err := AllocateVolume(server, vid, collection, repType); err == nil {
- vi := storage.VolumeInfo{Id: vid, Size: 0, Collection: collection, RepType: repType, Version: storage.CurrentVersion}
+ if err := AllocateVolume(server, vid, collection, rp); err == nil {
+ vi := storage.VolumeInfo{Id: vid, Size: 0, Collection: collection, ReplicaPlacement: rp, Version: storage.CurrentVersion}
server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(&vi, server)
glog.V(0).Infoln("Created Volume", vid, "on", server)