| field | value | date |
|---|---|---|
| author | Chris Lu <chris.lu@gmail.com> | 2020-12-13 03:11:24 -0800 |
| committer | Chris Lu <chris.lu@gmail.com> | 2020-12-13 03:11:24 -0800 |
| commit | d156c74ec097add1954fa2a0a9a55eb02b44bb0e (patch) | |
| tree | 872edef3665d27b852631303f031aa97470f4ad5 /weed/topology | |
| parent | e9cd798bd372741753efcba2af594b00fe7b8437 (diff) | |
| download | seaweedfs-d156c74ec097add1954fa2a0a9a55eb02b44bb0e.tar.xz, seaweedfs-d156c74ec097add1954fa2a0a9a55eb02b44bb0e.zip | |
volume server set volume type and heartbeat to the master
Diffstat (limited to 'weed/topology')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/topology/collection.go | 11 |
| -rw-r--r-- | weed/topology/data_center.go | 3 |
| -rw-r--r-- | weed/topology/data_node.go | 21 |
| -rw-r--r-- | weed/topology/node.go | 36 |
| -rw-r--r-- | weed/topology/rack.go | 6 |
| -rw-r--r-- | weed/topology/topology.go | 19 |
| -rw-r--r-- | weed/topology/topology_event_handling.go | 8 |
| -rw-r--r-- | weed/topology/topology_map.go | 4 |
| -rw-r--r-- | weed/topology/topology_test.go | 27 |
9 files changed, 120 insertions, 15 deletions
diff --git a/weed/topology/collection.go b/weed/topology/collection.go
index e3aab2f76..d2bdeab62 100644
--- a/weed/topology/collection.go
+++ b/weed/topology/collection.go
@@ -44,6 +44,17 @@ func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, t
     return vl.(*VolumeLayout)
 }
 
+func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) {
+    keyString := rp.String()
+    if ttl != nil {
+        keyString += ttl.String()
+    }
+    if volumeType != storage.HardDriveType {
+        keyString += string(volumeType)
+    }
+    c.storageType2VolumeLayout.Delete(keyString)
+}
+
 func (c *Collection) Lookup(vid needle.VolumeId) []*DataNode {
     for _, vl := range c.storageType2VolumeLayout.Items() {
         if vl != nil {
diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go
index dc3accb71..6ca94b52d 100644
--- a/weed/topology/data_center.go
+++ b/weed/topology/data_center.go
@@ -31,6 +31,7 @@ func (dc *DataCenter) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Id"] = dc.Id()
     m["Max"] = dc.GetMaxVolumeCount()
+    m["MaxSsd"] = dc.GetMaxSsdVolumeCount()
     m["Free"] = dc.FreeSpace()
     var racks []interface{}
     for _, c := range dc.Children() {
@@ -46,6 +47,8 @@ func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo {
         Id:                string(dc.Id()),
         VolumeCount:       uint64(dc.GetVolumeCount()),
         MaxVolumeCount:    uint64(dc.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(dc.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(dc.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(dc.FreeSpace()),
         ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()),
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 0a4df63d0..0ea234202 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -50,7 +50,11 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
 func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
     if oldV, ok := dn.volumes[v.Id]; !ok {
         dn.volumes[v.Id] = v
-        dn.UpAdjustVolumeCountDelta(1)
+        if v.VolumeType == storage.SsdType {
+            dn.UpAdjustSsdVolumeCountDelta(1)
+        } else {
+            dn.UpAdjustVolumeCountDelta(1)
+        }
         if v.IsRemote() {
             dn.UpAdjustRemoteVolumeCountDelta(1)
         }
@@ -89,7 +93,11 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
             glog.V(0).Infoln("Deleting volume id:", vid)
             delete(dn.volumes, vid)
             deletedVolumes = append(deletedVolumes, v)
-            dn.UpAdjustVolumeCountDelta(-1)
+            if v.VolumeType == storage.SsdType {
+                dn.UpAdjustSsdVolumeCountDelta(-1)
+            } else {
+                dn.UpAdjustVolumeCountDelta(-1)
+            }
             if v.IsRemote() {
                 dn.UpAdjustRemoteVolumeCountDelta(-1)
             }
@@ -116,7 +124,11 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
 
     for _, v := range deletedVolumes {
         delete(dn.volumes, v.Id)
-        dn.UpAdjustVolumeCountDelta(-1)
+        if v.VolumeType == storage.SsdType {
+            dn.UpAdjustSsdVolumeCountDelta(-1)
+        } else {
+            dn.UpAdjustVolumeCountDelta(-1)
+        }
         if v.IsRemote() {
             dn.UpAdjustRemoteVolumeCountDelta(-1)
         }
@@ -182,6 +194,7 @@ func (dn *DataNode) ToMap() interface{} {
     ret["VolumeIds"] = dn.GetVolumeIds()
     ret["EcShards"] = dn.GetEcShardCount()
     ret["Max"] = dn.GetMaxVolumeCount()
+    ret["MaxSsd"] = dn.GetMaxSsdVolumeCount()
     ret["Free"] = dn.FreeSpace()
     ret["PublicUrl"] = dn.PublicUrl
     return ret
@@ -192,6 +205,8 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
         Id:                string(dn.Id()),
         VolumeCount:       uint64(dn.GetVolumeCount()),
         MaxVolumeCount:    uint64(dn.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(dn.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(dn.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(dn.FreeSpace()),
         ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()),
diff --git a/weed/topology/node.go b/weed/topology/node.go
index 114417edf..c916857c0 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -19,17 +19,21 @@ type Node interface {
     FreeSpace() int64
     ReserveOneVolume(r int64) (*DataNode, error)
     UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64)
+    UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64)
     UpAdjustVolumeCountDelta(volumeCountDelta int64)
+    UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64)
     UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
     UpAdjustEcShardCountDelta(ecShardCountDelta int64)
     UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64)
     UpAdjustMaxVolumeId(vid needle.VolumeId)
 
     GetVolumeCount() int64
+    GetSsdVolumeCount() int64
     GetEcShardCount() int64
     GetActiveVolumeCount() int64
     GetRemoteVolumeCount() int64
     GetMaxVolumeCount() int64
+    GetMaxSsdVolumeCount() int64
     GetMaxVolumeId() needle.VolumeId
     SetParent(Node)
     LinkChildNode(node Node)
@@ -47,9 +51,11 @@ type Node interface {
 type NodeImpl struct {
     volumeCount       int64
     remoteVolumeCount int64
+    ssdVolumeCount    int64
     activeVolumeCount int64
     ecShardCount      int64
     maxVolumeCount    int64
+    maxSsdVolumeCount int64
     id                NodeId
     parent            Node
     sync.RWMutex // lock children
@@ -143,7 +149,7 @@ func (n *NodeImpl) Id() NodeId {
     return n.id
 }
 func (n *NodeImpl) FreeSpace() int64 {
-    freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
+    freeVolumeSlotCount := n.maxVolumeCount + n.maxSsdVolumeCount + n.remoteVolumeCount - n.volumeCount - n.ssdVolumeCount
     if n.ecShardCount > 0 {
         freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
     }
@@ -200,6 +206,15 @@ func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //ca
         n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
     }
 }
+func (n *NodeImpl) UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64) { //can be negative
+    if maxSsdVolumeCountDelta == 0 {
+        return
+    }
+    atomic.AddInt64(&n.maxSsdVolumeCount, maxSsdVolumeCountDelta)
+    if n.parent != nil {
+        n.parent.UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta)
+    }
+}
 func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
     if volumeCountDelta == 0 {
         return
@@ -218,6 +233,15 @@ func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
         n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
     }
 }
+func (n *NodeImpl) UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64) { //can be negative
+    if ssdVolumeCountDelta == 0 {
+        return
+    }
+    atomic.AddInt64(&n.ssdVolumeCount, ssdVolumeCountDelta)
+    if n.parent != nil {
+        n.parent.UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta)
+    }
+}
 func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
     if ecShardCountDelta == 0 {
         return
@@ -250,6 +274,9 @@ func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId {
 func (n *NodeImpl) GetVolumeCount() int64 {
     return n.volumeCount
 }
+func (n *NodeImpl) GetSsdVolumeCount() int64 {
+    return n.ssdVolumeCount
+}
 func (n *NodeImpl) GetEcShardCount() int64 {
     return n.ecShardCount
 }
@@ -262,6 +289,9 @@ func (n *NodeImpl) GetActiveVolumeCount() int64 {
 func (n *NodeImpl) GetMaxVolumeCount() int64 {
     return n.maxVolumeCount
 }
+func (n *NodeImpl) GetMaxSsdVolumeCount() int64 {
+    return n.maxSsdVolumeCount
+}
 
 func (n *NodeImpl) LinkChildNode(node Node) {
     n.Lock()
@@ -269,8 +299,10 @@ func (n *NodeImpl) LinkChildNode(node Node) {
     if n.children[node.Id()] == nil {
         n.children[node.Id()] = node
         n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount())
+        n.UpAdjustMaxSsdVolumeCountDelta(node.GetMaxSsdVolumeCount())
         n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
         n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
+        n.UpAdjustSsdVolumeCountDelta(node.GetSsdVolumeCount())
         n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount())
         n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
         n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
@@ -287,10 +319,12 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
         node.SetParent(nil)
         delete(n.children, node.Id())
         n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
+        n.UpAdjustSsdVolumeCountDelta(-node.GetSsdVolumeCount())
         n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount())
         n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
         n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
         n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
+        n.UpAdjustMaxSsdVolumeCountDelta(-node.GetMaxSsdVolumeCount())
         glog.V(0).Infoln(n, "removes", node.Id())
     }
 }
diff --git a/weed/topology/rack.go b/weed/topology/rack.go
index 1921c0c05..35563abe5 100644
--- a/weed/topology/rack.go
+++ b/weed/topology/rack.go
@@ -28,7 +28,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
         }
     }
     return nil
 }
-func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64) *DataNode {
+func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64, maxSsdVolumeCount int64) *DataNode {
     for _, c := range r.Children() {
         dn := c.(*DataNode)
         if dn.MatchLocation(ip, port) {
@@ -41,6 +41,7 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
     dn.Port = port
     dn.PublicUrl = publicUrl
     dn.maxVolumeCount = maxVolumeCount
+    dn.maxSsdVolumeCount = maxSsdVolumeCount
     dn.LastSeen = time.Now().Unix()
     r.LinkChildNode(dn)
     return dn
@@ -50,6 +51,7 @@ func (r *Rack) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Id"] = r.Id()
     m["Max"] = r.GetMaxVolumeCount()
+    m["MaxSsd"] = r.GetMaxSsdVolumeCount()
     m["Free"] = r.FreeSpace()
     var dns []interface{}
     for _, c := range r.Children() {
@@ -65,6 +67,8 @@ func (r *Rack) ToRackInfo() *master_pb.RackInfo {
         Id:                string(r.Id()),
         VolumeCount:       uint64(r.GetVolumeCount()),
         MaxVolumeCount:    uint64(r.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(r.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(r.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(r.FreeSpace()),
         ActiveVolumeCount: uint64(r.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()),
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index e02982451..62332f805 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -176,17 +176,27 @@ func (t *Topology) DeleteCollection(collectionName string) {
     t.collectionMap.Delete(collectionName)
 }
 
+func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) {
+    collection, found := t.FindCollection(collectionName)
+    if !found {
+        return
+    }
+    collection.DeleteVolumeLayout(rp, ttl, volumeType)
+}
+
 func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-    vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
+    volumeType, _ := storage.ToVolumeType(v.VolumeType)
+    vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
     vl.RegisterVolume(&v, dn)
     vl.EnsureCorrectWritables(&v)
 }
 
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
     glog.Infof("removing volume info: %+v", v)
-    volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
+    volumeType, _ := storage.ToVolumeType(v.VolumeType)
+    volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
     volumeLayout.UnRegisterVolume(&v, dn)
     if volumeLayout.isEmpty() {
-        t.DeleteCollection(v.Collection)
+        t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
     }
 }
 
@@ -222,7 +232,8 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
         t.UnRegisterVolumeLayout(v, dn)
     }
     for _, v := range changedVolumes {
-        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
+        volumeType, _ := storage.ToVolumeType(v.VolumeType)
+        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
         vl.EnsureCorrectWritables(&v)
     }
     return
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 98957a964..076900069 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -37,7 +37,8 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
     }()
 }
 func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
-    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, volumeInfo.VolumeType)
+    volumeType, _ := storage.ToVolumeType(volumeInfo.VolumeType)
+    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, volumeType)
     if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
         return false
     }
@@ -55,13 +56,16 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
     for _, v := range dn.GetVolumes() {
         glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
-        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
+        volumeType, _ := storage.ToVolumeType(v.VolumeType)
+        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
         vl.SetVolumeUnavailable(dn, v.Id)
     }
     dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
+    dn.UpAdjustSsdVolumeCountDelta(-dn.GetSsdVolumeCount())
     dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount())
     dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
     dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
+    dn.UpAdjustMaxSsdVolumeCountDelta(-dn.GetMaxSsdVolumeCount())
     if dn.Parent() != nil {
         dn.Parent().UnlinkChildNode(dn.Id())
     }
diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go
index 73c55d77d..eb8990ab2 100644
--- a/weed/topology/topology_map.go
+++ b/weed/topology/topology_map.go
@@ -5,6 +5,7 @@ import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 func (t *Topology) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Max"] = t.GetMaxVolumeCount()
+    m["MaxSsd"] = t.GetMaxSsdVolumeCount()
     m["Free"] = t.FreeSpace()
     var dcs []interface{}
     for _, c := range t.Children() {
@@ -30,6 +31,7 @@ func (t *Topology) ToMap() interface{} {
 func (t *Topology) ToVolumeMap() interface{} {
     m := make(map[string]interface{})
     m["Max"] = t.GetMaxVolumeCount()
+    m["MaxSsd"] = t.GetMaxSsdVolumeCount()
     m["Free"] = t.FreeSpace()
     dcs := make(map[NodeId]interface{})
     for _, c := range t.Children() {
@@ -83,9 +85,11 @@ func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo {
         Id:                string(t.Id()),
         VolumeCount:       uint64(t.GetVolumeCount()),
         MaxVolumeCount:    uint64(t.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(t.GetMaxSsdVolumeCount()),
         FreeVolumeCount:   uint64(t.FreeSpace()),
         ActiveVolumeCount: uint64(t.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()),
+        SsdVolumeCount:    uint64(t.GetSsdVolumeCount()),
     }
     for _, c := range t.Children() {
         dc := c.(*DataCenter)
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 9bcf2331e..07dc22681 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -27,7 +27,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 
     dc := topo.GetOrCreateDataCenter("dc1")
     rack := dc.GetOrCreateRack("rack1")
-    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
+    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
 
     {
         volumeCount := 7
@@ -48,10 +48,28 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
             volumeMessages = append(volumeMessages, volumeMessage)
         }
 
+        for k := 1; k <= volumeCount; k++ {
+            volumeMessage := &master_pb.VolumeInformationMessage{
+                Id:               uint32(volumeCount + k),
+                Size:             uint64(25432),
+                Collection:       "",
+                FileCount:        uint64(2343),
+                DeleteCount:      uint64(345),
+                DeletedByteCount: 34524,
+                ReadOnly:         false,
+                ReplicaPlacement: uint32(0),
+                Version:          uint32(needle.CurrentVersion),
+                Ttl:              0,
+                VolumeType:       "ssd",
+            }
+            volumeMessages = append(volumeMessages, volumeMessage)
+        }
+
         topo.SyncDataNodeRegistration(volumeMessages, dn)
 
-        assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
+        assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount*2)
         assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+        assert(t, "ssdVolumeCount", int(topo.ssdVolumeCount), volumeCount)
     }
 
     {
@@ -115,7 +133,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
         nil,
         dn)
 
-    for vid, _ := range layout.vid2location {
+    for vid := range layout.vid2location {
         println("after add volume id", vid)
     }
     for _, vid := range layout.writables {
@@ -144,12 +162,13 @@ func TestAddRemoveVolume(t *testing.T) {
 
     dc := topo.GetOrCreateDataCenter("dc1")
     rack := dc.GetOrCreateRack("rack1")
-    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
+    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
 
     v := storage.VolumeInfo{
         Id:               needle.VolumeId(1),
         Size:             100,
         Collection:       "xcollection",
+        VolumeType:       "ssd",
         FileCount:        123,
         DeleteCount:      23,
         DeletedByteCount: 45,
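
A few mechanics of this commit are worth spelling out. First, volume layouts are now keyed by volume type in addition to replica placement and TTL; the key composition is visible in `Collection.DeleteVolumeLayout` above, where the type string is appended only when it is not the hard-drive default, keeping existing HDD layout keys unchanged across the upgrade. A minimal runnable sketch of that scheme; the `VolumeType` declarations here are assumptions for illustration, not the exact ones in `weed/storage`:

```go
package main

import "fmt"

// VolumeType stands in for storage.VolumeType; the concrete constant
// values are assumptions (the test above only confirms the "ssd" literal).
type VolumeType string

const (
	HardDriveType VolumeType = ""    // assumed default tier
	SsdType       VolumeType = "ssd" // matches the heartbeat string in the test
)

// layoutKey mirrors the composition in DeleteVolumeLayout: replica
// placement, then the TTL string if set, then the volume type unless it
// is the hard-drive default.
func layoutKey(rp, ttl string, volumeType VolumeType) string {
	keyString := rp
	if ttl != "" {
		keyString += ttl
	}
	if volumeType != HardDriveType {
		keyString += string(volumeType)
	}
	return keyString
}

func main() {
	fmt.Println(layoutKey("001", "", HardDriveType)) // "001", identical to the pre-SSD key
	fmt.Println(layoutKey("001", "3d", SsdType))     // "0013dssd"
}
```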
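Second, heartbeats carry the volume type as a plain string, so `RegisterVolumeLayout`, `UnRegisterVolumeLayout`, `SetVolumeCapacityFull`, and `UnRegisterDataNode` all convert it with `storage.ToVolumeType` and discard the error before the layout lookup. A hypothetical sketch of such a conversion; the accepted spellings and error text are assumptions, since the diff does not show `ToVolumeType` itself:

```go
package main

import "fmt"

type VolumeType string

const (
	HardDriveType VolumeType = ""
	SsdType       VolumeType = "ssd"
)

// toVolumeType sketches a storage.ToVolumeType-style parser. Only the
// "ssd" spelling is confirmed by the test above; the empty string falling
// back to the hard-drive default is an assumption consistent with the keying.
func toVolumeType(s string) (VolumeType, error) {
	switch s {
	case "":
		return HardDriveType, nil
	case "ssd":
		return SsdType, nil
	}
	return HardDriveType, fmt.Errorf("unknown volume type %q", s)
}

func main() {
	volumeType, _ := toVolumeType("ssd") // the callers in the diff discard the error the same way
	fmt.Println(volumeType)
}
```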
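Third, the counter bookkeeping follows one pattern throughout `node.go`: each `UpAdjust...Delta` method applies the delta atomically to its own field and then recurses to the parent, so data node, rack, data center, and topology totals stay consistent without rescanning children. The commit simply adds `ssdVolumeCount` and `maxSsdVolumeCount` to that scheme. A self-contained sketch of the propagation, with the topology tree reduced to a parent pointer:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// node is a pared-down NodeImpl: just the parent link and the SSD
// counter this commit introduces.
type node struct {
	parent         *node
	ssdVolumeCount int64
}

// upAdjustSsdVolumeCountDelta mirrors NodeImpl.UpAdjustSsdVolumeCountDelta:
// skip no-ops, add atomically, then propagate the same delta upward.
func (n *node) upAdjustSsdVolumeCountDelta(delta int64) {
	if delta == 0 {
		return
	}
	atomic.AddInt64(&n.ssdVolumeCount, delta)
	if n.parent != nil {
		n.parent.upAdjustSsdVolumeCountDelta(delta)
	}
}

func main() {
	topo := &node{}
	dc := &node{parent: topo}
	rack := &node{parent: dc}
	dn := &node{parent: rack}

	dn.upAdjustSsdVolumeCountDelta(7) // heartbeat registers 7 SSD volumes
	fmt.Println(rack.ssdVolumeCount, dc.ssdVolumeCount, topo.ssdVolumeCount) // 7 7 7

	dn.upAdjustSsdVolumeCountDelta(-7) // UnRegisterDataNode-style teardown
	fmt.Println(topo.ssdVolumeCount) // 0
}
```

This is also why `UnRegisterDataNode` and `UnlinkChildNode` must subtract the new SSD counters explicitly: a single missing negative delta would leave stale totals all the way up the tree.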
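Finally, note that the updated `FreeSpace` pools both tiers into a single figure, free = (HDD max + SSD max + remote) - (HDD used + SSD used), minus a reservation when EC shards are present, rather than reporting SSD slots separately. A small sketch of the arithmetic, using the test node above (25 HDD slots, 12 SSD slots, 7 volumes of each type); `erasure_coding.DataShardsCount` is 10 in SeaweedFS:

```go
package main

import "fmt"

// dataShardsCount mirrors erasure_coding.DataShardsCount (10 data shards
// per EC volume in SeaweedFS).
const dataShardsCount = 10

// freeSpace reproduces the updated NodeImpl.FreeSpace: SSD capacity and
// usage are folded into the hard-drive numbers, and EC shards reserve
// roughly one slot per full set of data shards, plus one.
func freeSpace(maxVolumes, maxSsd, remote, volumes, ssdVolumes, ecShards int64) int64 {
	free := maxVolumes + maxSsd + remote - volumes - ssdVolumes
	if ecShards > 0 {
		free = free - ecShards/dataShardsCount - 1
	}
	return free
}

func main() {
	fmt.Println(freeSpace(25, 12, 0, 7, 7, 0))  // 23 free slots, HDD and SSD pooled
	fmt.Println(freeSpace(25, 12, 0, 7, 7, 15)) // 21: 15 EC shards reserve 15/10+1 = 2 slots
}
```

One consequence of pooling: callers of `FreeSpace` see a single number and cannot tell which tier the free slots are on; tier-aware placement has to consult the per-tier counters instead.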
