Diffstat (limited to 'weed/topology')
 -rw-r--r--  weed/topology/allocate_volume.go           1
 -rw-r--r--  weed/topology/collection.go               19
 -rw-r--r--  weed/topology/data_center.go               3
 -rw-r--r--  weed/topology/data_node.go                21
 -rw-r--r--  weed/topology/node.go                     47
 -rw-r--r--  weed/topology/rack.go                      6
 -rw-r--r--  weed/topology/topology.go                 32
 -rw-r--r--  weed/topology/topology_event_handling.go   8
 -rw-r--r--  weed/topology/topology_map.go              4
 -rw-r--r--  weed/topology/topology_test.go            29
 -rw-r--r--  weed/topology/volume_growth.go             1
 -rw-r--r--  weed/topology/volume_layout.go             4
12 files changed, 151 insertions, 24 deletions
diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go
index e5dc48652..39c24ab04 100644
--- a/weed/topology/allocate_volume.go
+++ b/weed/topology/allocate_volume.go
@@ -24,6 +24,7 @@ func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.Vol
             Ttl:                option.Ttl.String(),
             Preallocate:        option.Prealloacte,
             MemoryMapMaxSizeMb: option.MemoryMapMaxSizeMb,
+            DiskType:           string(option.DiskType),
         })
         return deleteErr
     })
diff --git a/weed/topology/collection.go b/weed/topology/collection.go
index 5b410d1eb..0c5dc5db7 100644
--- a/weed/topology/collection.go
+++ b/weed/topology/collection.go
@@ -2,6 +2,7 @@ package topology
 
 import (
     "fmt"
+    "github.com/chrislusf/seaweedfs/weed/storage"
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
     "github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -29,17 +30,31 @@ func (c *Collection) String() string {
     return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
 }
 
-func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
+func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
     keyString := rp.String()
     if ttl != nil {
         keyString += ttl.String()
     }
+    if diskType != storage.HardDriveType {
+        keyString += string(diskType)
+    }
     vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
-        return NewVolumeLayout(rp, ttl, c.volumeSizeLimit, c.replicationAsMin)
+        return NewVolumeLayout(rp, ttl, diskType, c.volumeSizeLimit, c.replicationAsMin)
     })
     return vl.(*VolumeLayout)
 }
 
+func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
+    keyString := rp.String()
+    if ttl != nil {
+        keyString += ttl.String()
+    }
+    if diskType != storage.HardDriveType {
+        keyString += string(diskType)
+    }
+    c.storageType2VolumeLayout.Delete(keyString)
+}
+
 func (c *Collection) Lookup(vid needle.VolumeId) []*DataNode {
     for _, vl := range c.storageType2VolumeLayout.Items() {
         if vl != nil {
diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go
index dc3accb71..6ca94b52d 100644
--- a/weed/topology/data_center.go
+++ b/weed/topology/data_center.go
@@ -31,6 +31,7 @@ func (dc *DataCenter) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Id"] = dc.Id()
     m["Max"] = dc.GetMaxVolumeCount()
+    m["MaxSsd"] = dc.GetMaxSsdVolumeCount()
     m["Free"] = dc.FreeSpace()
     var racks []interface{}
     for _, c := range dc.Children() {
@@ -46,6 +47,8 @@ func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo {
         Id:                string(dc.Id()),
         VolumeCount:       uint64(dc.GetVolumeCount()),
         MaxVolumeCount:    uint64(dc.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(dc.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(dc.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(dc.FreeSpace()),
         ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()),
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 0a4df63d0..400e91455 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -50,7 +50,11 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
 func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
     if oldV, ok := dn.volumes[v.Id]; !ok {
         dn.volumes[v.Id] = v
-        dn.UpAdjustVolumeCountDelta(1)
+        if v.DiskType == storage.SsdType {
+            dn.UpAdjustSsdVolumeCountDelta(1)
+        } else {
+            dn.UpAdjustVolumeCountDelta(1)
+        }
         if v.IsRemote() {
             dn.UpAdjustRemoteVolumeCountDelta(1)
         }
@@ -89,7 +93,11 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
             glog.V(0).Infoln("Deleting volume id:", vid)
             delete(dn.volumes, vid)
             deletedVolumes = append(deletedVolumes, v)
-            dn.UpAdjustVolumeCountDelta(-1)
+            if v.DiskType == storage.SsdType {
+                dn.UpAdjustSsdVolumeCountDelta(-1)
+            } else {
+                dn.UpAdjustVolumeCountDelta(-1)
+            }
             if v.IsRemote() {
                 dn.UpAdjustRemoteVolumeCountDelta(-1)
             }
@@ -116,7 +124,11 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
     for _, v := range deletedVolumes {
         delete(dn.volumes, v.Id)
-        dn.UpAdjustVolumeCountDelta(-1)
+        if v.DiskType == storage.SsdType {
+            dn.UpAdjustSsdVolumeCountDelta(-1)
+        } else {
+            dn.UpAdjustVolumeCountDelta(-1)
+        }
         if v.IsRemote() {
             dn.UpAdjustRemoteVolumeCountDelta(-1)
         }
@@ -182,6 +194,7 @@ func (dn *DataNode) ToMap() interface{} {
     ret["VolumeIds"] = dn.GetVolumeIds()
     ret["EcShards"] = dn.GetEcShardCount()
     ret["Max"] = dn.GetMaxVolumeCount()
+    ret["MaxSsd"] = dn.GetMaxSsdVolumeCount()
     ret["Free"] = dn.FreeSpace()
     ret["PublicUrl"] = dn.PublicUrl
     return ret
@@ -192,6 +205,8 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
         Id:                string(dn.Id()),
         VolumeCount:       uint64(dn.GetVolumeCount()),
         MaxVolumeCount:    uint64(dn.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(dn.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(dn.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(dn.FreeSpace()),
         ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()),
diff --git a/weed/topology/node.go b/weed/topology/node.go
index 114417edf..8f525021f 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -2,6 +2,7 @@ package topology
 
 import (
     "errors"
+    "github.com/chrislusf/seaweedfs/weed/storage"
     "math/rand"
     "strings"
     "sync"
@@ -19,17 +20,21 @@ type Node interface {
     FreeSpace() int64
     ReserveOneVolume(r int64) (*DataNode, error)
     UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64)
+    UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64)
     UpAdjustVolumeCountDelta(volumeCountDelta int64)
+    UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64)
     UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
     UpAdjustEcShardCountDelta(ecShardCountDelta int64)
     UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64)
     UpAdjustMaxVolumeId(vid needle.VolumeId)
 
     GetVolumeCount() int64
+    GetSsdVolumeCount() int64
     GetEcShardCount() int64
     GetActiveVolumeCount() int64
     GetRemoteVolumeCount() int64
     GetMaxVolumeCount() int64
+    GetMaxSsdVolumeCount() int64
     GetMaxVolumeId() needle.VolumeId
     SetParent(Node)
     LinkChildNode(node Node)
@@ -47,9 +52,11 @@ type Node interface {
 type NodeImpl struct {
     volumeCount       int64
     remoteVolumeCount int64
+    ssdVolumeCount    int64
     activeVolumeCount int64
     ecShardCount      int64
     maxVolumeCount    int64
+    maxSsdVolumeCount int64
     id                NodeId
     parent            Node
     sync.RWMutex // lock children
@@ -142,8 +149,18 @@ func (n *NodeImpl) String() string {
 func (n *NodeImpl) Id() NodeId {
     return n.id
 }
-func (n *NodeImpl) FreeSpace() int64 {
+func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
     freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
+    if option.DiskType == storage.SsdType {
+        freeVolumeSlotCount = n.maxSsdVolumeCount - n.ssdVolumeCount
+    }
+    if n.ecShardCount > 0 {
+        freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
+    }
+    return freeVolumeSlotCount
+}
+func (n *NodeImpl) FreeSpace() int64 {
+    freeVolumeSlotCount := n.maxVolumeCount + n.maxSsdVolumeCount + n.remoteVolumeCount - n.volumeCount - n.ssdVolumeCount
     if n.ecShardCount > 0 {
         freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
     }
@@ -200,6 +217,15 @@ func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative
         n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
     }
 }
+func (n *NodeImpl) UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64) { //can be negative
+    if maxSsdVolumeCountDelta == 0 {
+        return
+    }
+    atomic.AddInt64(&n.maxSsdVolumeCount, maxSsdVolumeCountDelta)
+    if n.parent != nil {
+        n.parent.UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta)
+    }
+}
 func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
     if volumeCountDelta == 0 {
         return
@@ -218,6 +244,15 @@ func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
         n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
     }
 }
+func (n *NodeImpl) UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64) { //can be negative
+    if ssdVolumeCountDelta == 0 {
+        return
+    }
+    atomic.AddInt64(&n.ssdVolumeCount, ssdVolumeCountDelta)
+    if n.parent != nil {
+        n.parent.UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta)
+    }
+}
 func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
     if ecShardCountDelta == 0 {
         return
@@ -250,6 +285,9 @@ func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId {
 func (n *NodeImpl) GetVolumeCount() int64 {
     return n.volumeCount
 }
+func (n *NodeImpl) GetSsdVolumeCount() int64 {
+    return n.ssdVolumeCount
+}
 func (n *NodeImpl) GetEcShardCount() int64 {
     return n.ecShardCount
 }
@@ -262,6 +300,9 @@ func (n *NodeImpl) GetActiveVolumeCount() int64 {
 func (n *NodeImpl) GetMaxVolumeCount() int64 {
     return n.maxVolumeCount
 }
+func (n *NodeImpl) GetMaxSsdVolumeCount() int64 {
+    return n.maxSsdVolumeCount
+}
 
 func (n *NodeImpl) LinkChildNode(node Node) {
     n.Lock()
@@ -269,8 +310,10 @@ func (n *NodeImpl) LinkChildNode(node Node) {
     if n.children[node.Id()] == nil {
         n.children[node.Id()] = node
         n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount())
+        n.UpAdjustMaxSsdVolumeCountDelta(node.GetMaxSsdVolumeCount())
         n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
         n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
+        n.UpAdjustSsdVolumeCountDelta(node.GetSsdVolumeCount())
         n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount())
         n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
         n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
@@ -287,10 +330,12 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
         node.SetParent(nil)
         delete(n.children, node.Id())
         n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
+        n.UpAdjustSsdVolumeCountDelta(-node.GetSsdVolumeCount())
         n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount())
         n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
         n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
         n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
+        n.UpAdjustMaxSsdVolumeCountDelta(-node.GetMaxSsdVolumeCount())
         glog.V(0).Infoln(n, "removes", node.Id())
     }
 }
diff --git a/weed/topology/rack.go b/weed/topology/rack.go
index 1921c0c05..35563abe5 100644
--- a/weed/topology/rack.go
+++ b/weed/topology/rack.go
@@ -28,7 +28,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
     }
     return nil
 }
-func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64) *DataNode {
+func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64, maxSsdVolumeCount int64) *DataNode {
     for _, c := range r.Children() {
         dn := c.(*DataNode)
         if dn.MatchLocation(ip, port) {
@@ -41,6 +41,7 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
     dn.Port = port
     dn.PublicUrl = publicUrl
     dn.maxVolumeCount = maxVolumeCount
+    dn.maxSsdVolumeCount = maxSsdVolumeCount
     dn.LastSeen = time.Now().Unix()
     r.LinkChildNode(dn)
     return dn
@@ -50,6 +51,7 @@ func (r *Rack) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Id"] = r.Id()
     m["Max"] = r.GetMaxVolumeCount()
+    m["MaxSsd"] = r.GetMaxSsdVolumeCount()
     m["Free"] = r.FreeSpace()
     var dns []interface{}
     for _, c := range r.Children() {
@@ -65,6 +67,8 @@ func (r *Rack) ToRackInfo() *master_pb.RackInfo {
         Id:                string(r.Id()),
         VolumeCount:       uint64(r.GetVolumeCount()),
         MaxVolumeCount:    uint64(r.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(r.GetMaxSsdVolumeCount()),
+        SsdVolumeCount:    uint64(r.GetSsdVolumeCount()),
         FreeVolumeCount:   uint64(r.FreeSpace()),
         ActiveVolumeCount: uint64(r.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()),
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index bde72cf09..486394f7a 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -121,12 +121,12 @@ func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
 }
 
 func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
-    vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl)
+    vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
     return vl.GetActiveVolumeCount(option) > 0
 }
 
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
-    vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
+    vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType).PickForWrite(count, option)
     if err != nil {
         return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
     }
@@ -137,10 +137,10 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
     return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
 }
 
-func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
+func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
     return t.collectionMap.Get(collectionName, func() interface{} {
         return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
-    }).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
+    }).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType)
 }
 
 func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) {
@@ -176,17 +176,30 @@ func (t *Topology) DeleteCollection(collectionName string) {
     t.collectionMap.Delete(collectionName)
 }
 
+func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
+    collection, found := t.FindCollection(collectionName)
+    if !found {
+        return
+    }
+    collection.DeleteVolumeLayout(rp, ttl, diskType)
+    if len(collection.storageType2VolumeLayout.Items()) == 0 {
+        t.DeleteCollection(collectionName)
+    }
+}
+
 func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-    vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+    diskType, _ := storage.ToDiskType(v.DiskType)
+    vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
     vl.RegisterVolume(&v, dn)
     vl.EnsureCorrectWritables(&v)
 }
 
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-    glog.Infof("removing volume info:%+v", v)
-    volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+    glog.Infof("removing volume info: %+v", v)
+    diskType, _ := storage.ToDiskType(v.DiskType)
+    volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
     volumeLayout.UnRegisterVolume(&v, dn)
     if volumeLayout.isEmpty() {
-        t.DeleteCollection(v.Collection)
+        t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
     }
 }
@@ -222,7 +235,8 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
         t.UnRegisterVolumeLayout(v, dn)
     }
     for _, v := range changedVolumes {
-        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+        diskType, _ := storage.ToDiskType(v.DiskType)
+        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
         vl.EnsureCorrectWritables(&v)
     }
     return
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 068bd401e..5b9facc14 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -37,7 +37,8 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
     }()
 }
 func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
-    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl)
+    diskType, _ := storage.ToDiskType(volumeInfo.DiskType)
+    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
     if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
         return false
     }
@@ -55,13 +56,16 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
     for _, v := range dn.GetVolumes() {
         glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
-        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+        diskType, _ := storage.ToDiskType(v.DiskType)
+        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
         vl.SetVolumeUnavailable(dn, v.Id)
     }
     dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
+    dn.UpAdjustSsdVolumeCountDelta(-dn.GetSsdVolumeCount())
     dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount())
     dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
     dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
+    dn.UpAdjustMaxSsdVolumeCountDelta(-dn.GetMaxSsdVolumeCount())
     if dn.Parent() != nil {
         dn.Parent().UnlinkChildNode(dn.Id())
     }
diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go
index 73c55d77d..eb8990ab2 100644
--- a/weed/topology/topology_map.go
+++ b/weed/topology/topology_map.go
@@ -5,6 +5,7 @@ import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 func (t *Topology) ToMap() interface{} {
     m := make(map[string]interface{})
     m["Max"] = t.GetMaxVolumeCount()
+    m["MaxSsd"] = t.GetMaxSsdVolumeCount()
     m["Free"] = t.FreeSpace()
     var dcs []interface{}
     for _, c := range t.Children() {
@@ -30,6 +31,7 @@ func (t *Topology) ToMap() interface{} {
 func (t *Topology) ToVolumeMap() interface{} {
     m := make(map[string]interface{})
     m["Max"] = t.GetMaxVolumeCount()
+    m["MaxSsd"] = t.GetMaxSsdVolumeCount()
     m["Free"] = t.FreeSpace()
     dcs := make(map[NodeId]interface{})
     for _, c := range t.Children() {
@@ -83,9 +85,11 @@ func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo {
         Id:                string(t.Id()),
         VolumeCount:       uint64(t.GetVolumeCount()),
         MaxVolumeCount:    uint64(t.GetMaxVolumeCount()),
+        MaxSsdVolumeCount: uint64(t.GetMaxSsdVolumeCount()),
         FreeVolumeCount:   uint64(t.FreeSpace()),
         ActiveVolumeCount: uint64(t.GetActiveVolumeCount()),
         RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()),
+        SsdVolumeCount:    uint64(t.GetSsdVolumeCount()),
     }
     for _, c := range t.Children() {
         dc := c.(*DataCenter)
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 2fe381ca2..92dcb9a22 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -27,7 +27,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
     dc := topo.GetOrCreateDataCenter("dc1")
     rack := dc.GetOrCreateRack("rack1")
-    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
+    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
 
     {
         volumeCount := 7
@@ -48,10 +48,28 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
             volumeMessages = append(volumeMessages, volumeMessage)
         }
 
+        for k := 1; k <= volumeCount; k++ {
+            volumeMessage := &master_pb.VolumeInformationMessage{
+                Id:               uint32(volumeCount + k),
+                Size:             uint64(25432),
+                Collection:       "",
+                FileCount:        uint64(2343),
+                DeleteCount:      uint64(345),
+                DeletedByteCount: 34524,
+                ReadOnly:         false,
+                ReplicaPlacement: uint32(0),
+                Version:          uint32(needle.CurrentVersion),
+                Ttl:              0,
+                DiskType:         "ssd",
+            }
+            volumeMessages = append(volumeMessages, volumeMessage)
+        }
+
         topo.SyncDataNodeRegistration(volumeMessages, dn)
 
-        assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
+        assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount*2)
         assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+        assert(t, "ssdVolumeCount", int(topo.ssdVolumeCount), volumeCount)
     }
 
     {
@@ -96,7 +114,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
             nil,
             dn)
         rp, _ := super_block.NewReplicaPlacementFromString("000")
-        layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
+        layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, storage.HardDriveType)
         assert(t, "writables after repeated add", len(layout.writables), volumeCount)
 
         assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
@@ -115,7 +133,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
             nil,
             dn)
 
-        for vid, _ := range layout.vid2location {
+        for vid := range layout.vid2location {
             println("after add volume id", vid)
         }
         for _, vid := range layout.writables {
@@ -144,12 +162,13 @@ func TestAddRemoveVolume(t *testing.T) {
     dc := topo.GetOrCreateDataCenter("dc1")
     rack := dc.GetOrCreateRack("rack1")
-    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
+    dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
 
     v := storage.VolumeInfo{
         Id:               needle.VolumeId(1),
         Size:             100,
         Collection:       "xcollection",
+        DiskType:         "ssd",
         FileCount:        123,
         DeleteCount:      23,
         DeletedByteCount: 45,
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 58b5702bf..b2f1fe3d5 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -27,6 +27,7 @@ type VolumeGrowOption struct {
     Collection       string
     ReplicaPlacement *super_block.ReplicaPlacement
     Ttl              *needle.TTL
+    DiskType         storage.DiskType
     Prealloacte      int64
     DataCenter       string
     Rack             string
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 0db95d289..ba30eca49 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -103,6 +103,7 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
 type VolumeLayout struct {
     rp              *super_block.ReplicaPlacement
     ttl             *needle.TTL
+    diskType        storage.DiskType
     vid2location    map[needle.VolumeId]*VolumeLocationList
     writables       []needle.VolumeId   // transient array of writable volume id
     readonlyVolumes *volumesBinaryState // readonly volumes
@@ -118,10 +119,11 @@ type VolumeLayoutStats struct {
     FileCount uint64
 }
 
-func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
     return &VolumeLayout{
         rp:              rp,
         ttl:             ttl,
+        diskType:        diskType,
         vid2location:    make(map[needle.VolumeId]*VolumeLocationList),
         writables:       *new([]needle.VolumeId),
         readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
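The collection.go change keys each VolumeLayout by replica placement, TTL, and now disk type, appending the disk type only when it is not the default hard drive, so keys for pre-existing hard-drive layouts stay unchanged. A minimal sketch of that keying scheme, using stand-in string types rather than the real super_block/needle ones (and assuming, as the unchanged keys imply, that the hard-drive type contributes nothing to the key):

package main

import "fmt"

type DiskType string

const (
	HardDriveType DiskType = "" // stand-in for storage.HardDriveType
	SsdType       DiskType = "ssd"
)

// layoutKey mirrors the key built in GetOrCreateVolumeLayout and
// DeleteVolumeLayout: rp + ttl, plus the disk type only when it is
// not the default, so pre-SSD layouts keep their old keys.
func layoutKey(rp, ttl string, diskType DiskType) string {
	key := rp
	if ttl != "" {
		key += ttl
	}
	if diskType != HardDriveType {
		key += string(diskType)
	}
	return key
}

func main() {
	fmt.Println(layoutKey("000", "", HardDriveType)) // "000" — same key as before the change
	fmt.Println(layoutKey("000", "", SsdType))       // "000ssd" — a separate SSD layout
}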
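node.go now accounts for SSD capacity separately: AvailableSpaceFor answers how many volumes of the requested disk type still fit on a node, while FreeSpace keeps reporting a combined total across both pools, each discounted by slots consumed by erasure-coding shards. A self-contained sketch of the arithmetic with simplified fields; the 25/12 capacities mirror the updated test, which registers a data node via GetOrCreateDataNode(..., 25, 12):

package main

import "fmt"

type node struct {
	maxVolumeCount, maxSsdVolumeCount int64
	volumeCount, ssdVolumeCount       int64
	remoteVolumeCount, ecShardCount   int64
}

const dataShardsCount = 10 // stands in for erasure_coding.DataShardsCount

// availableSpaceFor picks the slot pool matching the requested disk type.
func (n *node) availableSpaceFor(ssd bool) int64 {
	free := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
	if ssd {
		free = n.maxSsdVolumeCount - n.ssdVolumeCount
	}
	if n.ecShardCount > 0 {
		free = free - n.ecShardCount/dataShardsCount - 1
	}
	return free
}

// freeSpace reports the combined total across both pools.
func (n *node) freeSpace() int64 {
	free := n.maxVolumeCount + n.maxSsdVolumeCount + n.remoteVolumeCount -
		n.volumeCount - n.ssdVolumeCount
	if n.ecShardCount > 0 {
		free = free - n.ecShardCount/dataShardsCount - 1
	}
	return free
}

func main() {
	n := &node{maxVolumeCount: 25, maxSsdVolumeCount: 12, volumeCount: 7, ssdVolumeCount: 7}
	fmt.Println(n.availableSpaceFor(false)) // 18 hard-drive slots left
	fmt.Println(n.availableSpaceFor(true))  // 5 ssd slots left
	fmt.Println(n.freeSpace())              // 23 combined
}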
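The UpAdjust*Delta helpers apply each counter change locally with an atomic add and then recurse to the parent, which is how the new ssdVolumeCount and maxSsdVolumeCount stay consistent from data node up through rack, data center, and topology (and why LinkChildNode/UnlinkChildNode add and subtract a child's full counts). A simplified sketch of that propagation pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

// node is a stand-in for NodeImpl: a counter delta is applied locally,
// then pushed to the parent, so every ancestor carries the aggregate
// of its children.
type node struct {
	ssdVolumeCount int64
	parent         *node
}

func (n *node) upAdjustSsdVolumeCountDelta(delta int64) {
	if delta == 0 {
		return // same early-out as the real code; avoids walking the tree
	}
	atomic.AddInt64(&n.ssdVolumeCount, delta)
	if n.parent != nil {
		n.parent.upAdjustSsdVolumeCountDelta(delta)
	}
}

func main() {
	topo := &node{}
	dc := &node{parent: topo}
	dn := &node{parent: dc}

	dn.upAdjustSsdVolumeCountDelta(7) // e.g. seven ssd volumes registered
	fmt.Println(dn.ssdVolumeCount, dc.ssdVolumeCount, topo.ssdVolumeCount) // 7 7 7

	dn.upAdjustSsdVolumeCountDelta(-7) // e.g. UnRegisterDataNode
	fmt.Println(topo.ssdVolumeCount)   // 0
}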
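In topology.go, an empty volume layout previously caused the entire collection to be deleted; with DeleteLayout, only the specific (replica placement, TTL, disk type) layout is dropped, and the collection goes away only when its last layout does. A sketch of that flow, with plain maps standing in for the real concurrent collectionMap and storageType2VolumeLayout:

package main

import "fmt"

type collection struct {
	layouts map[string]struct{} // keyed the same way as GetOrCreateVolumeLayout
}

type topology struct {
	collections map[string]*collection
}

func (t *topology) deleteLayout(collectionName, layoutKey string) {
	c, found := t.collections[collectionName]
	if !found {
		return
	}
	delete(c.layouts, layoutKey)
	if len(c.layouts) == 0 {
		// only when the last layout is gone does the collection itself go away
		delete(t.collections, collectionName)
	}
}

func main() {
	t := &topology{collections: map[string]*collection{
		"pictures": {layouts: map[string]struct{}{"000": {}, "000ssd": {}}},
	}}

	t.deleteLayout("pictures", "000ssd")
	fmt.Println(len(t.collections["pictures"].layouts)) // 1 — collection survives

	t.deleteLayout("pictures", "000")
	_, ok := t.collections["pictures"]
	fmt.Println(ok) // false — empty collection removed
}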
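Finally, callers that receive volume info from heartbeats carry the disk type as a plain string ("ssd" in the updated test) and normalize it with storage.ToDiskType before looking up a layout, as in diskType, _ := storage.ToDiskType(v.DiskType). The exact mapping lives outside this diff; the sketch below assumes "ssd" maps to the SSD type and anything else falls back to the default hard drive:

package main

import (
	"fmt"
	"strings"
)

type DiskType string

const (
	HardDriveType DiskType = ""
	SsdType       DiskType = "ssd"
)

// toDiskType approximates the storage.ToDiskType call used throughout
// the diff: normalize the heartbeat string, defaulting to hard drive.
func toDiskType(s string) (DiskType, error) {
	switch strings.ToLower(s) {
	case "ssd":
		return SsdType, nil
	case "", "hdd":
		return HardDriveType, nil
	}
	return HardDriveType, fmt.Errorf("unknown disk type %q", s)
}

func main() {
	dt, _ := toDiskType("ssd")
	fmt.Println(dt == SsdType) // true: this volume counts against the ssd pool
}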
