| field | value |
|---|---|
| author | Chris Lu <chris.lu@gmail.com> (2020-12-13 11:59:32 -0800) |
| committer | Chris Lu <chris.lu@gmail.com> (2020-12-13 11:59:32 -0800) |
| commit | 0d2ec832e2ced90371f2c5549b175d5b93becd1a |
| tree | 8f4fddcc88491331b0e395c1606b8cb9d6d00513 /weed/topology |
| parent | 715b199eeb6fbd3f028b99e42097096fdcac5e09 |
rename from volumeType to diskType
Diffstat (limited to 'weed/topology')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/topology/collection.go | 14 |
| -rw-r--r-- | weed/topology/data_node.go | 6 |
| -rw-r--r-- | weed/topology/topology.go | 26 |
| -rw-r--r-- | weed/topology/topology_event_handling.go | 8 |
| -rw-r--r-- | weed/topology/topology_test.go | 4 |
| -rw-r--r-- | weed/topology/volume_growth.go | 2 |
| -rw-r--r-- | weed/topology/volume_layout.go | 6 |
7 files changed, 33 insertions, 33 deletions
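The renamed type drives every hunk below. Its definitions live in the `weed/storage` package, which this commit does not touch; going only by the calls visible in the diff (`storage.ToDiskType`, `storage.HardDriveType`, `storage.SsdType`, and the `string(diskType)` conversion), here is a minimal sketch of what `DiskType` plausibly looks like. The literal values and the parser's tolerance are assumptions, not taken from this commit:

```go
package storage

import "fmt"

// DiskType tags a volume with the class of disk it lives on. A string-backed
// type is assumed because the topology code converts it with string(diskType)
// and the tests pass the plain literal "ssd".
type DiskType string

const (
	// HardDriveType is assumed to be the zero value, so the default disk
	// type adds nothing to volume-layout keys (see collection.go below).
	HardDriveType DiskType = ""
	// SsdType matches the "ssd" literal used in topology_test.go.
	SsdType DiskType = "ssd"
)

// ToDiskType parses the disk type string carried in heartbeat messages.
// Every call site in this diff discards the error, so a forgiving parser
// that falls back to HardDriveType is assumed.
func ToDiskType(s string) (DiskType, error) {
	switch s {
	case "", "hdd":
		return HardDriveType, nil
	case "ssd":
		return SsdType, nil
	}
	return HardDriveType, fmt.Errorf("unknown disk type %q", s)
}
```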
```diff
diff --git a/weed/topology/collection.go b/weed/topology/collection.go
index d2bdeab62..0c5dc5db7 100644
--- a/weed/topology/collection.go
+++ b/weed/topology/collection.go
@@ -30,27 +30,27 @@ func (c *Collection) String() string {
 	return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
 }
 
-func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) *VolumeLayout {
+func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
 	keyString := rp.String()
 	if ttl != nil {
 		keyString += ttl.String()
 	}
-	if volumeType != storage.HardDriveType {
-		keyString += string(volumeType)
+	if diskType != storage.HardDriveType {
+		keyString += string(diskType)
 	}
 	vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
-		return NewVolumeLayout(rp, ttl, volumeType, c.volumeSizeLimit, c.replicationAsMin)
+		return NewVolumeLayout(rp, ttl, diskType, c.volumeSizeLimit, c.replicationAsMin)
 	})
 	return vl.(*VolumeLayout)
 }
 
-func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) {
+func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
 	keyString := rp.String()
 	if ttl != nil {
 		keyString += ttl.String()
 	}
-	if volumeType != storage.HardDriveType {
-		keyString += string(volumeType)
+	if diskType != storage.HardDriveType {
+		keyString += string(diskType)
 	}
 	c.storageType2VolumeLayout.Delete(keyString)
 }
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 0ea234202..400e91455 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -50,7 +50,7 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
 func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
 	if oldV, ok := dn.volumes[v.Id]; !ok {
 		dn.volumes[v.Id] = v
-		if v.VolumeType == storage.SsdType {
+		if v.DiskType == storage.SsdType {
 			dn.UpAdjustSsdVolumeCountDelta(1)
 		} else {
 			dn.UpAdjustVolumeCountDelta(1)
@@ -93,7 +93,7 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
 			glog.V(0).Infoln("Deleting volume id:", vid)
 			delete(dn.volumes, vid)
 			deletedVolumes = append(deletedVolumes, v)
-			if v.VolumeType == storage.SsdType {
+			if v.DiskType == storage.SsdType {
 				dn.UpAdjustSsdVolumeCountDelta(-1)
 			} else {
 				dn.UpAdjustVolumeCountDelta(-1)
@@ -124,7 +124,7 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
 
 	for _, v := range deletedVolumes {
 		delete(dn.volumes, v.Id)
-		if v.VolumeType == storage.SsdType {
+		if v.DiskType == storage.SsdType {
 			dn.UpAdjustSsdVolumeCountDelta(-1)
 		} else {
 			dn.UpAdjustVolumeCountDelta(-1)
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index f5d335be5..486394f7a 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -121,12 +121,12 @@ func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
 }
 
 func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
-	vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.VolumeType)
+	vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
 	return vl.GetActiveVolumeCount(option) > 0
 }
 
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
-	vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.VolumeType).PickForWrite(count, option)
+	vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType).PickForWrite(count, option)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
 	}
@@ -137,10 +137,10 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
 	return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
 }
 
-func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) *VolumeLayout {
+func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
 	return t.collectionMap.Get(collectionName, func() interface{} {
 		return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
-	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, volumeType)
+	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType)
 }
 
 func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) {
@@ -176,30 +176,30 @@ func (t *Topology) DeleteCollection(collectionName string) {
 	t.collectionMap.Delete(collectionName)
 }
 
-func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) {
+func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
 	collection, found := t.FindCollection(collectionName)
 	if !found {
 		return
 	}
-	collection.DeleteVolumeLayout(rp, ttl, volumeType)
+	collection.DeleteVolumeLayout(rp, ttl, diskType)
 	if len(collection.storageType2VolumeLayout.Items()) == 0 {
 		t.DeleteCollection(collectionName)
 	}
 }
 
 func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-	volumeType, _ := storage.ToVolumeType(v.VolumeType)
-	vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
+	diskType, _ := storage.ToDiskType(v.DiskType)
+	vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 	vl.RegisterVolume(&v, dn)
 	vl.EnsureCorrectWritables(&v)
 }
 
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
 	glog.Infof("removing volume info: %+v", v)
-	volumeType, _ := storage.ToVolumeType(v.VolumeType)
-	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
+	diskType, _ := storage.ToDiskType(v.DiskType)
+	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 	volumeLayout.UnRegisterVolume(&v, dn)
 	if volumeLayout.isEmpty() {
-		t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
+		t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 	}
 }
 
@@ -235,8 +235,8 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
 		t.UnRegisterVolumeLayout(v, dn)
 	}
 	for _, v := range changedVolumes {
-		volumeType, _ := storage.ToVolumeType(v.VolumeType)
-		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
+		diskType, _ := storage.ToDiskType(v.DiskType)
+		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 		vl.EnsureCorrectWritables(&v)
 	}
 	return
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 076900069..5b9facc14 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -37,8 +37,8 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
 	}()
 }
 func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
-	volumeType, _ := storage.ToVolumeType(volumeInfo.VolumeType)
-	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, volumeType)
+	diskType, _ := storage.ToDiskType(volumeInfo.DiskType)
+	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
 	if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
 		return false
 	}
@@ -56,8 +56,8 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
 	for _, v := range dn.GetVolumes() {
 		glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
-		volumeType, _ := storage.ToVolumeType(v.VolumeType)
-		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, volumeType)
+		diskType, _ := storage.ToDiskType(v.DiskType)
+		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 		vl.SetVolumeUnavailable(dn, v.Id)
 	}
 	dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 07dc22681..92dcb9a22 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -60,7 +60,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 				ReplicaPlacement: uint32(0),
 				Version:          uint32(needle.CurrentVersion),
 				Ttl:              0,
-				VolumeType:       "ssd",
+				DiskType:         "ssd",
 			}
 			volumeMessages = append(volumeMessages, volumeMessage)
 		}
@@ -168,7 +168,7 @@ func TestAddRemoveVolume(t *testing.T) {
 		Id:               needle.VolumeId(1),
 		Size:             100,
 		Collection:       "xcollection",
-		VolumeType:       "ssd",
+		DiskType:         "ssd",
 		FileCount:        123,
 		DeleteCount:      23,
 		DeletedByteCount: 45,
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 7a0f3beb6..b2f1fe3d5 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -27,7 +27,7 @@ type VolumeGrowOption struct {
 	Collection       string
 	ReplicaPlacement *super_block.ReplicaPlacement
 	Ttl              *needle.TTL
-	VolumeType       storage.VolumeType
+	DiskType         storage.DiskType
 	Prealloacte      int64
 	DataCenter       string
 	Rack             string
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 3d391cd84..ba30eca49 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -103,7 +103,7 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
 type VolumeLayout struct {
 	rp               *super_block.ReplicaPlacement
 	ttl              *needle.TTL
-	volumeType       storage.VolumeType
+	diskType         storage.DiskType
 	vid2location     map[needle.VolumeId]*VolumeLocationList
 	writables        []needle.VolumeId   // transient array of writable volume id
 	readonlyVolumes  *volumesBinaryState // readonly volumes
@@ -119,11 +119,11 @@ type VolumeLayoutStats struct {
 	FileCount uint64
 }
 
-func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
 	return &VolumeLayout{
 		rp:              rp,
 		ttl:             ttl,
-		volumeType:      volumeType,
+		diskType:        diskType,
 		vid2location:    make(map[needle.VolumeId]*VolumeLocationList),
 		writables:       *new([]needle.VolumeId),
 		readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
```
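The one behavioral subtlety in this rename is the volume-layout key in `collection.go`: the disk type is appended to the key only when it is not the hard-drive default. A self-contained sketch of that keying rule (the helper `layoutKey` and the example values are illustrative, not names from the source):

```go
package main

import "fmt"

// layoutKey mirrors Collection.GetOrCreateVolumeLayout: the key is the
// replica placement, then the TTL if one is set, then the disk type --
// except that the hard-drive default contributes no suffix at all.
func layoutKey(replicaPlacement, ttl, diskType string) string {
	key := replicaPlacement
	if ttl != "" {
		key += ttl
	}
	if diskType != "" { // "" stands in for storage.HardDriveType
		key += diskType
	}
	return key
}

func main() {
	fmt.Println(layoutKey("010", "", ""))      // "010": hard-drive layouts keep their old keys
	fmt.Println(layoutKey("010", "3d", "ssd")) // "0103dssd": TTL and SSD both distinguish the layout
}
```

Leaving the hard-drive case suffix-free looks deliberate: every layout key that existed before this change still resolves to the same `VolumeLayout` after it, so the rename stays compatible with running clusters that only use the default disk type.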
