Diffstat (limited to 'weed/shell')
-rw-r--r--  weed/shell/command_collection_delete.go  34
-rw-r--r--  weed/shell/command_collection_list.go  5
-rw-r--r--  weed/shell/command_ec_balance.go  116
-rw-r--r--  weed/shell/command_ec_common.go  140
-rw-r--r--  weed/shell/command_ec_decode.go  68
-rw-r--r--  weed/shell/command_ec_encode.go  75
-rw-r--r--  weed/shell/command_ec_rebuild.go  59
-rw-r--r--  weed/shell/command_ec_test.go  6
-rw-r--r--  weed/shell/command_fs_cat.go  21
-rw-r--r--  weed/shell/command_fs_cd.go  19
-rw-r--r--  weed/shell/command_fs_configure.go  129
-rw-r--r--  weed/shell/command_fs_du.go  66
-rw-r--r--  weed/shell/command_fs_lock_unlock.go  55
-rw-r--r--  weed/shell/command_fs_ls.go  28
-rw-r--r--  weed/shell/command_fs_meta_cat.go  32
-rw-r--r--  weed/shell/command_fs_meta_load.go  22
-rw-r--r--  weed/shell/command_fs_meta_notify.go  14
-rw-r--r--  weed/shell/command_fs_meta_save.go  131
-rw-r--r--  weed/shell/command_fs_mv.go  27
-rw-r--r--  weed/shell/command_fs_pwd.go  6
-rw-r--r--  weed/shell/command_fs_tree.go  29
-rw-r--r--  weed/shell/command_s3_bucket_create.go  85
-rw-r--r--  weed/shell/command_s3_bucket_delete.go  54
-rw-r--r--  weed/shell/command_s3_bucket_list.go  78
-rw-r--r--  weed/shell/command_s3_clean_uploads.go  92
-rw-r--r--  weed/shell/command_s3_configure.go  183
-rw-r--r--  weed/shell/command_volume_balance.go  326
-rw-r--r--  weed/shell/command_volume_balance_test.go  183
-rw-r--r--  weed/shell/command_volume_check_disk.go  258
-rw-r--r--  weed/shell/command_volume_configure_replication.go  107
-rw-r--r--  weed/shell/command_volume_copy.go  26
-rw-r--r--  weed/shell/command_volume_delete.go  26
-rw-r--r--  weed/shell/command_volume_fix_replication.go  369
-rw-r--r--  weed/shell/command_volume_fix_replication_test.go  273
-rw-r--r--  weed/shell/command_volume_fsck.go  372
-rw-r--r--  weed/shell/command_volume_list.go  58
-rw-r--r--  weed/shell/command_volume_list_test.go  893
-rw-r--r--  weed/shell/command_volume_mark.go  55
-rw-r--r--  weed/shell/command_volume_mount.go  29
-rw-r--r--  weed/shell/command_volume_move.go  104
-rw-r--r--  weed/shell/command_volume_server_evacuate.go  221
-rw-r--r--  weed/shell/command_volume_server_evacuate_test.go  17
-rw-r--r--  weed/shell/command_volume_server_leave.go  67
-rw-r--r--  weed/shell/command_volume_tier_download.go  33
-rw-r--r--  weed/shell/command_volume_tier_move.go  177
-rw-r--r--  weed/shell/command_volume_tier_upload.go  23
-rw-r--r--  weed/shell/command_volume_unmount.go  29
-rw-r--r--  weed/shell/command_volume_vacuum.go  53
-rw-r--r--  weed/shell/commands.go  79
-rw-r--r--  weed/shell/shell_liner.go  81
50 files changed, 4645 insertions, 788 deletions
diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go
index fbaddcd51..e43f2a093 100644
--- a/weed/shell/command_collection_delete.go
+++ b/weed/shell/command_collection_delete.go
@@ -2,6 +2,7 @@ package shell
import (
"context"
+ "flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"io"
@@ -21,23 +22,40 @@ func (c *commandCollectionDelete) Name() string {
func (c *commandCollectionDelete) Help() string {
return `delete specified collection
- collection.delete <collection_name>
+ collection.delete -collection <collection_name> -force
`
}
func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) == 0 {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ collectionName := colDeleteCommand.String("collection", "", "collection to delete. Use '_default_' for the empty-named collection.")
+ applyBalancing := colDeleteCommand.Bool("force", false, "apply the deletion")
+ if err = colDeleteCommand.Parse(args); err != nil {
return nil
}
- collectionName := args[0]
+ if *collectionName == "" {
+ return fmt.Errorf("empty collection name is not allowed")
+ }
+
+ if *collectionName == "_default_" {
+ *collectionName = ""
+ }
+
+ if !*applyBalancing {
+ fmt.Fprintf(writer, "collection '%s' will be deleted. Use -force to apply the change.\n", *collectionName)
+ return nil
+ }
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{
- Name: collectionName,
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: *collectionName,
})
return err
})
@@ -45,7 +63,7 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
return
}
- fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName)
+ fmt.Fprintf(writer, "collection %s is deleted.\n", *collectionName)
return nil
}
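
The rewritten Do above follows the shell's dry-run-by-default convention: parse a per-command FlagSet, report what would happen, and only act when -force is given. A minimal standalone sketch of that pattern; the command and flag names mirror the diff, everything else is illustrative rather than the actual SeaweedFS wiring:

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        fs := flag.NewFlagSet("collection.delete", flag.ContinueOnError)
        collection := fs.String("collection", "", "collection to delete")
        force := fs.Bool("force", false, "apply the deletion")
        if err := fs.Parse(os.Args[1:]); err != nil {
            return
        }
        if !*force {
            // dry run: describe the change and require -force to apply it
            fmt.Printf("collection '%s' will be deleted. Use -force to apply the change.\n", *collection)
            return
        }
        fmt.Printf("deleting collection '%s' ...\n", *collection)
    }
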
diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go
index c4325c66f..2a114e61b 100644
--- a/weed/shell/command_collection_list.go
+++ b/weed/shell/command_collection_list.go
@@ -41,9 +41,8 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer
func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) {
var resp *master_pb.CollectionListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{
IncludeNormalVolumes: includeNormalVolumes,
IncludeEcVolumes: includeEcVolumes,
})
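
This hunk shows the package-wide signature change in miniature: MasterClient.WithClient no longer threads a caller-supplied context, and each gRPC call now constructs context.Background() at the call site. A reduced sketch of the before/after shape, with stand-in types instead of the real weed/wdclient client:

    package main

    import (
        "context"
        "fmt"
    )

    type SeaweedClient interface {
        CollectionList(ctx context.Context) error
    }

    type fakeClient struct{}

    func (fakeClient) CollectionList(ctx context.Context) error { return nil }

    type MasterClient struct{ client SeaweedClient }

    // Before: WithClient(ctx context.Context, fn func(SeaweedClient) error) error
    // After:  no context parameter; callers build a context per RPC.
    func (mc *MasterClient) WithClient(fn func(client SeaweedClient) error) error {
        return fn(mc.client)
    }

    func main() {
        mc := &MasterClient{client: fakeClient{}}
        err := mc.WithClient(func(client SeaweedClient) error {
            return client.CollectionList(context.Background())
        })
        fmt.Println(err) // <nil>
    }
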
diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go
index 96599372e..b1ca926d5 100644
--- a/weed/shell/command_ec_balance.go
+++ b/weed/shell/command_ec_balance.go
@@ -1,9 +1,9 @@
package shell
import (
- "context"
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"sort"
@@ -29,7 +29,7 @@ func (c *commandEcBalance) Help() string {
Algorithm:
- For each type of volume server (different max volume count limit){
+ func EcBalance() {
for each collection:
balanceEcVolumes(collectionName)
for each rack:
@@ -99,6 +99,10 @@ func (c *commandEcBalance) Help() string {
func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
@@ -107,10 +111,8 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
return nil
}
- ctx := context.Background()
-
// collect all ec nodes
- allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc)
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc)
if err != nil {
return err
}
@@ -138,7 +140,7 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
}
}
- if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil {
+ if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil {
return fmt.Errorf("balance ec racks: %v", err)
}
@@ -162,38 +164,36 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack {
func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
- ctx := context.Background()
-
fmt.Printf("balanceEcVolumes %s\n", collection)
- if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil {
+ if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil {
return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err)
}
- if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
}
- if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
- return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
+ if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ return fmt.Errorf("balance within racks collection %s ec shards: %v", collection, err)
}
return nil
}
-func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
+func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
// vid => []ecNode
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
// deduplicate ec shards
for vid, locations := range vidLocations {
- if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil {
+ if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
+func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
// check whether this volume has ecNodes that are over average
shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
@@ -215,10 +215,10 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
duplicatedShardIds := []uint32{uint32(shardId)}
for _, ecNode := range ecNodes[1:] {
- if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
return err
}
- if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
return err
}
ecNode.deleteEcVolumeShards(vid, duplicatedShardIds)
@@ -227,19 +227,19 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
return nil
}
-func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
// collect vid => []ecNode, since previous steps can change the locations
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
// spread the ec shards evenly
for vid, locations := range vidLocations {
- if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil {
+ if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
+func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
// calculate average number of shards an ec rack should have for one volume
averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks))
@@ -274,7 +274,7 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c
for _, n := range racks[rackId].ecNodes {
possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
}
- err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
if err != nil {
return err
}
@@ -306,7 +306,7 @@ func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]i
return ""
}
-func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
// collect vid => []ecNode, since previous steps can change the locations
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
@@ -326,11 +326,13 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all
var possibleDestinationEcNodes []*EcNode
for _, n := range racks[RackId(rackId)].ecNodes {
- possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+ if _, found := n.info.DiskInfos[string(types.HardDriveType)]; found {
+ possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+ }
}
sourceEcNodes := rackEcNodesWithVid[rackId]
averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes))
- if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil {
+ if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil {
return err
}
}
@@ -338,7 +340,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all
return nil
}
-func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
for _, ecNode := range existingLocations {
@@ -353,7 +355,7 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv,
fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId)
- err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
if err != nil {
return err
}
@@ -365,18 +367,18 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv,
return nil
}
-func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error {
+func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error {
// balance one rack for all ec shards
for _, ecRack := range racks {
- if err := doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil {
+ if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error {
+func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error {
if len(ecRack.ecNodes) <= 1 {
return nil
@@ -387,11 +389,15 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
rackEcNodes = append(rackEcNodes, node)
}
- ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) {
- for _, ecShardInfo := range node.info.EcShardInfos {
+ ecNodeIdToShardCount := groupByCount(rackEcNodes, func(ecNode *EcNode) (id string, count int) {
+ diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+ if !found {
+ return
+ }
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount()
}
- return node.info.Id, count
+ return ecNode.info.Id, count
})
var totalShardCount int
@@ -412,26 +418,30 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount {
emptyNodeIds := make(map[uint32]bool)
- for _, shards := range emptyNode.info.EcShardInfos {
- emptyNodeIds[shards.Id] = true
+ if emptyDiskInfo, found := emptyNode.info.DiskInfos[string(types.HardDriveType)]; found {
+ for _, shards := range emptyDiskInfo.EcShardInfos {
+ emptyNodeIds[shards.Id] = true
+ }
}
- for _, shards := range fullNode.info.EcShardInfos {
- if _, found := emptyNodeIds[shards.Id]; !found {
- for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
-
- fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
-
- err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
- if err != nil {
- return err
+ if fullDiskInfo, found := fullNode.info.DiskInfos[string(types.HardDriveType)]; found {
+ for _, shards := range fullDiskInfo.EcShardInfos {
+ if _, found := emptyNodeIds[shards.Id]; !found {
+ for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
+
+ fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
+
+ err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
+ if err != nil {
+ return err
+ }
+
+ ecNodeIdToShardCount[emptyNode.info.Id]++
+ ecNodeIdToShardCount[fullNode.info.Id]--
+ hasMove = true
+ break
}
-
- ecNodeIdToShardCount[emptyNode.info.Id]++
- ecNodeIdToShardCount[fullNode.info.Id]--
- hasMove = true
break
}
- break
}
}
}
@@ -440,7 +450,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
return nil
}
-func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes)
@@ -458,7 +468,7 @@ func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, a
fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id)
- err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
+ err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
if err != nil {
return err
}
@@ -512,7 +522,11 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode {
vidLocations := make(map[needle.VolumeId][]*EcNode)
for _, ecNode := range allEcNodes {
- for _, shardInfo := range ecNode.info.EcShardInfos {
+ diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+ if !found {
+ continue
+ }
+ for _, shardInfo := range diskInfo.EcShardInfos {
vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
}
}
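
Most of the churn in this file comes from the new per-disk-type topology: EC shard lists moved from DataNodeInfo.EcShardInfos to a DiskInfos map keyed by disk type, so every traversal now guards the lookup. A reduced sketch of that pattern with stand-in structs; treating the hard-drive type as the empty-string key is an assumption about how types.HardDriveType stringifies in this release:

    package main

    import "fmt"

    // stand-ins for the master_pb types used in the diff
    type VolumeEcShardInformationMessage struct {
        Id          uint32
        EcIndexBits uint32
    }

    type DiskInfo struct {
        EcShardInfos []*VolumeEcShardInformationMessage
    }

    type DataNodeInfo struct {
        Id        string
        DiskInfos map[string]*DiskInfo
    }

    const hardDriveType = "" // assumption: HDD is the empty-string disk type

    func collectVolumeIds(dn *DataNodeInfo) (vids []uint32) {
        diskInfo, found := dn.DiskInfos[hardDriveType]
        if !found {
            return nil // no hard-drive disks on this node, nothing to collect
        }
        for _, shardInfo := range diskInfo.EcShardInfos {
            vids = append(vids, shardInfo.Id)
        }
        return vids
    }

    func main() {
        dn := &DataNodeInfo{
            Id: "127.0.0.1:8080",
            DiskInfos: map[string]*DiskInfo{
                hardDriveType: {EcShardInfos: []*VolumeEcShardInformationMessage{{Id: 7}}},
            },
        }
        fmt.Println(collectVolumeIds(dn)) // [7]
    }
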
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index 2beed4742..fd35bb14b 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -3,6 +3,7 @@ package shell
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"math"
"sort"
@@ -15,26 +16,26 @@ import (
"google.golang.org/grpc"
)
-func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {
+func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {
copiedShardIds := []uint32{uint32(shardId)}
if applyBalancing {
// ask destination node to copy shard and the ecx file from source node, and mount it
- copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
+ copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
if err != nil {
return err
}
// unmount the to be deleted shards
- err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
if err != nil {
return err
}
// ask source node to delete the shard, and maybe the ecx file
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
if err != nil {
return err
}
@@ -50,7 +51,7 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist
}
-func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption,
+func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,
targetServer *EcNode, shardIdsToCopy []uint32,
volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) {
@@ -61,7 +62,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption
if targetServer.info.Id != existingLocation {
fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: shardIdsToCopy,
@@ -76,7 +77,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption
}
fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id)
- _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: shardIdsToCopy,
@@ -159,8 +160,15 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (cou
return
}
-func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
- return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos)
+func countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) {
+ if dn.DiskInfos == nil {
+ return 0
+ }
+ diskInfo := dn.DiskInfos[string(diskType)]
+ if diskInfo == nil {
+ return 0
+ }
+ return int(diskInfo.MaxVolumeCount-diskInfo.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)
}
type RackId string
@@ -173,30 +181,47 @@ type EcNode struct {
freeEcSlot int
}
+func (ecNode *EcNode) localShardIdCount(vid uint32) int {
+ for _, diskInfo := range ecNode.info.DiskInfos {
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ if vid == ecShardInfo.Id {
+ shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+ return shardBits.ShardIdCount()
+ }
+ }
+ }
+ return 0
+}
+
type EcRack struct {
ecNodes map[EcNodeId]*EcNode
freeEcSlot int
}
-func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
+func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
// list all possible locations
- var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
- return err
- })
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
if err != nil {
- return nil, 0, err
+ return
}
// find out all volume servers with one slot left.
- eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)
+
+ sortEcNodesByFreeslotsDecending(ecNodes)
+
+ return
+}
+
+func collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int) {
+ eachDataNode(topo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
if selectedDataCenter != "" && selectedDataCenter != dc {
return
}
- freeEcSlots := countFreeShardSlots(dn)
+ freeEcSlots := countFreeShardSlots(dn, types.HardDriveType)
ecNodes = append(ecNodes, &EcNode{
info: dn,
dc: dc,
@@ -205,19 +230,15 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen
})
totalFreeEcSlots += freeEcSlots
})
-
- sortEcNodesByFreeslotsDecending(ecNodes)
-
return
}
-func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {
+func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {
fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation)
return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
+ _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: toBeDeletedShardIds,
@@ -227,13 +248,12 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt
}
-func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error {
+func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error {
fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation)
return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
+ _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{
VolumeId: uint32(volumeId),
ShardIds: toBeUnmountedhardIds,
})
@@ -241,13 +261,12 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
})
}
-func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error {
+func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error {
fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation)
return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: toBeMountedhardIds,
@@ -256,15 +275,21 @@ func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
})
}
+func divide(total, n int) float64 {
+ return float64(total) / float64(n)
+}
+
func ceilDivide(total, n int) int {
return int(math.Ceil(float64(total) / float64(n)))
}
func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {
- for _, shardInfo := range ecNode.info.EcShardInfos {
- if needle.VolumeId(shardInfo.Id) == vid {
- return erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
+ for _, shardInfo := range diskInfo.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ return erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ }
}
}
@@ -274,18 +299,26 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar
func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {
foundVolume := false
- for _, shardInfo := range ecNode.info.EcShardInfos {
- if needle.VolumeId(shardInfo.Id) == vid {
- oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
- newShardBits := oldShardBits
- for _, shardId := range shardIds {
- newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+ diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+ if found {
+ for _, shardInfo := range diskInfo.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ newShardBits := oldShardBits
+ for _, shardId := range shardIds {
+ newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+ }
+ shardInfo.EcIndexBits = uint32(newShardBits)
+ ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
+ foundVolume = true
+ break
}
- shardInfo.EcIndexBits = uint32(newShardBits)
- ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
- foundVolume = true
- break
}
+ } else {
+ diskInfo = &master_pb.DiskInfo{
+ Type: string(types.HardDriveType),
+ }
+ ecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo
}
if !foundVolume {
@@ -293,10 +326,11 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
for _, shardId := range shardIds {
newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
}
- ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
+ diskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
Id: uint32(vid),
Collection: collection,
EcIndexBits: uint32(newShardBits),
+ DiskType: string(types.HardDriveType),
})
ecNode.freeEcSlot -= len(shardIds)
}
@@ -306,15 +340,17 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {
- for _, shardInfo := range ecNode.info.EcShardInfos {
- if needle.VolumeId(shardInfo.Id) == vid {
- oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
- newShardBits := oldShardBits
- for _, shardId := range shardIds {
- newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
+ if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
+ for _, shardInfo := range diskInfo.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ newShardBits := oldShardBits
+ for _, shardId := range shardIds {
+ newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
+ }
+ shardInfo.EcIndexBits = uint32(newShardBits)
+ ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
}
- shardInfo.EcIndexBits = uint32(newShardBits)
- ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
}
}
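
The reworked countFreeShardSlots computes capacity per disk type: unused volume slots times the number of data shards per volume, minus the shards already placed on that disk. A small worked version of the arithmetic, assuming the default 10+4 Reed-Solomon layout (erasure_coding.DataShardsCount = 10):

    package main

    import "fmt"

    const dataShardsCount = 10 // data shards per EC volume in the default 10+4 layout

    func freeShardSlots(maxVolumes, activeVolumes int64, existingShards int) int {
        return int(maxVolumes-activeVolumes)*dataShardsCount - existingShards
    }

    func main() {
        // a disk with room for 10 more volumes and 25 EC shards already placed:
        fmt.Println(freeShardSlots(100, 90, 25)) // (100-90)*10 - 25 = 75
    }
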
diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go
index 1f9ad2ff9..dafdb041a 100644
--- a/weed/shell/command_ec_decode.go
+++ b/weed/shell/command_ec_decode.go
@@ -4,6 +4,7 @@ import (
"context"
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"google.golang.org/grpc"
@@ -36,6 +37,10 @@ func (c *commandEcDecode) Help() string {
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
collection := encodeCommand.String("collection", "", "the collection name")
@@ -43,25 +48,24 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// collect topology information
- topologyInfo, err := collectTopologyInfo(ctx, commandEnv)
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
// volumeId is provided
if vid != 0 {
- return doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid)
+ return doEcDecode(commandEnv, topologyInfo, *collection, vid)
}
// apply to all volumes in the collection
volumeIds := collectEcShardIds(topologyInfo, *collection)
fmt.Printf("ec encode volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil {
+ if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
return err
}
}
@@ -69,26 +73,26 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
-func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
+func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
// find volume location
nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)
fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)
// collect ec shards to the server with most space
- targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid)
+ targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
if err != nil {
return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
}
// generate a normal volume
- err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
+ err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)
if err != nil {
return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
}
// delete the previous ec shards
- err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
+ err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
if err != nil {
return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
}
@@ -96,11 +100,11 @@ func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb
return nil
}
-func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
+func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
// mount volume
if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(vid),
})
return mountErr
@@ -111,7 +115,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
// unmount ec shards
for location, ecIndexBits := range nodeToEcIndexBits {
fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
- err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
+ err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
if err != nil {
return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
}
@@ -119,7 +123,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
// delete ec shards
for location, ecIndexBits := range nodeToEcIndexBits {
fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
- err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
+ err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
if err != nil {
return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
}
@@ -128,12 +132,12 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
return nil
}
-func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {
+func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {
fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)
err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
+ _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
VolumeId: uint32(vid),
Collection: collection,
})
@@ -144,7 +148,7 @@ func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, v
}
-func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {
+func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {
maxShardCount := 0
var exisitngEcIndexBits erasure_coding.ShardBits
@@ -174,7 +178,7 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB
fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(vid),
Collection: collection,
ShardIds: needToCopyEcIndexBits.ToUint32Slice(),
@@ -204,27 +208,29 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB
}
-func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {
+func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {
var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
return
}
- return resp.TopologyInfo, nil
+ return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
}
func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
- for _, v := range dn.EcShardInfos {
- if v.Collection == selectedCollection && v.Id == uint32(vid) {
- ecShardInfos = append(ecShardInfos, v)
+ if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+ for _, v := range diskInfo.EcShardInfos {
+ if v.Collection == selectedCollection && v.Id == uint32(vid) {
+ ecShardInfos = append(ecShardInfos, v)
+ }
}
}
})
@@ -236,9 +242,11 @@ func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection stri
vidMap := make(map[uint32]bool)
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
- for _, v := range dn.EcShardInfos {
- if v.Collection == selectedCollection {
- vidMap[v.Id] = true
+ if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+ for _, v := range diskInfo.EcShardInfos {
+ if v.Collection == selectedCollection {
+ vidMap[v.Id] = true
+ }
}
}
})
@@ -254,9 +262,11 @@ func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeI
nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
- for _, v := range dn.EcShardInfos {
- if v.Id == uint32(vid) {
- nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
+ if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+ for _, v := range diskInfo.EcShardInfos {
+ if v.Id == uint32(vid) {
+ nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
+ }
}
}
})
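
collectEcNodeShardBits above folds each node's shard holdings into an EcIndexBits bitmap, one bit per shard id. A toy ShardBits that mirrors the spirit of weed/storage/erasure_coding.ShardBits; the 14-shard bound assumes the default 10 data plus 4 parity layout, and the real type carries more methods:

    package main

    import "fmt"

    type ShardBits uint32

    func (b ShardBits) AddShardId(id uint) ShardBits { return b | (1 << id) }

    func (b ShardBits) ShardIds() (ids []uint) {
        for id := uint(0); id < 14; id++ { // 10 data + 4 parity shards
            if b&(1<<id) != 0 {
                ids = append(ids, id)
            }
        }
        return
    }

    func main() {
        var bits ShardBits
        bits = bits.AddShardId(0).AddShardId(3).AddShardId(13)
        fmt.Println(bits.ShardIds()) // [0 3 13]
    }
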
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 58527abf2..634cb11e2 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -54,6 +54,10 @@ func (c *commandEcEncode) Help() string {
func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
collection := encodeCommand.String("collection", "", "the collection name")
@@ -63,22 +67,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// volumeId is provided
if vid != 0 {
- return doEcEncode(ctx, commandEnv, *collection, vid)
+ return doEcEncode(commandEnv, *collection, vid)
}
// apply to all volumes in the collection
- volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
if err != nil {
return err
}
fmt.Printf("ec encode volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil {
+ if err = doEcEncode(commandEnv, *collection, vid); err != nil {
return err
}
}
@@ -86,7 +89,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
-func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
+func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
@@ -96,19 +99,19 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string,
// fmt.Printf("found ec %d shards on %v\n", vid, locations)
// mark the volume as readonly
- err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, vid, locations)
if err != nil {
return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
}
// generate ec shards
- err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url)
+ err = generateEcShards(commandEnv.option.GrpcDialOption, vid, collection, locations[0].Url)
if err != nil {
return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err)
}
// balance the ec shards to current cluster
- err = spreadEcShards(ctx, commandEnv, vid, collection, locations)
+ err = spreadEcShards(commandEnv, vid, collection, locations)
if err != nil {
return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
}
@@ -116,12 +119,14 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string,
return nil
}
-func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error {
+func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error {
for _, location := range locations {
+ fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url)
+
err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
+ _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
VolumeId: uint32(volumeId),
})
return markErr
@@ -136,10 +141,12 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol
return nil
}
-func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
+func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
+
+ fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)
err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
+ _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -150,9 +157,9 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum
}
-func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
- allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "")
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
if err != nil {
return err
}
@@ -169,26 +176,27 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle
allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
// ask the data nodes to copy from the source volume server
- copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
+ copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
if err != nil {
return err
}
// unmount the to be deleted shards
- err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
if err != nil {
return err
}
// ask the source volume server to clean up copied ec shards
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
if err != nil {
return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err)
}
// ask the source volume server to delete the original volume
for _, location := range existingLocations {
- err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, location.Url)
+ fmt.Printf("delete volume %d from %s\n", volumeId, location.Url)
+ err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url)
if err != nil {
return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err)
}
@@ -198,9 +206,9 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle
}
-func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption,
- targetServers []*EcNode, allocatedEcIds [][]uint32,
- volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+
+ fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
// parallelize
shardIdChan := make(chan []uint32, len(targetServers))
@@ -213,7 +221,7 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia
wg.Add(1)
go func(server *EcNode, allocatedEcShardIds []uint32) {
defer wg.Done()
- copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server,
+ copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
allocatedEcShardIds, volumeId, collection, existingLocation.Url)
if copyErr != nil {
err = copyErr
@@ -255,13 +263,10 @@ func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
return allocated
}
-func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
+func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
- var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
- return err
- })
+ // collect topology information
+ topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
if err != nil {
return
}
@@ -269,14 +274,16 @@ func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, se
quietSeconds := int64(quietPeriod / time.Second)
nowUnixSeconds := time.Now().Unix()
- fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds)
+ fmt.Printf("collect volumes quiet for: %d seconds\n", quietSeconds)
vidMap := make(map[uint32]bool)
- eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
- for _, v := range dn.VolumeInfos {
- if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
- if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
- vidMap[v.Id] = true
+ eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, diskInfo := range dn.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
+ if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
+ vidMap[v.Id] = true
+ }
}
}
}
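
collectVolumeIdsForEcEncode now reads both the topology and the volume size limit from collectTopologyInfo's two return values, then keeps volumes that have been quiet for quietPeriod and are fuller than fullPercentage of that limit. The selection test, isolated into a hypothetical helper:

    package main

    import (
        "fmt"
        "time"
    )

    func shouldEncode(sizeBytes uint64, modifiedAt time.Time, volumeSizeLimitMb uint64, fullPercentage float64, quietPeriod time.Duration) bool {
        quietLongEnough := time.Since(modifiedAt) > quietPeriod
        fullEnough := float64(sizeBytes) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024
        return quietLongEnough && fullEnough
    }

    func main() {
        // a 29 GiB volume, untouched for 2h, against a 30 GiB limit, 95% threshold, 1h quiet period:
        fmt.Println(shouldEncode(29<<30, time.Now().Add(-2*time.Hour), 30*1024, 95, time.Hour)) // true
    }
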
diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go
index 2e2fca743..8d5d7bb91 100644
--- a/weed/shell/command_ec_rebuild.go
+++ b/weed/shell/command_ec_rebuild.go
@@ -56,6 +56,10 @@ func (c *commandEcRebuild) Help() string {
func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
applyChanges := fixCommand.Bool("force", false, "apply the changes")
@@ -64,7 +68,7 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
}
// collect all ec nodes
- allEcNodes, _, err := collectEcNodes(context.Background(), commandEnv, "")
+ allEcNodes, _, err := collectEcNodes(commandEnv, "")
if err != nil {
return err
}
@@ -92,8 +96,6 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {
- ctx := context.Background()
-
fmt.Printf("rebuildEcVolumes %s\n", collection)
// collect vid => each shard locations, similar to ecShardMap in topology.go
@@ -117,7 +119,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
return fmt.Errorf("disk space is not enough")
}
- if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
+ if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
return err
}
}
@@ -125,13 +127,13 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
return nil
}
-func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
+func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)
// collect shard files to rebuilder local disk
var generatedShardIds []uint32
- copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
+ copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
if err != nil {
return err
}
@@ -139,7 +141,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
// clean up working files
// ask the rebuilder to delete the copied shards
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds)
if err != nil {
fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
}
@@ -151,13 +153,13 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
}
// generate ec shards, and maybe ecx file
- generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id)
+ generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id)
if err != nil {
return err
}
// mount the generated shards
- err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds)
+ err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds)
if err != nil {
return err
}
@@ -167,11 +169,10 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
return nil
}
-func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) {
+func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) {
err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
+ resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -183,14 +184,16 @@ func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption,
return
}
-func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
+func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
needEcxFile := true
var localShardBits erasure_coding.ShardBits
- for _, ecShardInfo := range rebuilder.info.EcShardInfos {
- if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
- needEcxFile = false
- localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+ for _, diskInfo := range rebuilder.info.DiskInfos {
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
+ needEcxFile = false
+ localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+ }
}
}
@@ -210,7 +213,7 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder
var copyErr error
if applyBalancing {
copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: []uint32{uint32(shardId)},
@@ -246,15 +249,17 @@ type EcShardMap map[needle.VolumeId]EcShardLocations
type EcShardLocations [][]*EcNode
func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
- for _, shardInfo := range ecNode.info.EcShardInfos {
- if shardInfo.Collection == collection {
- existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
- if !found {
- existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
- ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
- }
- for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
- existing[shardId] = append(existing[shardId], ecNode)
+ for _, diskInfo := range ecNode.info.DiskInfos {
+ for _, shardInfo := range diskInfo.EcShardInfos {
+ if shardInfo.Collection == collection {
+ existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
+ if !found {
+ existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
+ ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
+ }
+ for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
+ existing[shardId] = append(existing[shardId], ecNode)
+ }
}
}
}
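
The registerEcNode and prepareDataToRecover hunks above show the recurring shape of this change: EC shard metadata moved from DataNodeInfo.EcShardInfos to the per-disk DiskInfos map. A minimal sketch of the new traversal, assuming the master_pb and erasure_coding types used in the hunks (collectShardIds is a hypothetical helper, not part of this change):

    package shell

    import (
        "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
        "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
    )

    // collectShardIds gathers volume id => shard ids for one node by walking
    // the new per-disk layout: DiskInfos holds one entry per disk type.
    func collectShardIds(node *master_pb.DataNodeInfo, collection string) map[uint32][]uint32 {
        vidToShards := make(map[uint32][]uint32)
        for _, diskInfo := range node.DiskInfos {
            for _, ecShardInfo := range diskInfo.EcShardInfos {
                if ecShardInfo.Collection != collection {
                    continue
                }
                for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
                    vidToShards[ecShardInfo.Id] = append(vidToShards[ecShardInfo.Id], uint32(shardId))
                }
            }
        }
        return vidToShards
    }

Every command that previously ranged over node-level shard infos needs the extra inner loop, which is exactly what the hunks above do.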
diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go
index c233d25d0..a1226adbb 100644
--- a/weed/shell/command_ec_test.go
+++ b/weed/shell/command_ec_test.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"testing"
@@ -121,13 +120,14 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) {
racks := collectRacks(allEcNodes)
balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
- balanceEcRacks(context.Background(), nil, racks, false)
+ balanceEcRacks(nil, racks, false)
}
func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode {
return &EcNode{
info: &master_pb.DataNodeInfo{
- Id: dataNodeId,
+ Id: dataNodeId,
+ DiskInfos: make(map[string]*master_pb.DiskInfo),
},
dc: dc,
rack: RackId(rack),
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index 9db36e9d1..df43d93dc 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -1,13 +1,13 @@
package shell
import (
- "context"
"fmt"
"io"
"math"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -25,39 +25,34 @@ func (c *commandFsCat) Help() string {
return `stream the file content on to the screen
fs.cat /dir/file_name
- fs.cat http://<filer_server>:<port>/dir/file_name
`
}
func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
return fmt.Errorf("%s is a directory", path)
}
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
- respLookupEntry, err := client.LookupDirectoryEntry(ctx, request)
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
- return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32)
+ return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
})
diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go
index 408ec86c8..2cc28f7a2 100644
--- a/weed/shell/command_fs_cd.go
+++ b/weed/shell/command_fs_cd.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"io"
)
@@ -17,41 +16,33 @@ func (c *commandFsCd) Name() string {
}
func (c *commandFsCd) Help() string {
- return `change directory to http://<filer_server>:<port>/dir/
+ return `change directory to a directory /path/to/dir
The full path can be too long to type. For example,
- fs.ls http://<filer_server>:<port>/some/path/to/file_name
+ fs.ls /some/path/to/file_name
can be simplified as
- fs.cd http://<filer_server>:<port>/some/path
+ fs.cd /some/path
fs.ls to/file_name
`
}
func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
if path == "/" {
- commandEnv.option.FilerHost = filerServer
- commandEnv.option.FilerPort = filerPort
commandEnv.option.Directory = "/"
return nil
}
- ctx := context.Background()
-
- err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path)
+ err = commandEnv.checkDirectory(path)
if err == nil {
- commandEnv.option.FilerHost = filerServer
- commandEnv.option.FilerPort = filerPort
commandEnv.option.Directory = path
}
diff --git a/weed/shell/command_fs_configure.go b/weed/shell/command_fs_configure.go
new file mode 100644
index 000000000..02cd7ac69
--- /dev/null
+++ b/weed/shell/command_fs_configure.go
@@ -0,0 +1,129 @@
+package shell
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsConfigure{})
+}
+
+type commandFsConfigure struct {
+}
+
+func (c *commandFsConfigure) Name() string {
+ return "fs.configure"
+}
+
+func (c *commandFsConfigure) Help() string {
+ return `configure and apply storage options for each location
+
+ # see the current configuration file content
+ fs.configure
+
+ # try the changes and see the resulting configuration file content
+ fs.configure -locationPrefix=/my/folder -collection=abc
+ fs.configure -locationPrefix=/my/folder -collection=abc -ttl=7d
+
+ # example: add only 1 physical volume for each bucket collection
+ fs.configure -locationPrefix=/buckets/ -volumeGrowthCount=1
+
+ # apply the changes
+ fs.configure -locationPrefix=/my/folder -collection=abc -apply
+
+ # delete the changes
+ fs.configure -locationPrefix=/my/folder -delete -apply
+
+`
+}
+
+func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ fsConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ locationPrefix := fsConfigureCommand.String("locationPrefix", "", "path prefix, required to update the path-specific configuration")
+ collection := fsConfigureCommand.String("collection", "", "assign writes to this collection")
+ replication := fsConfigureCommand.String("replication", "", "assign writes with this replication")
+ ttl := fsConfigureCommand.String("ttl", "", "assign writes with this ttl")
+ diskType := fsConfigureCommand.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+ fsync := fsConfigureCommand.Bool("fsync", false, "fsync for the writes")
+ volumeGrowthCount := fsConfigureCommand.Int("volumeGrowthCount", 0, "the number of physical volumes to add if no writable volumes")
+ isDelete := fsConfigureCommand.Bool("delete", false, "delete the configuration by locationPrefix")
+ apply := fsConfigureCommand.Bool("apply", false, "update and apply filer configuration")
+ if err = fsConfigureCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ if err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return filer.ReadEntry(commandEnv.MasterClient, client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, &buf)
+ }); err != nil && err != filer_pb.ErrNotFound {
+ return err
+ }
+
+ fc := filer.NewFilerConf()
+ if buf.Len() > 0 {
+ if err = fc.LoadFromBytes(buf.Bytes()); err != nil {
+ return err
+ }
+ }
+
+ if *locationPrefix != "" {
+ locConf := &filer_pb.FilerConf_PathConf{
+ LocationPrefix: *locationPrefix,
+ Collection: *collection,
+ Replication: *replication,
+ Ttl: *ttl,
+ Fsync: *fsync,
+ DiskType: *diskType,
+ VolumeGrowthCount: uint32(*volumeGrowthCount),
+ }
+
+ // check collection
+ if *collection != "" && strings.HasPrefix(*locationPrefix, "/buckets/") {
+ return fmt.Errorf("one s3 bucket goes to one collection and not customizable")
+ }
+
+ // check replication
+ if *replication != "" {
+ rp, err := super_block.NewReplicaPlacementFromString(*replication)
+ if err != nil {
+ return fmt.Errorf("parse replication %s: %v", *replication, err)
+ }
+ if *volumeGrowthCount%rp.GetCopyCount() != 0 {
+ return fmt.Errorf("volumeGrowthCount %d should be devided by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
+ }
+ }
+
+ // save it
+ if *isDelete {
+ fc.DeleteLocationConf(*locationPrefix)
+ } else {
+ fc.AddLocationConf(locConf)
+ }
+ }
+
+ buf.Reset()
+ fc.ToText(&buf)
+
+ fmt.Fprint(writer, buf.String())
+ fmt.Fprintln(writer)
+
+ if *apply {
+
+ if err := filer.SaveAs(commandEnv.option.FilerHost, int(commandEnv.option.FilerPort), filer.DirectoryEtcSeaweedFS, filer.FilerConfName, "text/plain; charset=utf-8", &buf); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+
+}
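
The volumeGrowthCount check above exists because each logical grow allocates one physical volume per replica. A small standalone sketch of the same validation, with illustrative values (the replication string and counts are examples only):

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/storage/super_block"
    )

    func main() {
        rp, err := super_block.NewReplicaPlacementFromString("001") // copy count 2
        if err != nil {
            panic(err)
        }
        for _, growthCount := range []int{1, 2, 4} {
            // growth count must be a multiple of the copy count, as checked above
            ok := growthCount%rp.GetCopyCount() == 0
            fmt.Printf("volumeGrowthCount=%d copyCount=%d valid=%v\n", growthCount, rp.GetCopyCount(), ok)
        }
    }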
diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go
index 1d7d79686..71003714d 100644
--- a/weed/shell/command_fs_du.go
+++ b/weed/shell/command_fs_du.go
@@ -1,13 +1,10 @@
package shell
import (
- "context"
"fmt"
"io"
- "google.golang.org/grpc"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -26,28 +23,26 @@ func (c *commandFsDu) Name() string {
func (c *commandFsDu) Help() string {
return `show disk usage
- fs.du http://<filer_server>:<port>/dir
- fs.du http://<filer_server>:<port>/dir/file_name
- fs.du http://<filer_server>:<port>/dir/file_prefix
+ fs.du /dir
+ fs.du /dir/file_name
+ fs.du /dir/file_prefix
`
}
func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
path = path + "/"
}
var blockCount, byteCount uint64
- dir, name := filer2.FullPath(path).DirAndName()
- blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name)
+ dir, name := util.FullPath(path).DirAndName()
+ blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name)
if name == "" && err == nil {
fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir)
@@ -57,54 +52,33 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
-func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) {
+func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) {
+
+ err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
+
+ var fileBlockCount, fileByteCount uint64
- err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) {
if entry.IsDirectory {
subDir := fmt.Sprintf("%s/%s", dir, entry.Name)
if dir == "/" {
subDir = "/" + entry.Name
}
- numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "")
+ numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "")
if err == nil {
blockCount += numBlock
byteCount += numByte
}
} else {
- blockCount += uint64(len(entry.Chunks))
- byteCount += filer2.TotalSize(entry.Chunks)
+ fileBlockCount = uint64(len(entry.Chunks))
+ fileByteCount = filer.FileSize(entry)
+ blockCount += fileBlockCount
+ byteCount += fileByteCount
}
if name != "" && !entry.IsDirectory {
- fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name)
+ fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name)
}
+ return nil
})
return
}
-
-func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error {
-
- filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000)
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(client)
- }, filerGrpcAddress, env.option.GrpcDialOption)
-
-}
-
-type commandFilerClient struct {
- env *CommandEnv
- filerServer string
- filerPort int64
-}
-
-func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient {
- return &commandFilerClient{
- env: env,
- filerServer: filerServer,
- filerPort: filerPort,
- }
-}
-func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
- return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn)
-}
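
This hunk deletes the old per-command withFilerClient(ctx, filerServer, filerPort, ...) plumbing; CommandEnv itself now satisfies filer_pb.FilerClient for the filer chosen at shell startup. A minimal sketch of the new call pattern used throughout these files (showEntry is a hypothetical helper):

    package shell

    import (
        "fmt"
        "io"

        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    )

    // showEntry looks up one entry via the shared filer client on CommandEnv,
    // with no ctx/host/port threaded through the call.
    func showEntry(commandEnv *CommandEnv, writer io.Writer, dir, name string) error {
        return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
            resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
                Directory: dir,
                Name:      name,
            })
            if err != nil {
                return err
            }
            fmt.Fprintf(writer, "%s/%s has %d chunks\n", dir, name, len(resp.Entry.Chunks))
            return nil
        })
    }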
diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go
new file mode 100644
index 000000000..33458bb6f
--- /dev/null
+++ b/weed/shell/command_fs_lock_unlock.go
@@ -0,0 +1,55 @@
+package shell
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandUnlock{})
+ Commands = append(Commands, &commandLock{})
+}
+
+// =========== Lock ==============
+type commandLock struct {
+}
+
+func (c *commandLock) Name() string {
+ return "lock"
+}
+
+func (c *commandLock) Help() string {
+ return `lock in order to exclusively manage the cluster
+
+ This is a blocking operation if there is already another lock.
+`
+}
+
+func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.RequestLock(util.DetectedHostAddress())
+
+ return nil
+}
+
+// =========== Unlock ==============
+
+type commandUnlock struct {
+}
+
+func (c *commandUnlock) Name() string {
+ return "unlock"
+}
+
+func (c *commandUnlock) Help() string {
+ return `unlock the cluster-wide lock
+
+`
+}
+
+func (c *commandUnlock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.ReleaseLock()
+
+ return nil
+}
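
The lock/unlock pair backs the confirmIsLocked() guards added to ec.rebuild and volume.balance above. A sketch of how a destructive command checks the lock before parsing its flags (commandDangerous is hypothetical):

    package shell

    import (
        "flag"
        "io"
    )

    type commandDangerous struct{}

    func (c *commandDangerous) Name() string { return "cluster.dangerous" }

    // Do refuses to run unless the operator holds the cluster-wide lock,
    // mirroring the guards added in the hunks above.
    func (c *commandDangerous) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
        if err = commandEnv.confirmIsLocked(); err != nil {
            return // the operator must run `lock` first
        }
        fs := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
        applyChanges := fs.Bool("force", false, "apply the changes")
        if err = fs.Parse(args); err != nil {
            return nil
        }
        _ = applyChanges
        // ... mutate cluster state here ...
        return nil
    }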
diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go
index 01842083b..592ec8be0 100644
--- a/weed/shell/command_fs_ls.go
+++ b/weed/shell/command_fs_ls.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"io"
"os"
@@ -9,8 +8,9 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -30,9 +30,6 @@ func (c *commandFsLs) Help() string {
fs.ls [-l] [-a] /dir/
fs.ls [-l] [-a] /dir/file_name
fs.ls [-l] [-a] /dir/file_prefix
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_name
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_prefix
`
}
@@ -53,26 +50,22 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
}
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
path = path + "/"
}
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
entryCount := 0
- err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), dir, name, func(entry *filer_pb.Entry, isLast bool) {
+ err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
if !showHidden && strings.HasPrefix(entry.Name, ".") {
- return
+ return nil
}
entryCount++
@@ -95,18 +88,19 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
}
- if dir == "/" {
+ if strings.HasSuffix(dir, "/") {
// just for printing
- dir = ""
+ dir = dir[:len(dir)-1]
}
fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
fileMode, len(entry.Chunks),
userName, groupName,
- filer2.TotalSize(entry.Chunks), dir, entry.Name)
+ filer.FileSize(entry), dir, entry.Name)
} else {
fmt.Fprintf(writer, "%s\n", entry.Name)
}
+ return nil
})
if isLongFormat && err == nil {
diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go
index 5908b0a3c..e0525defa 100644
--- a/weed/shell/command_fs_meta_cat.go
+++ b/weed/shell/command_fs_meta_cat.go
@@ -1,14 +1,15 @@
package shell
import (
- "context"
"fmt"
+ "github.com/golang/protobuf/proto"
"io"
+ "sort"
"github.com/golang/protobuf/jsonpb"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -27,31 +28,25 @@ func (c *commandFsMetaCat) Help() string {
fs.meta.cat /dir/
fs.meta.cat /dir/file_name
- fs.meta.cat http://<filer_server>:<port>/dir/
- fs.meta.cat http://<filer_server>:<port>/dir/file_name
`
}
func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
- respLookupEntry, err := client.LookupDirectoryEntry(ctx, request)
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
@@ -61,6 +56,13 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W
Indent: " ",
}
+ sort.Slice(respLookupEntry.Entry.Chunks, func(i, j int) bool {
+ if respLookupEntry.Entry.Chunks[i].Offset == respLookupEntry.Entry.Chunks[j].Offset {
+ return respLookupEntry.Entry.Chunks[i].Mtime < respLookupEntry.Entry.Chunks[j].Mtime
+ }
+ return respLookupEntry.Entry.Chunks[i].Offset < respLookupEntry.Entry.Chunks[j].Offset
+ })
+
text, marshalErr := m.MarshalToString(respLookupEntry.Entry)
if marshalErr != nil {
return fmt.Errorf("marshal meta: %v", marshalErr)
@@ -68,6 +70,12 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W
fmt.Fprintf(writer, "%s\n", text)
+ bytes, _ := proto.Marshal(respLookupEntry.Entry)
+ gzippedBytes, _ := util.GzipData(bytes)
+ // zstdBytes, _ := util.ZstdData(bytes)
+ // fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes), len(zstdBytes))
+ fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes))
+
return nil
})
diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go
index 5ea8de9f5..46dc07e9a 100644
--- a/weed/shell/command_fs_meta_load.go
+++ b/weed/shell/command_fs_meta_load.go
@@ -1,15 +1,15 @@
package shell
import (
- "context"
"fmt"
"io"
"os"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
)
func init() {
@@ -38,11 +38,6 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
return nil
}
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(nil))
- if err != nil {
- return err
- }
-
fileName := args[len(args)-1]
dst, err := os.OpenFile(fileName, os.O_RDONLY, 0644)
@@ -53,9 +48,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
var dirCount, fileCount uint64
- ctx := context.Background()
-
- err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
sizeBuf := make([]byte, 4)
@@ -80,14 +73,15 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
return err
}
- if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{
+ fullEntry.Entry.Name = strings.ReplaceAll(fullEntry.Entry.Name, "/", "x")
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: fullEntry.Dir,
Entry: fullEntry.Entry,
}); err != nil {
return err
}
- fmt.Fprintf(writer, "load %s\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
+ fmt.Fprintf(writer, "load %s\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
if fullEntry.Entry.IsDirectory {
dirCount++
@@ -101,7 +95,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
if err == nil {
fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount)
- fmt.Fprintf(writer, "\n%s is loaded to http://%s:%d%s\n", fileName, filerServer, filerPort, path)
+ fmt.Fprintf(writer, "\n%s is loaded.\n", fileName)
}
return err
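
fs.meta.load consumes the same record stream that fs.meta.save produces: a 4-byte length written with util.Uint32toBytes, followed by a marshaled filer_pb.FullEntry. A hypothetical standalone reader for that format (walkMetaFile is not part of this change; it assumes util.BytesToUint32 as the counterpart of Uint32toBytes):

    package shell

    import (
        "fmt"
        "io"

        "github.com/golang/protobuf/proto"

        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
        "github.com/chrislusf/seaweedfs/weed/util"
    )

    // walkMetaFile decodes length-prefixed FullEntry records until EOF.
    func walkMetaFile(r io.Reader, visit func(*filer_pb.FullEntry) error) error {
        sizeBuf := make([]byte, 4)
        for {
            if _, err := io.ReadFull(r, sizeBuf); err != nil {
                if err == io.EOF {
                    return nil // clean end of stream
                }
                return err
            }
            data := make([]byte, util.BytesToUint32(sizeBuf))
            if _, err := io.ReadFull(r, data); err != nil {
                return err
            }
            fullEntry := &filer_pb.FullEntry{}
            if err := proto.Unmarshal(data, fullEntry); err != nil {
                return fmt.Errorf("unmarshal: %v", err)
            }
            if err := visit(fullEntry); err != nil {
                return err
            }
        }
    }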
diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go
index a898df7a0..4342fa81d 100644
--- a/weed/shell/command_fs_meta_notify.go
+++ b/weed/shell/command_fs_meta_notify.go
@@ -1,13 +1,9 @@
package shell
import (
- "context"
"fmt"
"io"
- "github.com/spf13/viper"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -36,20 +32,18 @@ func (c *commandFsMetaNotify) Help() string {
func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
util.LoadConfiguration("notification", true)
- v := viper.GetViper()
- notification.LoadConfiguration(v.Sub("notification"))
-
- ctx := context.Background()
+ v := util.GetViper()
+ notification.LoadConfiguration(v, "notification.")
var dirCount, fileCount uint64
- err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+ err = filer_pb.TraverseBfs(commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
if entry.IsDirectory {
dirCount++
diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go
index ed070350f..37d94fe42 100644
--- a/weed/shell/command_fs_meta_save.go
+++ b/weed/shell/command_fs_meta_save.go
@@ -1,18 +1,18 @@
package shell
import (
- "context"
"flag"
"fmt"
"io"
"os"
+ "path/filepath"
+ "strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -40,8 +40,6 @@ func (c *commandFsMetaSave) Help() string {
The meta data will be saved into a local <filer_host>-<port>-<time>.meta file.
These meta data can be later loaded by fs.meta.load command,
- This assumes there are no deletions, so this is different from taking a snapshot.
-
`
}
@@ -50,22 +48,22 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files")
outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
+ isObfuscate := fsMetaSaveCommand.Bool("obfuscate", false, "obfuscate the file names")
+ // chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
if err = fsMetaSaveCommand.Parse(args); err != nil {
return nil
}
- filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
+ path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
if parseErr != nil {
return parseErr
}
- ctx := context.Background()
-
- t := time.Now()
fileName := *outputFileName
if fileName == "" {
+ t := time.Now()
fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta",
- filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
+ commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
@@ -74,43 +72,76 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
}
defer dst.Close()
- var wg sync.WaitGroup
- wg.Add(1)
- outputChan := make(chan []byte, 1024)
- go func() {
+ var cipherKey util.CipherKey
+ if *isObfuscate {
+ cipherKey = util.GenCipherKey()
+ }
+
+ err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) {
sizeBuf := make([]byte, 4)
- for b := range outputChan {
+ for item := range outputChan {
+ b := item.([]byte)
util.Uint32toBytes(sizeBuf, uint32(len(b)))
dst.Write(sizeBuf)
dst.Write(b)
}
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ if !entry.Entry.IsDirectory {
+ ext := filepath.Ext(entry.Entry.Name)
+ if encrypted, encErr := util.Encrypt([]byte(entry.Entry.Name), cipherKey); encErr == nil {
+ entry.Entry.Name = util.Base64Encode(encrypted)[:len(entry.Entry.Name)] + ext
+ entry.Entry.Name = strings.ReplaceAll(entry.Entry.Name, "/", "x")
+ }
+ }
+ bytes, err := proto.Marshal(entry)
+ if err != nil {
+ fmt.Fprintf(writer, "marshall error: %v\n", err)
+ return
+ }
+
+ outputChan <- bytes
+ return nil
+ })
+
+ if err == nil {
+ fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", commandEnv.option.FilerHost, commandEnv.option.FilerPort, path, fileName)
+ }
+
+ return err
+
+}
+
+func doTraverseBfsAndSaving(filerClient filer_pb.FilerClient, writer io.Writer, path string, verbose bool, saveFn func(outputChan chan interface{}), genFn func(entry *filer_pb.FullEntry, outputChan chan interface{}) error) error {
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ outputChan := make(chan interface{}, 1024)
+ go func() {
+ saveFn(outputChan)
wg.Done()
}()
var dirCount, fileCount uint64
- err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+ err := filer_pb.TraverseBfs(filerClient, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
protoMessage := &filer_pb.FullEntry{
Dir: string(parentPath),
Entry: entry,
}
- bytes, err := proto.Marshal(protoMessage)
- if err != nil {
+ if err := genFn(protoMessage, outputChan); err != nil {
fmt.Fprintf(writer, "marshall error: %v\n", err)
return
}
- outputChan <- bytes
-
if entry.IsDirectory {
atomic.AddUint64(&dirCount, 1)
} else {
atomic.AddUint64(&fileCount, 1)
}
- if *verbose {
+ if verbose {
println(parentPath.Child(entry.Name))
}
@@ -120,66 +151,8 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
wg.Wait()
- if err == nil {
+ if err == nil && writer != nil {
fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount)
- fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName)
}
-
return err
-
-}
-func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
- parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
- K := 5
-
- var jobQueueWg sync.WaitGroup
- queue := util.NewQueue()
- jobQueueWg.Add(1)
- queue.Enqueue(parentPath)
- var isTerminating bool
-
- for i := 0; i < K; i++ {
- go func() {
- for {
- if isTerminating {
- break
- }
- t := queue.Dequeue()
- if t == nil {
- time.Sleep(329 * time.Millisecond)
- continue
- }
- dir := t.(filer2.FullPath)
- processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn)
- if processErr != nil {
- err = processErr
- }
- jobQueueWg.Done()
- }
- }()
- }
- jobQueueWg.Wait()
- isTerminating = true
- return
-}
-
-func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
- parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup,
- fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
- return filer2.ReadDirAllEntries(ctx, filerClient, string(parentPath), "", func(entry *filer_pb.Entry, isLast bool) {
-
- fn(parentPath, entry)
-
- if entry.IsDirectory {
- subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
- if parentPath == "/" {
- subDir = "/" + entry.Name
- }
- jobQueueWg.Add(1)
- queue.Enqueue(filer2.FullPath(subDir))
- }
- })
-
}
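
The extracted doTraverseBfsAndSaving separates record generation (genFn, invoked inside the traversal) from sinking (saveFn, which drains outputChan on its own goroutine). A hedged sketch of another caller wiring the two callbacks, assuming the traversal closes outputChan when done (sumMetaBytes is hypothetical):

    package shell

    import (
        "fmt"
        "io"

        "github.com/golang/protobuf/proto"

        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    )

    // sumMetaBytes totals the marshaled size of all entries under path.
    func sumMetaBytes(commandEnv *CommandEnv, writer io.Writer, path string) error {
        var total int
        return doTraverseBfsAndSaving(commandEnv, writer, path, false,
            func(outputChan chan interface{}) {
                // sink: runs on its own goroutine until the channel is closed
                for item := range outputChan {
                    total += len(item.([]byte))
                }
                fmt.Fprintf(writer, "total marshaled bytes: %d\n", total)
            },
            func(entry *filer_pb.FullEntry, outputChan chan interface{}) error {
                // generator: encode each entry and hand it to the sink
                b, err := proto.Marshal(entry)
                if err != nil {
                    return err
                }
                outputChan <- b
                return nil
            })
    }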
diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go
index 67606ab53..2448c8f61 100644
--- a/weed/shell/command_fs_mv.go
+++ b/weed/shell/command_fs_mv.go
@@ -4,10 +4,9 @@ import (
"context"
"fmt"
"io"
- "path/filepath"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -37,37 +36,39 @@ func (c *commandFsMv) Help() string {
func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, sourcePath, err := commandEnv.parseUrl(args[0])
+ if len(args) != 2 {
+ return fmt.Errorf("need to have 2 arguments")
+ }
+
+ sourcePath, err := commandEnv.parseUrl(args[0])
if err != nil {
return err
}
- _, _, destinationPath, err := commandEnv.parseUrl(args[1])
+ destinationPath, err := commandEnv.parseUrl(args[1])
if err != nil {
return err
}
- ctx := context.Background()
-
- sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName()
+ sourceDir, sourceName := util.FullPath(sourcePath).DirAndName()
- destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName()
+ destinationDir, destinationName := util.FullPath(destinationPath).DirAndName()
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// collect destination entry info
destinationRequest := &filer_pb.LookupDirectoryEntryRequest{
Name: destinationDir,
Directory: destinationName,
}
- respDestinationLookupEntry, err := client.LookupDirectoryEntry(ctx, destinationRequest)
+ respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest)
var targetDir, targetName string
// moving a file or folder
if err == nil && respDestinationLookupEntry.Entry.IsDirectory {
// to a directory
- targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName))
+ targetDir = util.Join(destinationDir, destinationName)
targetName = sourceName
} else {
// to a file or folder
@@ -82,9 +83,9 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer
NewName: targetName,
}
- _, err = client.AtomicRenameEntry(ctx, request)
+ _, err = client.AtomicRenameEntry(context.Background(), request)
- fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName))
+ fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, util.NewFullPath(targetDir, targetName))
return err
diff --git a/weed/shell/command_fs_pwd.go b/weed/shell/command_fs_pwd.go
index 084a5e90a..d7d9819c8 100644
--- a/weed/shell/command_fs_pwd.go
+++ b/weed/shell/command_fs_pwd.go
@@ -22,11 +22,7 @@ func (c *commandFsPwd) Help() string {
func (c *commandFsPwd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- fmt.Fprintf(writer, "http://%s:%d%s\n",
- commandEnv.option.FilerHost,
- commandEnv.option.FilerPort,
- commandEnv.option.Directory,
- )
+ fmt.Fprintf(writer, "%s\n", commandEnv.option.Directory)
return nil
}
diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go
index a4524f341..a8c5b2018 100644
--- a/weed/shell/command_fs_tree.go
+++ b/weed/shell/command_fs_tree.go
@@ -1,13 +1,12 @@
package shell
import (
- "context"
"fmt"
"io"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -24,22 +23,21 @@ func (c *commandFsTree) Name() string {
func (c *commandFsTree) Help() string {
return `recursively list all files under a directory
- fs.tree http://<filer_server>:<port>/dir/
+ fs.tree /some/dir
+
`
}
func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- dir, name := filer2.FullPath(path).DirAndName()
-
- ctx := context.Background()
+ dir, name := util.FullPath(path).DirAndName()
- dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name, newPrefix(), -1)
+ dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv, util.FullPath(dir), name, newPrefix(), -1)
if terr == nil {
fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount)
@@ -49,14 +47,14 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ
}
-func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
+func treeTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir util.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
prefix.addMarker(level)
- err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) {
+ err = filer_pb.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) error {
if level < 0 && name != "" {
if entry.Name != name {
- return
+ return nil
}
}
@@ -64,18 +62,15 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi
if entry.IsDirectory {
directoryCount++
- subDir := fmt.Sprintf("%s/%s", dir, entry.Name)
- if dir == "/" {
- subDir = "/" + entry.Name
- }
- dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1)
+ subDir := dir.Child(entry.Name)
+ dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1)
directoryCount += dirCount
fileCount += fCount
err = terr
} else {
fileCount++
}
-
+ return nil
})
return
}
diff --git a/weed/shell/command_s3_bucket_create.go b/weed/shell/command_s3_bucket_create.go
new file mode 100644
index 000000000..a512ffc4a
--- /dev/null
+++ b/weed/shell/command_s3_bucket_create.go
@@ -0,0 +1,85 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandS3BucketCreate{})
+}
+
+type commandS3BucketCreate struct {
+}
+
+func (c *commandS3BucketCreate) Name() string {
+ return "s3.bucket.create"
+}
+
+func (c *commandS3BucketCreate) Help() string {
+ return `create a bucket with a given name
+
+ Example:
+ s3.bucket.create -name <bucket_name> -replication 001
+`
+}
+
+func (c *commandS3BucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ replication := bucketCommand.String("replication", "", "replication setting for the bucket, if not set "+
+ "it will honor the value defined by the filer if it exists, "+
+ "else it will honor the value defined on the master")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath := resp.DirBuckets
+
+ println("create bucket under", filerBucketsPath)
+
+ entry := &filer_pb.Entry{
+ Name: *bucketName,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Collection: *bucketName,
+ Replication: *replication,
+ },
+ }
+
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: filerBucketsPath,
+ Entry: entry,
+ }); err != nil {
+ return err
+ }
+
+ println("created bucket", *bucketName)
+
+ return nil
+
+ })
+
+ return err
+
+}
diff --git a/weed/shell/command_s3_bucket_delete.go b/weed/shell/command_s3_bucket_delete.go
new file mode 100644
index 000000000..a8d8c5c29
--- /dev/null
+++ b/weed/shell/command_s3_bucket_delete.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandS3BucketDelete{})
+}
+
+type commandS3BucketDelete struct {
+}
+
+func (c *commandS3BucketDelete) Name() string {
+ return "s3.bucket.delete"
+}
+
+func (c *commandS3BucketDelete) Help() string {
+ return `delete a bucket by a given name
+
+ s3.bucket.delete -name <bucket_name>
+`
+}
+
+func (c *commandS3BucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true, false, nil)
+
+}
diff --git a/weed/shell/command_s3_bucket_list.go b/weed/shell/command_s3_bucket_list.go
new file mode 100644
index 000000000..4acf9a866
--- /dev/null
+++ b/weed/shell/command_s3_bucket_list.go
@@ -0,0 +1,78 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandS3BucketList{})
+}
+
+type commandS3BucketList struct {
+}
+
+func (c *commandS3BucketList) Name() string {
+ return "s3.bucket.list"
+}
+
+func (c *commandS3BucketList) Help() string {
+ return `list all buckets
+
+`
+}
+
+func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+ if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" {
+ fmt.Fprintf(writer, " %s\n", entry.Name)
+ } else {
+ fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication)
+ }
+ return nil
+ }, "", false, math.MaxUint32)
+ if err != nil {
+ return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ }
+
+ return err
+
+}
+
+func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath = resp.DirBuckets
+
+ return nil
+
+ })
+
+ return filerBucketsPath, err
+}
diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go
new file mode 100644
index 000000000..1ba31292c
--- /dev/null
+++ b/weed/shell/command_s3_clean_uploads.go
@@ -0,0 +1,92 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+ "math"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandS3CleanUploads{})
+}
+
+type commandS3CleanUploads struct {
+}
+
+func (c *commandS3CleanUploads) Name() string {
+ return "s3.clean.uploads"
+}
+
+func (c *commandS3CleanUploads) Help() string {
+ return `clean up stale multipart uploads
+
+ Example:
+ s3.clean.uploads -timeAgo 1.5h
+
+`
+}
+
+func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ uploadedTimeAgo := bucketCommand.Duration("timeAgo", 24*time.Hour, "purge uploads created more than this duration ago, e.g. \"1.5h\" or \"2h45m\". Valid time units are \"m\" and \"h\"")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ var buckets []string
+ err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+ buckets = append(buckets, entry.Name)
+ return nil
+ }, "", false, math.MaxUint32)
+ if err != nil {
+ return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ }
+
+ for _, bucket := range buckets {
+ c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo)
+ }
+
+ return err
+
+}
+
+func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io.Writer, filerBucketsPath string, bucket string, timeAgo time.Duration) error {
+ uploadsDir := filerBucketsPath + "/" + bucket + "/.uploads"
+ var staleUploads []string
+ now := time.Now()
+ err := filer_pb.List(commandEnv, uploadsDir, "", func(entry *filer_pb.Entry, isLast bool) error {
+ ctime := time.Unix(entry.Attributes.Crtime, 0)
+ if ctime.Add(timeAgo).Before(now) {
+ staleUploads = append(staleUploads, entry.Name)
+ }
+ return nil
+ }, "", false, math.MaxUint32)
+ if err != nil {
+ return fmt.Errorf("list uploads under %v: %v", uploadsDir, err)
+ }
+
+ for _, staleUpload := range staleUploads {
+ deleteUrl := fmt.Sprintf("http://%s:%d%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerHost, commandEnv.option.FilerPort, uploadsDir, staleUpload)
+ fmt.Fprintf(writer, "purge %s\n", deleteUrl)
+
+ err = util.Delete(deleteUrl, "")
+ if err != nil {
+ return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err)
+ }
+ }
+
+ return nil
+
+}
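
The staleness test above purges an upload when its creation time plus -timeAgo falls before now. A tiny standalone illustration with made-up times:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        now := time.Now()
        timeAgo := 24 * time.Hour                // the -timeAgo flag value
        crtime := now.Add(-30 * time.Hour)       // pretend Crtime from the entry
        stale := crtime.Add(timeAgo).Before(now) // same predicate as above
        fmt.Println("stale:", stale)             // true: created 30h ago, cutoff 24h
    }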
diff --git a/weed/shell/command_s3_configure.go b/weed/shell/command_s3_configure.go
new file mode 100644
index 000000000..ca51ef72f
--- /dev/null
+++ b/weed/shell/command_s3_configure.go
@@ -0,0 +1,183 @@
+package shell
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "io"
+ "sort"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandS3Configure{})
+}
+
+type commandS3Configure struct {
+}
+
+func (c *commandS3Configure) Name() string {
+ return "s3.configure"
+}
+
+func (c *commandS3Configure) Help() string {
+ return `configure and apply s3 options for each bucket
+
+ # see the current configuration file content
+ s3.configure
+ `
+}
+
+func (c *commandS3Configure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ s3ConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ actions := s3ConfigureCommand.String("actions", "", "comma separated actions names: Read,Write,List,Tagging,Admin")
+ user := s3ConfigureCommand.String("user", "", "user name")
+ buckets := s3ConfigureCommand.String("buckets", "", "bucket name")
+ accessKey := s3ConfigureCommand.String("access_key", "", "specify the access key")
+ secretKey := s3ConfigureCommand.String("secret_key", "", "specify the secret key")
+ isDelete := s3ConfigureCommand.Bool("delete", false, "delete users, actions or access keys")
+ apply := s3ConfigureCommand.Bool("apply", false, "update and apply s3 configuration")
+
+ if err = s3ConfigureCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ if err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return filer.ReadEntry(commandEnv.MasterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf)
+ }); err != nil && err != filer_pb.ErrNotFound {
+ return err
+ }
+
+ s3cfg := &iam_pb.S3ApiConfiguration{}
+ if buf.Len() > 0 {
+ if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil {
+ return err
+ }
+ }
+
+ idx := 0
+ changed := false
+ if *user != "" {
+ for i, identity := range s3cfg.Identities {
+ if *user == identity.Name {
+ idx = i
+ changed = true
+ break
+ }
+ }
+ }
+ var cmdActions []string
+ for _, action := range strings.Split(*actions, ",") {
+ if *buckets == "" {
+ cmdActions = append(cmdActions, action)
+ } else {
+ for _, bucket := range strings.Split(*buckets, ",") {
+ cmdActions = append(cmdActions, fmt.Sprintf("%s:%s", action, bucket))
+ }
+ }
+ }
+ if changed {
+ if *isDelete {
+ var exists []int
+ for _, cmdAction := range cmdActions {
+ for i, currentAction := range s3cfg.Identities[idx].Actions {
+ if cmdAction == currentAction {
+ exists = append(exists, i)
+ }
+ }
+ }
+ sort.Sort(sort.Reverse(sort.IntSlice(exists)))
+ for _, i := range exists {
+ s3cfg.Identities[idx].Actions = append(
+ s3cfg.Identities[idx].Actions[:i],
+ s3cfg.Identities[idx].Actions[i+1:]...,
+ )
+ }
+ if *accessKey != "" {
+ exists = []int{}
+ for i, credential := range s3cfg.Identities[idx].Credentials {
+ if credential.AccessKey == *accessKey {
+ exists = append(exists, i)
+ }
+ }
+ sort.Sort(sort.Reverse(sort.IntSlice(exists)))
+ for _, i := range exists {
+ s3cfg.Identities[idx].Credentials = append(
+ s3cfg.Identities[idx].Credentials[:i],
+ s3cfg.Identities[idx].Credentials[i+1:]...,
+ )
+ }
+
+ }
+ if *actions == "" && *accessKey == "" && *buckets == "" {
+ s3cfg.Identities = append(s3cfg.Identities[:idx], s3cfg.Identities[idx+1:]...)
+ }
+ } else {
+ if *actions != "" {
+ for _, cmdAction := range cmdActions {
+ found := false
+ for _, action := range s3cfg.Identities[idx].Actions {
+ if cmdAction == action {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s3cfg.Identities[idx].Actions = append(s3cfg.Identities[idx].Actions, cmdAction)
+ }
+ }
+ }
+ if *accessKey != "" && *user != "anonymous" {
+ found := false
+ for _, credential := range s3cfg.Identities[idx].Credentials {
+ if credential.AccessKey == *accessKey {
+ found = true
+ credential.SecretKey = *secretKey
+ break
+ }
+ }
+ if !found {
+ s3cfg.Identities[idx].Credentials = append(s3cfg.Identities[idx].Credentials, &iam_pb.Credential{
+ AccessKey: *accessKey,
+ SecretKey: *secretKey,
+ })
+ }
+ }
+ }
+ } else if *user != "" && *actions != "" {
+ identity := iam_pb.Identity{
+ Name: *user,
+ Actions: cmdActions,
+ Credentials: []*iam_pb.Credential{},
+ }
+ if *user != "anonymous" {
+ identity.Credentials = append(identity.Credentials,
+ &iam_pb.Credential{AccessKey: *accessKey, SecretKey: *secretKey})
+ }
+ s3cfg.Identities = append(s3cfg.Identities, &identity)
+ }
+
+ buf.Reset()
+ filer.S3ConfigurationToText(&buf, s3cfg)
+
+ fmt.Fprint(writer, buf.String())
+ fmt.Fprintln(writer)
+
+ if *apply {
+
+ if err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes())
+ }); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
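
Both delete paths above collect matching indices, sort them in descending order, and splice each out with append(s[:i], s[i+1:]...), so earlier removals do not shift the indices still pending. A standalone sketch of the pattern:

    package main

    import (
        "fmt"
        "sort"
    )

    // removeIndices deletes the given positions from a slice, walking them
    // in descending order so removals never invalidate later indices.
    func removeIndices(items []string, indices []int) []string {
        sort.Sort(sort.Reverse(sort.IntSlice(indices)))
        for _, i := range indices {
            items = append(items[:i], items[i+1:]...)
        }
        return items
    }

    func main() {
        fmt.Println(removeIndices([]string{"Read", "Write", "List"}, []int{0, 2}))
        // Output: [Write]
    }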
diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go
index bed4f4306..ad7da0e44 100644
--- a/weed/shell/command_volume_balance.go
+++ b/weed/shell/command_volume_balance.go
@@ -1,9 +1,10 @@
package shell
import (
- "context"
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"os"
"sort"
@@ -39,14 +40,15 @@ func (c *commandVolumeBalance) Help() string {
}
func balanceWritableVolumes(){
- idealWritableVolumes = totalWritableVolumes / numVolumeServers
+ idealWritableVolumeRatio = totalWritableVolumes / totalNumberOfMaxVolumes
for hasMovedOneVolume {
- sort all volume servers ordered by the number of local writable volumes
- pick the volume server A with the lowest number of writable volumes x
- pick the volume server B with the highest number of writable volumes y
- if y > idealWritableVolumes and x +1 <= idealWritableVolumes {
- if B has a writable volume id v that A does not have {
- move writable volume v from A to B
+ sort all volume servers ordered by the localWritableVolumeRatio = localWritableVolumes / localVolumeMax
+ pick the volume server B with the highest localWritableVolumeRatio y
+ for any volume server A whose number of writable volumes x satisfies x + 1 <= idealWritableVolumeRatio * localVolumeMax {
+ if y > localWritableVolumeRatio {
+ if B has a writable volume id v that A does not have, and v satisfies the replication requirements {
+ move writable volume v from B to A
+ }
}
}
}
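
A tiny worked example of the ratio-based target described above, with made-up numbers: the ideal is the cluster-wide writable/max ratio, and a server is a move source when its local ratio exceeds it:

    package main

    import "fmt"

    func main() {
        type server struct {
            name              string
            writable, maxSlot int
        }
        servers := []server{{"A", 10, 100}, {"B", 45, 50}}
        totalWritable, totalMax := 0, 0
        for _, s := range servers {
            totalWritable += s.writable
            totalMax += s.maxSlot
        }
        idealRatio := float64(totalWritable) / float64(totalMax) // 55/150 ≈ 0.37
        for _, s := range servers {
            local := float64(s.writable) / float64(s.maxSlot)
            fmt.Printf("%s: localRatio=%.2f idealRatio=%.2f source=%v\n",
                s.name, local, idealRatio, local > idealRatio)
        }
        // B (0.90) is above the ideal and A (0.10) below, so a writable volume
        // on B that A lacks (and whose replica placement allows it) moves B -> A.
    }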
@@ -60,6 +62,10 @@ func (c *commandVolumeBalance) Help() string {
func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
@@ -68,55 +74,51 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
return nil
}
- var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
- return err
- })
+ // collect topology information
+ topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
- typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc)
+ volumeServers := collectVolumeServersByDc(topologyInfo, *dc)
+ volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+ diskTypes := collectVolumeDiskTypes(topologyInfo)
- for maxVolumeCount, volumeServers := range typeToNodes {
- if len(volumeServers) < 2 {
- fmt.Printf("only 1 node is configured max %d volumes, skipping balancing\n", maxVolumeCount)
- continue
+ if *collection == "EACH_COLLECTION" {
+ collections, err := ListCollectionNames(commandEnv, true, false)
+ if err != nil {
+ return err
}
- if *collection == "EACH_COLLECTION" {
- collections, err := ListCollectionNames(commandEnv, true, false)
- if err != nil {
- return err
- }
- for _, c := range collections {
- if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
- return err
- }
- }
- } else if *collection == "ALL_COLLECTIONS" {
- if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
- return err
- }
- } else {
- if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+ for _, c := range collections {
+ if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
return err
}
}
-
+ } else if *collection == "ALL_COLLECTIONS" {
+ if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
+ return err
+ }
+ } else {
+ if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+ return err
+ }
}
+
return nil
}
-func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
+func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
- var nodes []*Node
- for _, dn := range dataNodeInfos {
- nodes = append(nodes, &Node{
- info: dn,
- })
+ for _, diskType := range diskTypes {
+ if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, volumeSizeLimit, collection, applyBalancing); err != nil {
+ return err
+ }
}
+ return nil
+
+}
+
+func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
// balance writable volumes
for _, n := range nodes {
@@ -126,10 +128,10 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat
return false
}
}
- return !v.ReadOnly && v.Size < volumeSizeLimit
+ return v.DiskType == string(diskType) && (!v.ReadOnly && v.Size < volumeSizeLimit)
})
}
- if err := balanceSelectedVolume(commandEnv, nodes, sortWritableVolumes, applyBalancing); err != nil {
+ if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortWritableVolumes, applyBalancing); err != nil {
return err
}
@@ -141,34 +143,99 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat
return false
}
}
- return v.ReadOnly || v.Size >= volumeSizeLimit
+ return v.DiskType == string(diskType) && (v.ReadOnly || v.Size >= volumeSizeLimit)
})
}
- if err := balanceSelectedVolume(commandEnv, nodes, sortReadOnlyVolumes, applyBalancing); err != nil {
+ if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortReadOnlyVolumes, applyBalancing); err != nil {
return err
}
return nil
}
-func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*master_pb.DataNodeInfo) {
- typeToNodes = make(map[uint64][]*master_pb.DataNodeInfo)
+func collectVolumeServersByDc(t *master_pb.TopologyInfo, selectedDataCenter string) (nodes []*Node) {
for _, dc := range t.DataCenterInfos {
if selectedDataCenter != "" && dc.Id != selectedDataCenter {
continue
}
for _, r := range dc.RackInfos {
for _, dn := range r.DataNodeInfos {
- typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], dn)
+ nodes = append(nodes, &Node{
+ info: dn,
+ dc: dc.Id,
+ rack: r.Id,
+ })
}
}
}
return
}
+func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []types.DiskType) {
+ knownTypes := make(map[string]bool)
+ for _, dc := range t.DataCenterInfos {
+ for _, r := range dc.RackInfos {
+ for _, dn := range r.DataNodeInfos {
+ for diskType := range dn.DiskInfos {
+ if _, found := knownTypes[diskType]; !found {
+ knownTypes[diskType] = true
+ }
+ }
+ }
+ }
+ }
+ for diskType := range knownTypes {
+ diskTypes = append(diskTypes, types.ToDiskType(diskType))
+ }
+ return
+}
+
type Node struct {
info *master_pb.DataNodeInfo
selectedVolumes map[uint32]*master_pb.VolumeInformationMessage
+ dc string
+ rack string
+}
+
+type CapacityFunc func(*master_pb.DataNodeInfo) int
+
+func capacityByMaxVolumeCount(diskType types.DiskType) CapacityFunc {
+ return func(info *master_pb.DataNodeInfo) int {
+ diskInfo, found := info.DiskInfos[string(diskType)]
+ if !found {
+ return 0
+ }
+ return int(diskInfo.MaxVolumeCount)
+ }
+}
+
+func capacityByFreeVolumeCount(diskType types.DiskType) CapacityFunc {
+ return func(info *master_pb.DataNodeInfo) int {
+ diskInfo, found := info.DiskInfos[string(diskType)]
+ if !found {
+ return 0
+ }
+ return int(diskInfo.MaxVolumeCount - diskInfo.VolumeCount)
+ }
+}
+
+func (n *Node) localVolumeRatio(capacityFunc CapacityFunc) float64 {
+ return divide(len(n.selectedVolumes), capacityFunc(n.info))
+}
+
+func (n *Node) localVolumeNextRatio(capacityFunc CapacityFunc) float64 {
+ return divide(len(n.selectedVolumes)+1, capacityFunc(n.info))
+}
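
The divide helper used by the two ratio methods above is defined elsewhere in the shell package. A plausible minimal form, assuming it is plain float division with a defensive zero check (balanceSelectedVolume below only feeds nodes with positive capacity into these ratios, so a zero denominator is not expected):

    // assumed shape of the divide helper; the actual definition lives elsewhere in this package
    func divide(total, n int) float64 {
        if n == 0 {
            return 0 // defensive; callers filter out zero-capacity nodes
        }
        return float64(total) / float64(n)
    }
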
+
+func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
+ n.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
+ for _, diskInfo := range n.info.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ if fn(v) {
+ n.selectedVolumes[v.Id] = v
+ }
+ }
+ }
}
func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
@@ -183,67 +250,152 @@ func sortReadOnlyVolumes(volumes []*master_pb.VolumeInformationMessage) {
})
}
-func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) error {
- selectedVolumeCount := 0
+func balanceSelectedVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, capacityFunc CapacityFunc, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) (err error) {
+ selectedVolumeCount, volumeMaxCount := 0, 0
+ var nodesWithCapacity []*Node
for _, dn := range nodes {
selectedVolumeCount += len(dn.selectedVolumes)
+ capacity := capacityFunc(dn.info)
+ if capacity > 0 {
+ nodesWithCapacity = append(nodesWithCapacity, dn)
+ }
+ volumeMaxCount += capacity
}
- idealSelectedVolumes := ceilDivide(selectedVolumeCount, len(nodes))
+ idealVolumeRatio := divide(selectedVolumeCount, volumeMaxCount)
+
+ hasMoved := true
- hasMove := true
+ // fmt.Fprintf(os.Stdout, " total %d volumes, max %d volumes, idealVolumeRatio %f\n", selectedVolumeCount, volumeMaxCount, idealVolumeRatio)
- for hasMove {
- hasMove = false
- sort.Slice(nodes, func(i, j int) bool {
- // TODO sort by free volume slots???
- return len(nodes[i].selectedVolumes) < len(nodes[j].selectedVolumes)
+ for hasMoved {
+ hasMoved = false
+ sort.Slice(nodesWithCapacity, func(i, j int) bool {
+ return nodesWithCapacity[i].localVolumeRatio(capacityFunc) < nodesWithCapacity[j].localVolumeRatio(capacityFunc)
})
- emptyNode, fullNode := nodes[0], nodes[len(nodes)-1]
- if len(fullNode.selectedVolumes) > idealSelectedVolumes && len(emptyNode.selectedVolumes)+1 <= idealSelectedVolumes {
- // sort the volumes to move
- var candidateVolumes []*master_pb.VolumeInformationMessage
- for _, v := range fullNode.selectedVolumes {
- candidateVolumes = append(candidateVolumes, v)
+ fullNode := nodesWithCapacity[len(nodesWithCapacity)-1]
+ var candidateVolumes []*master_pb.VolumeInformationMessage
+ for _, v := range fullNode.selectedVolumes {
+ candidateVolumes = append(candidateVolumes, v)
+ }
+ sortCandidatesFn(candidateVolumes)
+
+ for i := 0; i < len(nodesWithCapacity)-1; i++ {
+ emptyNode := nodesWithCapacity[i]
+ if !(fullNode.localVolumeRatio(capacityFunc) > idealVolumeRatio && emptyNode.localVolumeNextRatio(capacityFunc) <= idealVolumeRatio) {
+ // either the fullest node is at or below the ideal ratio, or no remaining node has spare capacity
+ break
}
- sortCandidatesFn(candidateVolumes)
-
- for _, v := range candidateVolumes {
- if _, found := emptyNode.selectedVolumes[v.Id]; !found {
- if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil {
- delete(fullNode.selectedVolumes, v.Id)
- emptyNode.selectedVolumes[v.Id] = v
- hasMove = true
- break
- } else {
- return err
- }
- }
+ hasMoved, err = attemptToMoveOneVolume(commandEnv, volumeReplicas, fullNode, candidateVolumes, emptyNode, applyBalancing)
+ if err != nil {
+ return
+ }
+ if hasMoved {
+ // moved one volume
+ break
}
}
}
return nil
}
-func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, fullNode *Node, emptyNode *Node, applyBalancing bool) error {
+func attemptToMoveOneVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, candidateVolumes []*master_pb.VolumeInformationMessage, emptyNode *Node, applyBalancing bool) (hasMoved bool, err error) {
+
+ for _, v := range candidateVolumes {
+ hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, fullNode, v, emptyNode, applyBalancing)
+ if err != nil {
+ return
+ }
+ if hasMoved {
+ break
+ }
+ }
+ return
+}
+
+func maybeMoveOneVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, candidateVolume *master_pb.VolumeInformationMessage, emptyNode *Node, applyChange bool) (hasMoved bool, err error) {
+
+ if candidateVolume.ReplicaPlacement > 0 {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(candidateVolume.ReplicaPlacement))
+ if !isGoodMove(replicaPlacement, volumeReplicas[candidateVolume.Id], fullNode, emptyNode) {
+ return false, nil
+ }
+ }
+ if _, found := emptyNode.selectedVolumes[candidateVolume.Id]; !found {
+ if err = moveVolume(commandEnv, candidateVolume, fullNode, emptyNode, applyChange); err == nil {
+ adjustAfterMove(candidateVolume, volumeReplicas, fullNode, emptyNode)
+ return true, nil
+ } else {
+ return
+ }
+ }
+ return
+}
+
+func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, fullNode *Node, emptyNode *Node, applyChange bool) error {
collectionPrefix := v.Collection + "_"
if v.Collection == "" {
collectionPrefix = ""
}
- fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
- if applyBalancing {
- ctx := context.Background()
- return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
+ fmt.Fprintf(os.Stdout, " moving %s volume %s%d %s => %s\n", v.DiskType, collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
+ if applyChange {
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second, v.DiskType)
}
return nil
}
-func (node *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
- node.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
- for _, v := range node.info.VolumeInfos {
- if fn(v) {
- node.selectedVolumes[v.Id] = v
+func isGoodMove(placement *super_block.ReplicaPlacement, existingReplicas []*VolumeReplica, sourceNode, targetNode *Node) bool {
+ for _, replica := range existingReplicas {
+ if replica.location.dataNode.Id == targetNode.info.Id &&
+ replica.location.rack == targetNode.rack &&
+ replica.location.dc == targetNode.dc {
+ // never move to existing nodes
+ return false
+ }
+ }
+ dcs, racks := make(map[string]bool), make(map[string]int)
+ for _, replica := range existingReplicas {
+ if replica.location.dataNode.Id != sourceNode.info.Id {
+ dcs[replica.location.DataCenter()] = true
+ racks[replica.location.Rack()]++
+ }
+ }
+
+ dcs[targetNode.dc] = true
+ racks[fmt.Sprintf("%s %s", targetNode.dc, targetNode.rack)]++
+
+ if len(dcs) != placement.DiffDataCenterCount+1 {
+ return false
+ }
+
+ if len(racks) != placement.DiffRackCount+placement.DiffDataCenterCount+1 {
+ return false
+ }
+
+ for _, sameRackCount := range racks {
+ if sameRackCount != placement.SameRackCount+1 {
+ return false
+ }
+ }
+
+ return true
+
+}
+
+func adjustAfterMove(v *master_pb.VolumeInformationMessage, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, emptyNode *Node) {
+ delete(fullNode.selectedVolumes, v.Id)
+ if emptyNode.selectedVolumes != nil {
+ emptyNode.selectedVolumes[v.Id] = v
+ }
+ existingReplicas := volumeReplicas[v.Id]
+ for _, replica := range existingReplicas {
+ if replica.location.dataNode.Id == fullNode.info.Id &&
+ replica.location.rack == fullNode.rack &&
+ replica.location.dc == fullNode.dc {
+ loc := newLocation(emptyNode.dc, emptyNode.rack, emptyNode.info)
+ replica.location = &loc
+ return
}
}
}
diff --git a/weed/shell/command_volume_balance_test.go b/weed/shell/command_volume_balance_test.go
new file mode 100644
index 000000000..b77811f51
--- /dev/null
+++ b/weed/shell/command_volume_balance_test.go
@@ -0,0 +1,183 @@
+package shell
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+type testMoveCase struct {
+ name string
+ replication string
+ replicas []*VolumeReplica
+ sourceLocation location
+ targetLocation location
+ expected bool
+}
+
+func TestIsGoodMove(t *testing.T) {
+
+ var tests = []testMoveCase{
+
+ {
+ name: "test 100 move to wrong data centers",
+ replication: "100",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+
+ {
+ name: "test 100 move to spread into proper data centers",
+ replication: "100",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+
+ {
+ name: "test move to the same node",
+ replication: "001",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+
+ {
+ name: "test move to the same rack, but existing node",
+ replication: "001",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ expected: false,
+ },
+
+ {
+ name: "test move to the same rack, a new node",
+ replication: "001",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+
+ {
+ name: "test 010 move all to the same rack",
+ replication: "010",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+
+ {
+ name: "test 010 move to spread racks",
+ replication: "010",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+
+ {
+ name: "test 010 move to spread racks",
+ replication: "010",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ }
+
+ for _, tt := range tests {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
+ println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
+ sourceNode := &Node{
+ info: tt.sourceLocation.dataNode,
+ dc: tt.sourceLocation.dc,
+ rack: tt.sourceLocation.rack,
+ }
+ targetNode := &Node{
+ info: tt.targetLocation.dataNode,
+ dc: tt.targetLocation.dc,
+ rack: tt.targetLocation.rack,
+ }
+ if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
+ t.Errorf("%s: expect %v move from %v to %s, replication:%v",
+ tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
+ }
+ }
+
+}
+
+func TestBalance(t *testing.T) {
+ topologyInfo := parseOutput(topoData)
+ volumeServers := collectVolumeServersByDc(topologyInfo, "")
+ volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+ diskTypes := collectVolumeDiskTypes(topologyInfo)
+
+ if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil {
+ t.Errorf("balance: %v", err)
+ }
+
+}
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
new file mode 100644
index 000000000..0f156ac2f
--- /dev/null
+++ b/weed/shell/command_volume_check_disk.go
@@ -0,0 +1,258 @@
+package shell
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "io"
+ "math"
+ "sort"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeCheckDisk{})
+}
+
+type commandVolumeCheckDisk struct {
+ env *CommandEnv
+}
+
+func (c *commandVolumeCheckDisk) Name() string {
+ return "volume.check.disk"
+}
+
+func (c *commandVolumeCheckDisk) Help() string {
+ return `check all replicated volumes to find and fix inconsistencies
+
+ How it works:
+
+ find all volumes that are replicated
+ for each volume id, if there are 2 or more replicas, pick the pair with the 2 largest file counts.
+ for the pair volume A and B
+ append entries in A and not in B to B
+ append entries in B and not in A to A
+
+`
+}
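
For reference, typical invocations with the flags parsed below would look like this (flag names taken from the FlagSet definitions that follow):

    volume.check.disk -v           # report differences between replicas, change nothing
    volume.check.disk -v -force    # also write the missing entries to the lagging replica
    volume.check.disk -slow        # compare replicas even when their file counts match
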
+
+func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even file counts are the same")
+ verbose := fsckCommand.Bool("v", false, "verbose mode")
+ applyChanges := fsckCommand.Bool("force", false, "apply the fix")
+ nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair only when the fraction of missing keys is not more than this limit")
+ if err = fsckCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ c.env = commandEnv
+
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+ volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+
+ // pick pairs of volume replicas
+ fileCount := func(replica *VolumeReplica) uint64 {
+ return replica.info.FileCount - replica.info.DeleteCount
+ }
+ aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
+ defer aDB.Close()
+ defer bDB.Close()
+
+ for _, replicas := range volumeReplicas {
+ sort.Slice(replicas, func(i, j int) bool {
+ return fileCount(replicas[i]) > fileCount(replicas[j])
+ })
+ for len(replicas) >= 2 {
+ a, b := replicas[0], replicas[1]
+ if !*slowMode {
+ if fileCount(a) == fileCount(b) {
+ replicas = replicas[1:]
+ continue
+ }
+ }
+ if a.info.ReadOnly || b.info.ReadOnly {
+ fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
+ replicas = replicas[1:]
+ continue
+ }
+
+ // reset index db
+ aDB.Close()
+ bDB.Close()
+ aDB, bDB = needle_map.NewMemDb(), needle_map.NewMemDb()
+
+ // read index db
+ if err := c.readIndexDatabase(aDB, a.info.Collection, a.info.Id, a.location.dataNode.Id, *verbose, writer); err != nil {
+ return err
+ }
+ if err := c.readIndexDatabase(bDB, b.info.Collection, b.info.Id, b.location.dataNode.Id, *verbose, writer); err != nil {
+ return err
+ }
+
+ // find and make up the differences
+ if err := c.doVolumeCheckDisk(aDB, bDB, a, b, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil {
+ return err
+ }
+ if err := c.doVolumeCheckDisk(bDB, aDB, b, a, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil {
+ return err
+ }
+ replicas = replicas[1:]
+ }
+ }
+
+ return nil
+}
+
+func (c *commandVolumeCheckDisk) doVolumeCheckDisk(subtrahend, minuend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, nonRepairThreshold float64) error {
+
+ // find missing keys
+ // hash join, can be more efficient
+ var missingNeedles []needle_map.NeedleValue
+ var counter int
+ subtrahend.AscendingVisit(func(value needle_map.NeedleValue) error {
+ counter++
+ if _, found := minuend.Get(value.Key); !found {
+ missingNeedles = append(missingNeedles, value)
+ }
+ return nil
+ })
+
+ fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d entries\n", source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles))
+
+ if counter == 0 || len(missingNeedles) == 0 {
+ return nil
+ }
+
+ missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
+ if missingNeedlesFraction > nonRepairThreshold {
+ return fmt.Errorf(
+ "failed to start repair volume %d, percentage of missing keys is greater than the threshold: %.2f > %.2f",
+ source.info.Id, missingNeedlesFraction, nonRepairThreshold)
+ }
+
+ for _, needleValue := range missingNeedles {
+
+ needleBlob, err := c.readSourceNeedleBlob(source.location.dataNode.Id, source.info.Id, needleValue)
+ if err != nil {
+ return err
+ }
+
+ if !applyChanges {
+ continue
+ }
+
+ if verbose {
+ fmt.Fprintf(writer, "read %d,%x %s => %s \n", source.info.Id, needleValue.Key, source.location.dataNode.Id, target.location.dataNode.Id)
+ }
+
+ if err := c.writeNeedleBlobToTarget(target.location.dataNode.Id, source.info.Id, needleValue, needleBlob); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (c *commandVolumeCheckDisk) readSourceNeedleBlob(sourceVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {
+
+ err = operation.WithVolumeServerClient(sourceVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
+ VolumeId: volumeId,
+ NeedleId: uint64(needleValue.Key),
+ Offset: needleValue.Offset.ToActualOffset(),
+ Size: int32(needleValue.Size),
+ })
+ if err != nil {
+ return err
+ }
+ needleBlob = resp.NeedleBlob
+ return nil
+ })
+ return
+}
+
+func (c *commandVolumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {
+
+ return operation.WithVolumeServerClient(targetVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
+ VolumeId: volumeId,
+ NeedleId: uint64(needleValue.Key),
+ Size: int32(needleValue.Size),
+ NeedleBlob: needleBlob,
+ })
+ return err
+ })
+
+}
+
+func (c *commandVolumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer string, verbose bool, writer io.Writer) error {
+
+ var buf bytes.Buffer
+ if err := c.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer); err != nil {
+ return err
+ }
+
+ if verbose {
+ fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
+ }
+
+ return db.LoadFromReaderAt(bytes.NewReader(buf.Bytes()))
+
+}
+
+func (c *commandVolumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer string, buf *bytes.Buffer, verbose bool, writer io.Writer) error {
+
+ return operation.WithVolumeServerClient(volumeServer, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ ext := ".idx"
+
+ copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: volumeId,
+ Ext: ".idx",
+ CompactionRevision: math.MaxUint32,
+ StopOffset: math.MaxInt64,
+ Collection: collection,
+ IsEcVolume: false,
+ IgnoreSourceFileNotFound: false,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
+ }
+
+ err = writeToBuffer(copyFileClient, buf)
+ if err != nil {
+ return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err)
+ }
+
+ return nil
+
+ })
+}
+
+func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
+ for {
+ resp, receiveErr := client.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving: %v", receiveErr)
+ }
+ buf.Write(resp.FileContent)
+ }
+ return nil
+}
diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go
new file mode 100644
index 000000000..e3f034873
--- /dev/null
+++ b/weed/shell/command_volume_configure_replication.go
@@ -0,0 +1,107 @@
+package shell
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeConfigureReplication{})
+}
+
+type commandVolumeConfigureReplication struct {
+}
+
+func (c *commandVolumeConfigureReplication) Name() string {
+ return "volume.configure.replication"
+}
+
+func (c *commandVolumeConfigureReplication) Help() string {
+ return `change volume replication value
+
+ This command changes a volume replication value. It should be followed by "volume.fix.replication".
+
+`
+}
+
+func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id")
+ replicationString := configureReplicationCommand.String("replication", "", "the intended replication value")
+ if err = configureReplicationCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *replicationString == "" {
+ return fmt.Errorf("empty replication value")
+ }
+
+ replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString)
+ if err != nil {
+ return fmt.Errorf("replication format: %v", err)
+ }
+ replicaPlacementInt32 := uint32(replicaPlacement.Byte())
+
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+
+ vid := needle.VolumeId(*volumeIdInt)
+
+ // find all data nodes with volumes that need a replication change
+ var allLocations []location
+ eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ loc := newLocation(dc, string(rack), dn)
+ for _, diskInfo := range dn.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
+ allLocations = append(allLocations, loc)
+ continue
+ }
+ }
+ }
+ })
+
+ if len(allLocations) == 0 {
+ return fmt.Errorf("no volume needs change")
+ }
+
+ for _, dst := range allLocations {
+ err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{
+ VolumeId: uint32(vid),
+ Replication: replicaPlacement.String(),
+ })
+ if configureErr != nil {
+ return configureErr
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
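
Note that this command only rewrites the replication setting on the servers currently holding the volume; a follow-up run of volume.fix.replication is what actually creates or purges copies to match. A typical sequence (the volume id is hypothetical):

    volume.configure.replication -volumeId 27 -replication 001
    volume.fix.replication
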
diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go
index 1c83ba655..85b4bb095 100644
--- a/weed/shell/command_volume_copy.go
+++ b/weed/shell/command_volume_copy.go
@@ -1,7 +1,7 @@
package shell
import (
- "context"
+ "flag"
"fmt"
"io"
@@ -22,7 +22,7 @@ func (c *commandVolumeCopy) Name() string {
func (c *commandVolumeCopy) Help() string {
return `copy a volume from one volume server to another volume server
- volume.copy <source volume server host:port> <target volume server host:port> <volume id>
+ volume.copy -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id>
This command copies a volume from one volume server to another volume server.
Usually you will want to unmount the volume first before copying.
@@ -32,22 +32,26 @@ func (c *commandVolumeCopy) Help() string {
func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) != 3 {
- fmt.Fprintf(writer, "received args: %+v\n", args)
- return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
- volumeId, err := needle.NewVolumeId(volumeIdString)
- if err != nil {
- return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ volCopyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volCopyCommand.Int("volumeId", 0, "the volume id")
+ sourceNodeStr := volCopyCommand.String("source", "", "the source volume server <host>:<port>")
+ targetNodeStr := volCopyCommand.String("target", "", "the target volume server <host>:<port>")
+ if err = volCopyCommand.Parse(args); err != nil {
+ return nil
}
+ sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
if sourceVolumeServer == targetVolumeServer {
return fmt.Errorf("source and target volume servers are the same!")
}
- ctx := context.Background()
- _, err = copyVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, "")
return
}
diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go
index 17d27ea3a..187caa1a4 100644
--- a/weed/shell/command_volume_delete.go
+++ b/weed/shell/command_volume_delete.go
@@ -1,8 +1,7 @@
package shell
import (
- "context"
- "fmt"
+ "flag"
"io"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -22,7 +21,7 @@ func (c *commandVolumeDelete) Name() string {
func (c *commandVolumeDelete) Help() string {
return `delete a live volume from one volume server
- volume.delete <volume server host:port> <volume id>
+ volume.delete -node <volume server host:port> -volumeId <volume id>
This command deletes a volume from one volume server.
@@ -31,18 +30,21 @@ func (c *commandVolumeDelete) Help() string {
func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) != 2 {
- fmt.Fprintf(writer, "received args: %+v\n", args)
- return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- sourceVolumeServer, volumeIdString := args[0], args[1]
- volumeId, err := needle.NewVolumeId(volumeIdString)
- if err != nil {
- return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ volDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volDeleteCommand.Int("volumeId", 0, "the volume id")
+ nodeStr := volDeleteCommand.String("node", "", "the volume server <host>:<port>")
+ if err = volDeleteCommand.Parse(args); err != nil {
+ return nil
}
- ctx := context.Background()
- return deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ sourceVolumeServer := *nodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
+ return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index 6f35dd5d2..538351fd0 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -2,9 +2,12 @@ package shell
import (
"context"
+ "flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
- "math/rand"
+ "path/filepath"
"sort"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -18,6 +21,7 @@ func init() {
}
type commandVolumeFixReplication struct {
+ collectionPattern *string
}
func (c *commandVolumeFixReplication) Name() string {
@@ -27,16 +31,19 @@ func (c *commandVolumeFixReplication) Name() string {
func (c *commandVolumeFixReplication) Help() string {
return `add replicas to volumes that are missing replicas
- This command file all under-replicated volumes, and find volume servers with free slots.
+ This command finds all over-replicated volumes. If found, it will purge the oldest copies and stop.
+
+ This command also finds all under-replicated volumes, and finds volume servers with free slots.
If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
- volume.fix.replication -n # do not take action
- volume.fix.replication # actually copying the volume files and mount the volume
+ volume.fix.replication -n # do not take action
+ volume.fix.replication # actually deleting or copying the volume files and mount the volume
+ volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important"
Note:
- * each time this will only add back one replica for one volume id. If there are multiple replicas
- are missing, e.g. multiple volume servers are new, you may need to run this multiple times.
- * do not run this too quick within seconds, since the new volume replica may take a few seconds
+ * each time this will only add back one replica for each volume id that is under-replicated.
+ If multiple replicas are missing, e.g. replica count is > 2, you may need to run this multiple times.
+ * do not run this too quickly within seconds, since the new volume replica may take a few seconds
to register itself to the master.
`
@@ -44,81 +51,151 @@ func (c *commandVolumeFixReplication) Help() string {
func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- takeAction := true
- if len(args) > 0 && args[0] == "-n" {
- takeAction = false
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
- return err
- })
+ volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
+ skipChange := volFixReplicationCommand.Bool("n", false, "skip the changes")
+ if err = volFixReplicationCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ takeAction := !*skipChange
+
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
// find all volumes that needs replication
// collect all data nodes
- replicatedVolumeLocations := make(map[uint32][]location)
- replicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage)
+ volumeReplicas, allLocations := collectVolumeReplicaLocations(topologyInfo)
+
+ if len(allLocations) == 0 {
+ return fmt.Errorf("no data nodes at all")
+ }
+
+ // find all under replicated volumes
+ var underReplicatedVolumeIds, overReplicatedVolumeIds []uint32
+ for vid, replicas := range volumeReplicas {
+ replica := replicas[0]
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement))
+ if replicaPlacement.GetCopyCount() > len(replicas) {
+ underReplicatedVolumeIds = append(underReplicatedVolumeIds, vid)
+ } else if replicaPlacement.GetCopyCount() < len(replicas) {
+ overReplicatedVolumeIds = append(overReplicatedVolumeIds, vid)
+ fmt.Fprintf(writer, "volume %d replication %s, but over replicated %+d\n", replica.info.Id, replicaPlacement, len(replicas))
+ }
+ }
+
+ if len(overReplicatedVolumeIds) > 0 {
+ return c.fixOverReplicatedVolumes(commandEnv, writer, takeAction, overReplicatedVolumeIds, volumeReplicas, allLocations)
+ }
+
+ if len(underReplicatedVolumeIds) == 0 {
+ return nil
+ }
+
+ // find the most under populated data nodes
+ return c.fixUnderReplicatedVolumes(commandEnv, writer, takeAction, underReplicatedVolumeIds, volumeReplicas, allLocations)
+
+}
+
+func collectVolumeReplicaLocations(topologyInfo *master_pb.TopologyInfo) (map[uint32][]*VolumeReplica, []location) {
+ volumeReplicas := make(map[uint32][]*VolumeReplica)
var allLocations []location
- eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
loc := newLocation(dc, string(rack), dn)
- for _, v := range dn.VolumeInfos {
- if v.ReplicaPlacement > 0 {
- replicatedVolumeLocations[v.Id] = append(replicatedVolumeLocations[v.Id], loc)
- replicatedVolumeInfo[v.Id] = v
+ for _, diskInfo := range dn.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{
+ location: &loc,
+ info: v,
+ })
}
}
allLocations = append(allLocations, loc)
})
+ return volumeReplicas, allLocations
+}
- // find all under replicated volumes
- underReplicatedVolumeLocations := make(map[uint32][]location)
- for vid, locations := range replicatedVolumeLocations {
- volumeInfo := replicatedVolumeInfo[vid]
- replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))
- if replicaPlacement.GetCopyCount() > len(locations) {
- underReplicatedVolumeLocations[vid] = locations
+func (c *commandVolumeFixReplication) fixOverReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, overReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error {
+ for _, vid := range overReplicatedVolumeIds {
+ replicas := volumeReplicas[vid]
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replicas[0].info.ReplicaPlacement))
+
+ replica := pickOneReplicaToDelete(replicas, replicaPlacement)
+
+ // check collection name pattern
+ if *c.collectionPattern != "" {
+ matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
+ if err != nil {
+ return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err)
+ }
+ if !matched {
+ break
+ }
}
- }
- if len(underReplicatedVolumeLocations) == 0 {
- return fmt.Errorf("no under replicated volumes")
- }
+ fmt.Fprintf(writer, "deleting volume %d from %s ...\n", replica.info.Id, replica.location.dataNode.Id)
+
+ if !takeAction {
+ break
+ }
+
+ if err := deleteVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(replica.info.Id), replica.location.dataNode.Id); err != nil {
+ return fmt.Errorf("deleting volume %d from %s : %v", replica.info.Id, replica.location.dataNode.Id, err)
+ }
- if len(allLocations) == 0 {
- return fmt.Errorf("no data nodes at all")
}
+ return nil
+}
- // find the most under populated data nodes
- keepDataNodesSorted(allLocations)
+func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, underReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error {
- for vid, locations := range underReplicatedVolumeLocations {
- volumeInfo := replicatedVolumeInfo[vid]
- replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))
+ for _, vid := range underReplicatedVolumeIds {
+ replicas := volumeReplicas[vid]
+ replica := pickOneReplicaToCopyFrom(replicas)
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement))
foundNewLocation := false
+ hasSkippedCollection := false
+ keepDataNodesSorted(allLocations, types.ToDiskType(replica.info.DiskType))
+ fn := capacityByFreeVolumeCount(types.ToDiskType(replica.info.DiskType))
for _, dst := range allLocations {
// check whether data nodes satisfy the constraints
- if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) {
+ if fn(dst.dataNode) > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
+ // check collection name pattern
+ if *c.collectionPattern != "" {
+ matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
+ if err != nil {
+ return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err)
+ }
+ if !matched {
+ hasSkippedCollection = true
+ break
+ }
+ }
+
// ask the volume server to replicate the volume
- sourceNodes := underReplicatedVolumeLocations[vid]
- sourceNode := sourceNodes[rand.Intn(len(sourceNodes))]
foundNewLocation = true
- fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", volumeInfo.Id, replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id)
+ fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", replica.info.Id, replicaPlacement, replica.location.dataNode.Id, dst.dataNode.Id)
if !takeAction {
break
}
err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
- VolumeId: volumeInfo.Id,
- SourceDataNode: sourceNode.dataNode.Id,
+ _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
+ VolumeId: replica.info.Id,
+ SourceDataNode: replica.location.dataNode.Id,
})
- return replicateErr
+ if replicateErr != nil {
+ return fmt.Errorf("copying from %s => %s : %v", replica.location.dataNode.Id, dst.dataNode.Id, replicateErr)
+ }
+ return nil
})
if err != nil {
@@ -126,54 +203,152 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
}
// adjust free volume count
- dst.dataNode.FreeVolumeCount--
- keepDataNodesSorted(allLocations)
+ dst.dataNode.DiskInfos[replica.info.DiskType].FreeVolumeCount--
break
}
}
- if !foundNewLocation {
- fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", volumeInfo.Id, replicaPlacement, locations)
+
+ if !foundNewLocation && !hasSkippedCollection {
+ fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", replica.info.Id, replicaPlacement, len(replicas))
}
}
-
return nil
}
-func keepDataNodesSorted(dataNodes []location) {
+func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) {
+ fn := capacityByFreeVolumeCount(diskType)
sort.Slice(dataNodes, func(i, j int) bool {
- return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount
+ return fn(dataNodes[i].dataNode) > fn(dataNodes[j].dataNode)
})
}
-func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool {
+/*
+ if on an existing data node {
+ return false
+ }
+ if different from existing dcs {
+ if lack on different dcs {
+ return true
+ } else {
+ return false
+ }
+ }
+ if not on primary dc {
+ return false
+ }
+ if different from existing racks {
+ if lack on different racks {
+ return true
+ } else {
+ return false
+ }
+ }
+ if not on primary rack {
+ return false
+ }
+ if lacks on same rack {
+ return true
+ } else {
+ return false
+ }
+*/
+func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, replicas []*VolumeReplica, possibleLocation location) bool {
+
+ existingDataCenters, _, existingDataNodes := countReplicas(replicas)
+
+ if _, found := existingDataNodes[possibleLocation.String()]; found {
+ // avoid duplicated volume on the same data node
+ return false
+ }
- existingDataCenters := make(map[string]bool)
- existingRacks := make(map[string]bool)
- existingDataNodes := make(map[string]bool)
- for _, loc := range existingLocations {
- existingDataCenters[loc.DataCenter()] = true
- existingRacks[loc.Rack()] = true
- existingDataNodes[loc.String()] = true
+ primaryDataCenters, _ := findTopKeys(existingDataCenters)
+
+ // ensure data center count is within limit
+ if _, found := existingDataCenters[possibleLocation.DataCenter()]; !found {
+ // different from existing dcs
+ if len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 {
+ // lack on different dcs
+ return true
+ } else {
+ // adding this would go over the different dcs limit
+ return false
+ }
+ }
+ // now this is same as one of the existing data center
+ if !isAmong(possibleLocation.DataCenter(), primaryDataCenters) {
+ // not on one of the primary dcs
+ return false
}
- if replicaPlacement.DiffDataCenterCount >= len(existingDataCenters) {
- // check dc, good if different from any existing data centers
- _, found := existingDataCenters[possibleLocation.DataCenter()]
- return !found
- } else if replicaPlacement.DiffRackCount >= len(existingRacks) {
- // check rack, good if different from any existing racks
- _, found := existingRacks[possibleLocation.Rack()]
- return !found
- } else if replicaPlacement.SameRackCount >= len(existingDataNodes) {
- // check data node, good if different from any existing data nodes
- _, found := existingDataNodes[possibleLocation.String()]
- return !found
+ // now this is one of the primary dcs
+ primaryDcRacks := make(map[string]int)
+ for _, replica := range replicas {
+ if replica.location.DataCenter() != possibleLocation.DataCenter() {
+ continue
+ }
+ primaryDcRacks[replica.location.Rack()] += 1
+ }
+ primaryRacks, _ := findTopKeys(primaryDcRacks)
+ sameRackCount := primaryDcRacks[possibleLocation.Rack()]
+
+ // ensure rack count is within limit
+ if _, found := primaryDcRacks[possibleLocation.Rack()]; !found {
+ // different from existing racks
+ if len(primaryDcRacks) < replicaPlacement.DiffRackCount+1 {
+ // lack on different racks
+ return true
+ } else {
+ // adding this would go over the different racks limit
+ return false
+ }
}
+ // now this is same as one of the existing racks
+ if !isAmong(possibleLocation.Rack(), primaryRacks) {
+ // not on the primary rack
+ return false
+ }
+
+ // now this is on the primary rack
+
+ // check the same-rack replica count (this data node is already known to be new)
+ if sameRackCount < replicaPlacement.SameRackCount+1 {
+ // lack on same rack
+ return true
+ } else {
+ // adding this would exceed the same-rack replica limit
+ return false
+ }
+
+}
+
+func findTopKeys(m map[string]int) (topKeys []string, max int) {
+ for k, c := range m {
+ if max < c {
+ topKeys = topKeys[:0]
+ topKeys = append(topKeys, k)
+ max = c
+ } else if max == c {
+ topKeys = append(topKeys, k)
+ }
+ }
+ return
+}
+func isAmong(key string, keys []string) bool {
+ for _, k := range keys {
+ if k == key {
+ return true
+ }
+ }
return false
}
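
A small illustration of the two helpers above, with hypothetical counts: findTopKeys returns every key tied at the maximum, which is how satisfyReplicaPlacement identifies the primary data centers and primary racks.

    keys, max := findTopKeys(map[string]int{"dc1": 2, "dc2": 2, "dc3": 1})
    // keys holds "dc1" and "dc2" (map iteration order is unspecified), max == 2
    isAmong("dc1", keys) // true
    isAmong("dc3", keys) // false
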
+type VolumeReplica struct {
+ location *location
+ info *master_pb.VolumeInformationMessage
+}
+
type location struct {
dc string
rack string
@@ -199,3 +374,45 @@ func (l location) Rack() string {
func (l location) DataCenter() string {
return l.dc
}
+
+func pickOneReplicaToCopyFrom(replicas []*VolumeReplica) *VolumeReplica {
+ mostRecent := replicas[0]
+ for _, replica := range replicas {
+ if replica.info.ModifiedAtSecond > mostRecent.info.ModifiedAtSecond {
+ mostRecent = replica
+ }
+ }
+ return mostRecent
+}
+
+func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[string]int) {
+ diffDc = make(map[string]int)
+ diffRack = make(map[string]int)
+ diffNode = make(map[string]int)
+ for _, replica := range replicas {
+ diffDc[replica.location.DataCenter()] += 1
+ diffRack[replica.location.Rack()] += 1
+ diffNode[replica.location.String()] += 1
+ }
+ return
+}
+
+func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica {
+
+ sort.Slice(replicas, func(i, j int) bool {
+ a, b := replicas[i], replicas[j]
+ if a.info.CompactRevision != b.info.CompactRevision {
+ return a.info.CompactRevision < b.info.CompactRevision
+ }
+ if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond {
+ return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond
+ }
+ if a.info.Size != b.info.Size {
+ return a.info.Size < b.info.Size
+ }
+ return false
+ })
+
+ return replicas[0]
+
+}
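
A quick sketch of what this ordering implies, with hypothetical field values and assuming a replicaPlacement value is in scope (the placement argument does not affect the comparison shown): the copy with the lowest compact revision is purged first, before modification time or size is even considered.

    replicas := []*VolumeReplica{
        {info: &master_pb.VolumeInformationMessage{Id: 7, CompactRevision: 3, ModifiedAtSecond: 100}},
        {info: &master_pb.VolumeInformationMessage{Id: 7, CompactRevision: 2, ModifiedAtSecond: 999}},
    }
    toDelete := pickOneReplicaToDelete(replicas, replicaPlacement)
    // toDelete is the CompactRevision == 2 copy, even though it was modified more recently
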
diff --git a/weed/shell/command_volume_fix_replication_test.go b/weed/shell/command_volume_fix_replication_test.go
new file mode 100644
index 000000000..bb61be1ef
--- /dev/null
+++ b/weed/shell/command_volume_fix_replication_test.go
@@ -0,0 +1,273 @@
+package shell
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+type testcase struct {
+ name string
+ replication string
+ replicas []*VolumeReplica
+ possibleLocation location
+ expected bool
+}
+
+func TestSatisfyReplicaPlacementComplicated(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 100 negative",
+ replication: "100",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 100 positive",
+ replication: "100",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ },
+ possibleLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 022 positive",
+ replication: "022",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 022 negative",
+ replication: "022",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 positive",
+ replication: "210",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 210 moved from 200 negative extra dc",
+ replication: "210",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc4", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 negative extra data node",
+ replication: "210",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement01x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 011 same existing rack",
+ replication: "011",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 negative",
+ replication: "011",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ {
+ name: "test 011 different existing racks",
+ replication: "011",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 different existing racks negative",
+ replication: "011",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement00x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 001",
+ replication: "001",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 002 positive",
+ replication: "002",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 002 negative, repeat the same node",
+ replication: "002",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 002 negative, enough node already",
+ replication: "002",
+ replicas: []*VolumeReplica{
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ {
+ location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func runTests(tests []testcase, t *testing.T) {
+ for _, tt := range tests {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
+ println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
+ if satisfyReplicaPlacement(replicaPlacement, tt.replicas, tt.possibleLocation) != tt.expected {
+ t.Errorf("%s: expect %v add %v to %s %+v",
+ tt.name, tt.expected, tt.possibleLocation, tt.replication, tt.replicas)
+ }
+ }
+}
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
new file mode 100644
index 000000000..1fbc9ad35
--- /dev/null
+++ b/weed/shell/command_volume_fsck.go
@@ -0,0 +1,372 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeFsck{})
+}
+
+type commandVolumeFsck struct {
+ env *CommandEnv
+}
+
+func (c *commandVolumeFsck) Name() string {
+ return "volume.fsck"
+}
+
+func (c *commandVolumeFsck) Help() string {
+ return `check all volumes to find entries not used by the filer
+
+ Important assumption!!!
+ all data in the cluster is managed through a single filer.
+
+ This command works this way:
+ 1. collect all file ids from all volumes, as set A
+ 2. collect all file ids from the filer, as set B
+ 3. compute the set difference A - B; the remainder is orphan data
+
+`
+}
+
+func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ verbose := fsckCommand.Bool("v", false, "verbose mode")
+ applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only> delete data not referenced by the filer")
+ if err = fsckCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ c.env = commandEnv
+
+ // create a temp folder
+ tempFolder, err := ioutil.TempDir("", "sw_fsck")
+ if err != nil {
+ return fmt.Errorf("failed to create temp folder: %v", err)
+ }
+ if *verbose {
+ fmt.Fprintf(writer, "working directory: %s\n", tempFolder)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ // collect all volume id locations
+ volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect all volume locations: %v", err)
+ }
+
+ // collect each volume file ids
+ for volumeId, vinfo := range volumeIdToVInfo {
+ err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+ }
+ }
+
+ // collect all filer file ids
+ if err = c.collectFilerFileIds(tempFolder, volumeIdToVInfo, *verbose, writer); err != nil {
+ return fmt.Errorf("failed to collect file ids from filer: %v", err)
+ }
+
+ // volume file ids subtract filer file ids
+ var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
+ for volumeId, vinfo := range volumeIdToVInfo {
+ inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, *verbose)
+ if checkErr != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+ }
+ totalInUseCount += inUseCount
+ totalOrphanChunkCount += uint64(len(orphanFileIds))
+ totalOrphanDataSize += orphanDataSize
+
+ if *verbose {
+ for _, fid := range orphanFileIds {
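+ // the needle cookie is not known here, so pad the file id with a placeholder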
+ fmt.Fprintf(writer, "%sxxxxxxxx\n", fid)
+ }
+ }
+
+ if *applyPurging && len(orphanFileIds) > 0 {
+ if vinfo.isEcVolume {
+ fmt.Fprintf(writer, "Skip purging for Erasure Coded volumes.\n")
+ continue
+ }
+ if err = c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+ return fmt.Errorf("purge for volume %d: %v", volumeId, err)
+ }
+ }
+ }
+
+ if totalOrphanChunkCount == 0 {
+ fmt.Fprintf(writer, "no orphan data\n")
+ return nil
+ }
+
+ if !*applyPurging {
+ pct := float64(totalOrphanChunkCount*100) / (float64(totalOrphanChunkCount + totalInUseCount))
+ fmt.Fprintf(writer, "\nTotal\t\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ totalOrphanChunkCount+totalInUseCount, totalOrphanChunkCount, pct, totalOrphanDataSize)
+
+ fmt.Fprintf(writer, "This could be normal if multiple filers or no filers are used.\n")
+ }
+
+ return nil
+}
+
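+// collectOneVolumeFileIds copies a volume's .idx (or .ecx for erasure coded volumes) index file from its volume server into the temp folder.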
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
+ }
+
+ return operation.WithVolumeServerClient(vinfo.server, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ ext := ".idx"
+ if vinfo.isEcVolume {
+ ext = ".ecx"
+ }
+
+ copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: volumeId,
+ Ext: ext,
+ CompactionRevision: math.MaxUint32,
+ StopOffset: math.MaxInt64,
+ Collection: vinfo.collection,
+ IsEcVolume: vinfo.isEcVolume,
+ IgnoreSourceFileNotFound: false,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
+ }
+
+ err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
+ }
+
+ return nil
+
+ })
+
+}
+
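+// collectFilerFileIds traverses the filer namespace and appends every referenced file key to a per-volume .fid file.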
+func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToServer map[uint32]VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting file ids from filer ...\n")
+ }
+
+ files := make(map[uint32]*os.File)
+ for vid := range volumeIdToServer {
+ dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ }
+ files[vid] = dst
+ }
+ defer func() {
+ for _, f := range files {
+ f.Close()
+ }
+ }()
+
+ type Item struct {
+ vid uint32
+ fileKey uint64
+ }
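+ // walk the filer tree: the first callback drains (vid, fileKey) items into the per-volume files, the second emits one item per chunk after resolving chunk manifests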
+ return doTraverseBfsAndSaving(c.env, nil, "/", false, func(outputChan chan interface{}) {
+ buffer := make([]byte, 8)
+ for item := range outputChan {
+ i := item.(*Item)
+ util.Uint64toBytes(buffer, i.fileKey)
+ files[i.vid].Write(buffer)
+ }
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks)
+ if resolveErr != nil {
+ return nil
+ }
+ dChunks = append(dChunks, mChunks...)
+ for _, chunk := range dChunks {
+ outputChan <- &Item{
+ vid: chunk.Fid.VolumeId,
+ fileKey: chunk.Fid.FileKey,
+ }
+ }
+ return nil
+ })
+}
+
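+// oneVolumeFileIdsSubtractFilerFileIds loads the volume index into an in-memory needle map, deletes every key the filer still references, and reports the remaining entries as orphans.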
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+
+ db := needle_map.NewMemDb()
+ defer db.Close()
+
+ if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+ return
+ }
+
+ filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return
+ }
+
+ dataLen := len(filerFileIdsData)
+ if dataLen%8 != 0 {
+ return 0, nil, 0, fmt.Errorf("filer data is corrupted")
+ }
+
+ for i := 0; i < len(filerFileIdsData); i += 8 {
+ fileKey := util.BytesToUint64(filerFileIdsData[i : i+8])
+ db.Delete(types.NeedleId(fileKey))
+ inUseCount++
+ }
+
+ var orphanFileCount uint64
+ db.AscendingVisit(func(n needle_map.NeedleValue) error {
+ // fmt.Printf("%d,%x\n", volumeId, n.Key)
+ orphanFileIds = append(orphanFileIds, fmt.Sprintf("%d,%s", volumeId, n.Key.String()))
+ orphanFileCount++
+ orphanDataSize += uint64(n.Size)
+ return nil
+ })
+
+ if orphanFileCount > 0 {
+ pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
+ fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
+ }
+
+ return
+
+}
+
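+// VInfo records a volume's server location, collection, and whether it is erasure coded.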
+type VInfo struct {
+ server string
+ collection string
+ isEcVolume bool
+}
+
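+// collectVolumeIds reads the topology from the master and maps every volume id, regular or EC, to a VInfo.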
+func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
+ }
+
+ volumeIdToServer = make(map[uint32]VInfo)
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return
+ }
+
+ eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
+ for _, diskInfo := range t.DiskInfos {
+ for _, vi := range diskInfo.VolumeInfos {
+ volumeIdToServer[vi.Id] = VInfo{
+ server: t.Id,
+ collection: vi.Collection,
+ isEcVolume: false,
+ }
+ }
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ volumeIdToServer[ecShardInfo.Id] = VInfo{
+ server: t.Id,
+ collection: ecShardInfo.Collection,
+ isEcVolume: true,
+ }
+ }
+ }
+ })
+
+ if verbose {
+ fmt.Fprintf(writer, "collected %d volumes and locations.\n", len(volumeIdToServer))
+ }
+ return
+}
+
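+// purgeFileIdsForOneVolume deletes the orphan file ids from every replica location of the volume in parallel.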
+func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []string, writer io.Writer) (err error) {
+ fmt.Fprintf(writer, "purging orphan data for volume %d...\n", volumeId)
+ locations, found := c.env.MasterClient.GetLocations(volumeId)
+ if !found {
+ return fmt.Errorf("failed to find volume %d locations", volumeId)
+ }
+
+ resultChan := make(chan []*volume_server_pb.DeleteResult, len(locations))
+ var wg sync.WaitGroup
+ var errMutex sync.Mutex
+ for _, location := range locations {
+ wg.Add(1)
+ go func(server string, fidList []string) {
+ defer wg.Done()
+
+ if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
+ // guard the shared named return value against concurrent writes
+ errMutex.Lock()
+ err = deleteErr
+ errMutex.Unlock()
+ } else if deleteResults != nil {
+ resultChan <- deleteResults
+ }
+
+ }(location.Url, fileIds)
+ }
+ wg.Wait()
+ close(resultChan)
+
+ for results := range resultChan {
+ for _, result := range results {
+ if result.Error != "" {
+ fmt.Fprintf(writer, "purge error: %s\n", result.Error)
+ }
+ }
+ }
+
+ return
+}
+
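+// the temp folder holds two files per volume: <vid>.idx with the volume server's index entries, and <vid>.fid with the file keys referenced by the filer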
+func getVolumeFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+}
+
+func getFilerFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.fid", vid))
+}
+
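+// writeToFile drains a CopyFile gRPC stream into a local file.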
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ dst, err := os.OpenFile(fileName, flags, 0644)
+ if err != nil {
+ return fmt.Errorf("failed to create file %s: %v", fileName, err)
+ }
+ defer dst.Close()
+
+ for {
+ resp, receiveErr := client.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
+ }
+ if _, writeErr := dst.Write(resp.FileContent); writeErr != nil {
+ return fmt.Errorf("writing %s: %v", fileName, writeErr)
+ }
+ }
+ return nil
+}
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index c6c79d150..9856de10b 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -1,7 +1,7 @@
package shell
import (
- "context"
+ "bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -31,22 +31,35 @@ func (c *commandVolumeList) Help() string {
func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
- return err
- })
+ // collect topology information
+ topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
- writeTopologyInfo(writer, resp.TopologyInfo, resp.VolumeSizeLimitMb)
+ writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb)
return nil
}
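+// diskInfosToString summarizes the volume counts of each disk type as "type(volume:n/max active:a free:f remote:r)" segments.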
+func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
+ var buf bytes.Buffer
+ for diskType, diskInfo := range diskInfos {
+ if diskType == "" {
+ diskType = "hdd"
+ }
+ fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+ }
+ return buf.String()
+}
+
+func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+ return buf.String()
+}
+
func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
- fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb)
+ fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
sort.Slice(t.DataCenterInfos, func(i, j int) bool {
return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
})
@@ -58,7 +71,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi
return s
}
func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
- fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ fmt.Fprintf(writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
sort.Slice(t.RackInfos, func(i, j int) bool {
return t.RackInfos[i].Id < t.RackInfos[j].Id
@@ -70,7 +83,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti
return s
}
func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
- fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ fmt.Fprintf(writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
sort.Slice(t.DataNodeInfos, func(i, j int) bool {
return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id
@@ -82,8 +95,22 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
return s
}
func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
- fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ fmt.Fprintf(writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
+ var s statistics
+ for _, diskInfo := range t.DiskInfos {
+ s = s.plus(writeDiskInfo(writer, diskInfo))
+ }
+ fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
+ return s
+}
+
+func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo) statistics {
var s statistics
+ diskType := t.Type
+ if diskType == "" {
+ diskType = "hdd"
+ }
+ fmt.Fprintf(writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
sort.Slice(t.VolumeInfos, func(i, j int) bool {
return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id
})
@@ -91,13 +118,14 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
s = s.plus(writeVolumeInformationMessage(writer, vi))
}
for _, ecShardInfo := range t.EcShardInfos {
- fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
+ fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
}
- fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
+ fmt.Fprintf(writer, " Disk %s %+v \n", diskType, s)
return s
}
+
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
- fmt.Fprintf(writer, " volume %+v \n", t)
+ fmt.Fprintf(writer, " volume %+v \n", t)
return newStatistics(t)
}
diff --git a/weed/shell/command_volume_list_test.go b/weed/shell/command_volume_list_test.go
new file mode 100644
index 000000000..72c76f242
--- /dev/null
+++ b/weed/shell/command_volume_list_test.go
@@ -0,0 +1,893 @@
+package shell
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/golang/protobuf/proto"
+ "github.com/stretchr/testify/assert"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func TestParsing(t *testing.T) {
+ topo := parseOutput(topoData)
+
+ assert.Equal(t, 5, len(topo.DataCenterInfos))
+
+}
+
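+// parseOutput rebuilds a TopologyInfo from the text that volume.list prints.
+// Every level appears twice in the output, as an opening line and a closing
+// "total" line, so each case below toggles between entering a level (nil) and
+// leaving it (non-nil).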
+func parseOutput(output string) *master_pb.TopologyInfo {
+ lines := strings.Split(output, "\n")
+ var topo *master_pb.TopologyInfo
+ var dc *master_pb.DataCenterInfo
+ var rack *master_pb.RackInfo
+ var dn *master_pb.DataNodeInfo
+ var disk *master_pb.DiskInfo
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ parts := strings.Split(line, " ")
+ switch parts[0] {
+ case "Topology":
+ if topo == nil {
+ topo = &master_pb.TopologyInfo{}
+ }
+ case "DataCenter":
+ if dc == nil {
+ dc = &master_pb.DataCenterInfo{
+ Id: parts[1],
+ }
+ topo.DataCenterInfos = append(topo.DataCenterInfos, dc)
+ } else {
+ dc = nil
+ }
+ case "Rack":
+ if rack == nil {
+ rack = &master_pb.RackInfo{
+ Id: parts[1],
+ }
+ dc.RackInfos = append(dc.RackInfos, rack)
+ } else {
+ rack = nil
+ }
+ case "DataNode":
+ if dn == nil {
+ dn = &master_pb.DataNodeInfo{
+ Id: parts[1],
+ DiskInfos: make(map[string]*master_pb.DiskInfo),
+ }
+ rack.DataNodeInfos = append(rack.DataNodeInfos, dn)
+ } else {
+ dn = nil
+ }
+ case "Disk":
+ if disk == nil {
+ diskType := parts[1][:strings.Index(parts[1], "(")]
+ maxVolumeCountStr := parts[1][strings.Index(parts[1], "/")+1:]
+ maxVolumeCount, _ := strconv.Atoi(maxVolumeCountStr)
+ disk = &master_pb.DiskInfo{
+ Type: diskType,
+ MaxVolumeCount: uint64(maxVolumeCount),
+ }
+ dn.DiskInfos[types.ToDiskType(diskType).String()] = disk
+ } else {
+ disk = nil
+ }
+ case "volume":
+ volumeLine := line[len("volume "):]
+ volume := &master_pb.VolumeInformationMessage{}
+ proto.UnmarshalText(volumeLine, volume)
+ disk.VolumeInfos = append(disk.VolumeInfos, volume)
+ }
+ }
+
+ return topo
+}
+
+const topoData = `
+Topology volumeSizeLimit:1024 MB hdd(volume:760/7280 active:760 free:6520 remote:0)
+ DataCenter dc1 hdd(volume:0/0 active:0 free:0 remote:0)
+ Rack DefaultRack hdd(volume:0/0 active:0 free:0 remote:0)
+ Rack DefaultRack total size:0 file_count:0
+ DataCenter dc1 total size:0 file_count:0
+ DataCenter dc2 hdd(volume:86/430 active:86 free:344 remote:0)
+ Rack rack1 hdd(volume:50/240 active:50 free:190 remote:0)
+ DataNode 192.168.1.4:8080 hdd(volume:50/240 active:50 free:190 remote:0)
+ Disk hdd(volume:50/240 active:50 free:190 remote:0)
+ volume id:15 size:1115965064 collection:"collection0" file_count:83 replica_placement:100 version:3 modified_at_second:1609923671
+ volume id:21 size:1097631536 collection:"collection0" file_count:82 delete_count:7 deleted_byte_count:68975485 replica_placement:100 version:3 modified_at_second:1609929578
+ volume id:22 size:1086828272 collection:"collection0" file_count:75 replica_placement:100 version:3 modified_at_second:1609930001
+ volume id:23 size:1076380216 collection:"collection0" file_count:68 replica_placement:100 version:3 modified_at_second:1609930434
+ volume id:24 size:1074139776 collection:"collection0" file_count:90 replica_placement:100 version:3 modified_at_second:1609930909
+ volume id:25 size:690757512 collection:"collection0" file_count:38 replica_placement:100 version:3 modified_at_second:1611144216
+ volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482
+ volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762
+ volume id:29 size:281582680 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422
+ volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422
+ volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642
+ volume id:33 size:1130226400 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789
+ volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100 deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830
+ volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786
+ volume id:52 size:1083529728 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195
+ volume id:54 size:1045022344 collection:"collection1" file_count:9408 delete_count:30 deleted_byte_count:15132106 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812
+ volume id:63 size:956941112 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036
+ volume id:69 size:869213648 collection:"collection1" file_count:7293 delete_count:102 deleted_byte_count:30643207 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534
+ volume id:74 size:957046128 collection:"collection1" file_count:6982 delete_count:258 deleted_byte_count:73054259 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460
+ volume id:80 size:827912928 collection:"collection1" file_count:6914 delete_count:17 deleted_byte_count:5689635 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631157
+ volume id:84 size:873121856 collection:"collection1" file_count:8200 delete_count:13 deleted_byte_count:3131676 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161
+ volume id:85 size:1023869320 collection:"collection1" file_count:7788 delete_count:234 deleted_byte_count:78037967 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723
+ volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631193
+ volume id:98 size:1077836440 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691
+ volume id:105 size:1073996824 collection:"collection1" file_count:6872 delete_count:20 deleted_byte_count:14482293 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499757
+ volume id:106 size:1075458664 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137
+ volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823
+ volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611565
+ volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573878
+ volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670
+ volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 replica_placement:100 version:3 modified_at_second:1615584779
+ volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036
+ volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947
+ volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451
+ volume id:161 size:1077397192 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452
+ volume id:173 size:1074154704 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2578509 replica_placement:100 version:3 modified_at_second:1615591904
+ volume id:174 size:1073824232 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914
+ volume id:197 size:1075423240 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485254
+ volume id:219 size:1092298904 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316
+ volume id:263 size:1077167352 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 replica_placement:100 version:3 modified_at_second:1614871567
+ volume id:272 size:1076146040 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139
+ volume id:291 size:1076256760 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924
+ volume id:299 size:1075147824 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918454
+ volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378
+ volume id:302 size:1077559792 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500
+ volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969996
+ volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977458
+ volume id:355 size:1075239792 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157
+ volume id:373 size:1080928000 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016877
+ Disk hdd total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253
+ DataNode 192.168.1.4:8080 total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253
+ Rack rack1 total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253
+ Rack rack2 hdd(volume:36/190 active:36 free:154 remote:0)
+ DataNode 192.168.1.2:8080 hdd(volume:36/190 active:36 free:154 remote:0)
+ Disk hdd(volume:36/190 active:36 free:154 remote:0)
+ volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:480564 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622
+ volume id:3 size:308743136 file_count:1638 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242
+ volume id:4 size:285986968 file_count:1641 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302
+ volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402
+ volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040
+ volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762
+ volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814550
+ volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 modified_at_second:1609819732
+ volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827837
+ volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956980
+ volume id:16 size:1113666104 collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007
+ volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000
+ volume id:21 size:1097631664 collection:"collection0" file_count:82 delete_count:11 deleted_byte_count:68985100 replica_placement:100 version:3 modified_at_second:1612957007
+ volume id:56 size:1001897616 collection:"collection1" file_count:8762 delete_count:37 deleted_byte_count:65375405 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014
+ volume id:81 size:880693104 collection:"collection1" file_count:7481 delete_count:236 deleted_byte_count:80386421 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631396
+ volume id:104 size:1076383624 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100728071 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602658
+ volume id:107 size:1073811840 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747484 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569
+ volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822
+ volume id:139 size:1074163936 collection:"collection1" file_count:9315 delete_count:42 deleted_byte_count:10630966 replica_placement:100 version:3 modified_at_second:1615571946
+ volume id:151 size:1098659752 collection:"collection1" file_count:10808 delete_count:24 deleted_byte_count:7088102 replica_placement:100 version:3 modified_at_second:1615586389
+ volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:9076141 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:167 size:1073958176 collection:"collection1" file_count:25229 delete_count:48 deleted_byte_count:25871565 replica_placement:100 version:3 modified_at_second:1615602669
+ volume id:177 size:1074120216 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3803952 replica_placement:100 version:3 modified_at_second:1615516892
+ volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45552859 replica_placement:100 version:3 modified_at_second:1615580308
+ volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1452346 replica_placement:100 version:3 modified_at_second:1615568922
+ volume id:215 size:1068268216 collection:"collection1" file_count:2813 delete_count:10 deleted_byte_count:5676795 replica_placement:100 version:3 modified_at_second:1615586386
+ volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638
+ volume id:283 size:1080178944 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896626
+ volume id:303 size:1075944504 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431
+ volume id:309 size:1075178624 collection:"collection2" file_count:22692 delete_count:3 deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409
+ volume id:323 size:1074608200 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526
+ volume id:344 size:1075035448 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 version:3 modified_at_second:1614977465
+ volume id:347 size:1075145496 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727
+ volume id:357 size:1074276208 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792
+ volume id:380 size:1010760456 collection:"collection2" file_count:14921 delete_count:6 deleted_byte_count:65678 replica_placement:100 version:3 modified_at_second:1615632322
+ volume id:381 size:939292792 collection:"collection2" file_count:14619 delete_count:2 deleted_byte_count:5119 replica_placement:100 version:3 modified_at_second:1615632324
+ Disk hdd total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088
+ DataNode 192.168.1.2:8080 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088
+ Rack rack2 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088
+ DataCenter dc2 total size:82098209920 file_count:907048 deleted_file:3671 deleted_bytes:1475897341
+ DataCenter dc3 hdd(volume:108/1850 active:108 free:1742 remote:0)
+ Rack rack3 hdd(volume:108/1850 active:108 free:1742 remote:0)
+ DataNode 192.168.1.6:8080 hdd(volume:108/1850 active:108 free:1742 remote:0)
+ Disk hdd(volume:108/1850 active:108 free:1742 remote:0)
+ volume id:1 size:284685936 file_count:1557 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062
+ volume id:32 size:281390512 file_count:1496 delete_count:6 deleted_byte_count:546403 replica_placement:100 version:3 modified_at_second:1615632362
+ volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 replica_placement:100 version:3 modified_at_second:1615632397
+ volume id:49 size:1078775288 collection:"collection1" file_count:9636 delete_count:22 deleted_byte_count:5625976 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630446
+ volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284
+ volume id:88 size:1073767976 collection:"collection1" file_count:14995 delete_count:206 deleted_byte_count:81222360 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615629897
+ volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497
+ volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728
+ volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314
+ volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615564274
+ volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954
+ volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124
+ volume id:212 size:1078293360 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:213 size:1093587976 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383
+ volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243
+ volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244
+ volume id:227 size:1106699896 collection:"collection1" file_count:2827 delete_count:20 deleted_byte_count:5496790 replica_placement:100 version:3 modified_at_second:1615609989
+ volume id:229 size:1109855312 collection:"collection1" file_count:2857 delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609988
+ volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537
+ volume id:231 size:1112917696 collection:"collection1" file_count:3151 delete_count:20 deleted_byte_count:2989828 replica_placement:100 version:3 modified_at_second:1615611350
+ volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611352
+ volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354
+ volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727
+ volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630838
+ volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 version:3 modified_at_second:1615632006
+ volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1614852171
+ volume id:245 size:1074597056 collection:"collection2" file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093
+ volume id:246 size:1075998648 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852105
+ volume id:248 size:1084301184 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285
+ volume id:249 size:1074819136 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856230
+ volume id:251 size:1075684488 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270
+ volume id:252 size:1075065208 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861196
+ volume id:253 size:1087328816 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861255
+ volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233
+ volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100
+ volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:259 size:1075507776 collection:"collection2" file_count:18079 delete_count:2 deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381
+ volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629
+ volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1615629130
+ volume id:266 size:1075617464 collection:"collection2" file_count:20075 delete_count:1 deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526
+ volume id:267 size:1075699544 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294
+ volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877434
+ volume id:269 size:1077552872 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481
+ volume id:270 size:1076876568 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469
+ volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147
+ volume id:277 size:1074956288 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988
+ volume id:278 size:1078798640 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 modified_at_second:1614890292
+ volume id:279 size:1077325040 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890229
+ volume id:280 size:1077432216 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262
+ volume id:281 size:1077581096 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237
+ volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231
+ volume id:285 size:1082128688 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165
+ volume id:289 size:1075284256 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977
+ volume id:290 size:1074723792 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924
+ volume id:291 size:1076256768 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924
+ volume id:293 size:1075409792 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235
+ volume id:294 size:1075444048 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 modified_at_second:1614912220
+ volume id:296 size:1077824032 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301
+ volume id:297 size:1080229136 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481
+ volume id:298 size:1075410136 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474
+ volume id:299 size:1075147936 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918455
+ volume id:300 size:1076212392 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464
+ volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378
+ volume id:303 size:1075944480 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431
+ volume id:306 size:1074764016 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462
+ volume id:307 size:1076568000 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448
+ volume id:308 size:1074022392 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401
+ volume id:309 size:1075178664 collection:"collection2" file_count:22692 delete_count:3 deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409
+ volume id:310 size:1074761528 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077
+ volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441
+ volume id:315 size:1084153544 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937885
+ volume id:317 size:1076215096 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965
+ volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862
+ volume id:319 size:1073952880 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810
+ volume id:320 size:1082437792 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943599
+ volume id:321 size:1081477904 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982
+ volume id:324 size:1075606680 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310
+ volume id:325 size:1080701144 collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525
+ volume id:330 size:1080825832 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956477
+ volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869
+ volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978
+ volume id:336 size:1087853056 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963005
+ volume id:337 size:1075646784 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937
+ volume id:338 size:1076118304 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850
+ volume id:340 size:1079462184 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969978
+ volume id:341 size:1074448400 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858
+ volume id:342 size:1080186424 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977521
+ volume id:344 size:1075035416 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 version:3 modified_at_second:1614977465
+ volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977457
+ volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1615631322
+ volume id:348 size:1080623640 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984606
+ volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682
+ volume id:351 size:1078795112 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816
+ volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130
+ volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951
+ volume id:355 size:1075239728 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157
+ volume id:356 size:1083305048 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992028
+ volume id:358 size:1085152368 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 version:3 modified_at_second:1614998824
+ volume id:360 size:1075532456 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770
+ volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668
+ volume id:375 size:1076140568 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787
+ volume id:376 size:1074845944 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916
+ volume id:377 size:957284144 collection:"collection2" file_count:14923 delete_count:1 deleted_byte_count:1797 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:378 size:959273936 collection:"collection2" file_count:15027 delete_count:4 deleted_byte_count:231414 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:381 size:939261032 collection:"collection2" file_count:14615 delete_count:5 deleted_byte_count:1192272 replica_placement:100 version:3 modified_at_second:1615632324
+ Disk hdd total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585
+ DataNode 192.168.1.6:8080 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585
+ Rack rack3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585
+ DataCenter dc3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585
+ DataCenter dc4 hdd(volume:267/2000 active:267 free:1733 remote:0)
+ Rack DefaultRack hdd(volume:267/2000 active:267 free:1733 remote:0)
+ DataNode 192.168.1.1:8080 hdd(volume:267/2000 active:267 free:1733 remote:0)
+ Disk hdd(volume:267/2000 active:267 free:1733 remote:0)
+ volume id:1 size:284693256 file_count:1558 delete_count:2 deleted_byte_count:4818 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062
+ volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:464508 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622
+ volume id:3 size:308741952 file_count:1637 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242
+ volume id:4 size:285986968 file_count:1640 delete_count:1 deleted_byte_count:145095 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302
+ volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342
+ volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402
+ volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040
+ volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762
+ volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814543
+ volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1609815123
+ volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 modified_at_second:1609819726
+ volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827832
+ volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956983
+ volume id:15 size:1115965160 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:4956 replica_placement:100 version:3 modified_at_second:1612957001
+ volume id:16 size:1113666048 collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007
+ volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000
+ volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1612957000
+ volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1612957011
+ volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 modified_at_second:1612957011
+ volume id:22 size:1086828368 collection:"collection0" file_count:75 delete_count:3 deleted_byte_count:5551 replica_placement:100 version:3 modified_at_second:1612957007
+ volume id:23 size:1076380280 collection:"collection0" file_count:68 delete_count:2 deleted_byte_count:2910 replica_placement:100 version:3 modified_at_second:1612957011
+ volume id:24 size:1074139808 collection:"collection0" file_count:90 delete_count:1 deleted_byte_count:1977 replica_placement:100 version:3 modified_at_second:1612957011
+ volume id:25 size:690757544 collection:"collection0" file_count:38 delete_count:1 deleted_byte_count:1944 replica_placement:100 version:3 modified_at_second:1612956995
+ volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170023
+ volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366
+ volume id:35 size:1075241656 collection:"collection1" file_count:10523 delete_count:96 deleted_byte_count:46618989 replica_placement:100 version:3 modified_at_second:1615618790
+ volume id:36 size:1075118360 collection:"collection1" file_count:10342 delete_count:116 deleted_byte_count:25493106 replica_placement:100 version:3 modified_at_second:1615606148
+ volume id:37 size:1075895584 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594777
+ volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959
+ volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994
+ volume id:41 size:1076283528 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19319268 replica_placement:100 version:3 modified_at_second:1615596736
+ volume id:42 size:1093948352 collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548908
+ volume id:43 size:1116445864 collection:"collection1" file_count:7358 delete_count:54 deleted_byte_count:9534379 replica_placement:100 version:3 modified_at_second:1615566170
+ volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170
+ volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573499
+ volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569826
+ volume id:48 size:1076778720 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690
+ volume id:50 size:1076688224 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610879
+ volume id:52 size:1083529704 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195
+ volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631452
+ volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631774
+ volume id:58 size:908064200 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632342
+ volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238
+ volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439
+ volume id:61 size:975604488 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176
+ volume id:62 size:873845936 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133
+ volume id:64 size:965638488 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631031
+ volume id:65 size:823283552 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632306
+ volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631968
+ volume id:67 size:878713872 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284
+ volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398
+ volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307
+ volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630982
+ volume id:73 size:929047664 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707
+ volume id:74 size:957046128 collection:"collection1" file_count:6981 delete_count:259 deleted_byte_count:73080838 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460
+ volume id:75 size:908044992 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430
+ volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284
+ volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632013
+ volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829
+ volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326
+ volume id:82 size:1041661800 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430
+ volume id:83 size:936195856 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029
+ volume id:85 size:1023867520 collection:"collection1" file_count:7787 delete_count:240 deleted_byte_count:82091742 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723
+ volume id:86 size:1009437488 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812
+ volume id:87 size:922276640 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632438
+ volume id:89 size:1044401976 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632208
+ volume id:90 size:891145784 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629390
+ volume id:91 size:936572832 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851
+ volume id:92 size:992440712 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566
+ volume id:93 size:1079603768 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556015
+ volume id:94 size:1030685824 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616
+ volume id:95 size:990879168 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631866
+ volume id:96 size:989296136 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778
+ volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631194
+ volume id:99 size:1071718504 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631175
+ volume id:100 size:1083617440 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505917
+ volume id:101 size:1077109520 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994
+ volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626683
+ volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417
+ volume id:104 size:1076383768 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100578087 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602661
+ volume id:105 size:1073996824 collection:"collection1" file_count:6873 delete_count:19 deleted_byte_count:14271533 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499756
+ volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232
+ volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487
+ volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615590786
+ volume id:111 size:1073826232 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593233
+ volume id:114 size:1074762784 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826
+ volume id:115 size:1076192240 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285295
+ volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611567
+ volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573714
+ volume id:118 size:1074064400 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516265
+ volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573880
+ volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567
+ volume id:121 size:1078803248 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 replica_placement:100 version:3 modified_at_second:1615506629
+ volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:126 size:1073867464 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585912
+ volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612991
+ volume id:129 size:1074704272 collection:"collection1" file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566
+ volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582330
+ volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898
+ volume id:132 size:1088539496 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619584
+ volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 replica_placement:100 version:3 modified_at_second:1615584780
+ volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555876
+ volume id:135 size:1073906720 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816
+ volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:139 size:1074163936 collection:"collection1" file_count:9314 delete_count:43 deleted_byte_count:10631353 replica_placement:100 version:3 modified_at_second:1615571946
+ volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994
+ volume id:142 size:1075732992 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598914
+ volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488586
+ volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825
+ volume id:146 size:1076337520 collection:"collection1" file_count:10492 delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005
+ volume id:147 size:1077130544 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604117
+ volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586393
+ volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494496
+ volume id:150 size:1076290408 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611569
+ volume id:151 size:1098659752 collection:"collection1" file_count:10805 delete_count:27 deleted_byte_count:7209106 replica_placement:100 version:3 modified_at_second:1615586390
+ volume id:152 size:1075941376 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:154 size:1081244752 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478471
+ volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628342
+ volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879
+ volume id:159 size:1074132336 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593
+ volume id:160 size:1075680976 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:161 size:1077397136 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452
+ volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478127
+ volume id:163 size:1074457224 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313
+ volume id:164 size:1074261256 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620085
+ volume id:165 size:1073891080 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471910
+ volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491021
+ volume id:167 size:1073958280 collection:"collection1" file_count:25231 delete_count:48 deleted_byte_count:26022344 replica_placement:100 version:3 modified_at_second:1615620014
+ volume id:168 size:1074718864 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664
+ volume id:169 size:1073863032 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832
+ volume id:170 size:1075747088 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579207
+ volume id:171 size:1081881400 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596486
+ volume id:172 size:1074787304 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385
+ volume id:174 size:1073824160 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914
+ volume id:175 size:1077742472 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559516
+ volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169
+ volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369
+ volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45574435 replica_placement:100 version:3 modified_at_second:1615580308
+ volume id:180 size:1078438448 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773243
+ volume id:181 size:1074571672 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745117
+ volume id:183 size:1076361616 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572985
+ volume id:184 size:1074594216 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578
+ volume id:185 size:1074099592 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506835
+ volume id:186 size:1074220664 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614934
+ volume id:187 size:1074396112 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590491
+ volume id:188 size:1074732632 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645
+ volume id:189 size:1074847824 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661
+ volume id:190 size:1074008968 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447162
+ volume id:191 size:1075492960 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357457
+ volume id:192 size:1075857384 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632
+ volume id:193 size:1076616760 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507877
+ volume id:194 size:1073985792 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559502
+ volume id:195 size:1074158304 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 replica_placement:100 version:3 modified_at_second:1615559331
+ volume id:196 size:1074594640 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501690
+ volume id:198 size:1075104624 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623371
+ volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:200 size:1075630464 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252
+ volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:204 size:1079766904 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565702
+ volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061
+ volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954
+ volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124
+ volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637
+ volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 modified_at_second:1615579317
+ volume id:221 size:1106545536 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138
+ volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434685
+ volume id:226 size:1103504792 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:227 size:1106699864 collection:"collection1" file_count:2827 delete_count:19 deleted_byte_count:5393310 replica_placement:100 version:3 modified_at_second:1615609989
+ volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489
+ volume id:229 size:1109855256 collection:"collection1" file_count:2857 delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609989
+ volume id:231 size:1112917664 collection:"collection1" file_count:3151 delete_count:19 deleted_byte_count:2852517 replica_placement:100 version:3 modified_at_second:1615611350
+ volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611352
+ volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611354
+ volume id:236 size:1089476200 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030
+ volume id:240 size:424791528 collection:"collection1" file_count:734 delete_count:12 deleted_byte_count:7353071 replica_placement:100 version:3 modified_at_second:1615631669
+ volume id:242 size:1075383304 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852115
+ volume id:243 size:1088174560 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852202
+ volume id:245 size:1074597056 collection:"collection2" file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093
+ volume id:247 size:1075859784 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152
+ volume id:249 size:1074819168 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856231
+ volume id:250 size:1080572256 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129
+ volume id:251 size:1075684408 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270
+ volume id:254 size:1074830800 collection:"collection2" file_count:14140 delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115
+ volume id:257 size:1082621664 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866395
+ volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226
+ volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:262 size:1078492464 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601
+ volume id:263 size:1077167440 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 replica_placement:100 version:3 modified_at_second:1614871567
+ volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877435
+ volume id:269 size:1077552720 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481
+ volume id:271 size:1076992648 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504
+ volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046
+ volume id:274 size:1075994128 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113
+ volume id:276 size:1076899888 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003
+ volume id:277 size:1074956160 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988
+ volume id:279 size:1077325096 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890230
+ volume id:282 size:1075232240 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304
+ volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231
+ volume id:285 size:1082128576 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165
+ volume id:286 size:1077464824 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401
+ volume id:287 size:1074590528 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904874
+ volume id:288 size:1075406800 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738
+ volume id:292 size:1092010744 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 version:3 modified_at_second:1614912327
+ volume id:293 size:1075409776 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235
+ volume id:294 size:1075443992 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 modified_at_second:1614912220
+ volume id:295 size:1074702376 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 modified_at_second:1614911929
+ volume id:300 size:1076212424 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464
+ volume id:304 size:1081038888 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925567
+ volume id:305 size:1074185552 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312
+ volume id:310 size:1074761520 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077
+ volume id:311 size:1088248208 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931460
+ volume id:312 size:1075037808 collection:"collection2" file_count:22524 replica_placement:100 version:3 modified_at_second:1614937832
+ volume id:313 size:1074876016 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755
+ volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441
+ volume id:315 size:1084153456 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937884
+ volume id:316 size:1077720784 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838
+ volume id:317 size:1076215040 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965
+ volume id:319 size:1073952744 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810
+ volume id:320 size:1082437736 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943591
+ volume id:321 size:1081477960 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982
+ volume id:322 size:1078471600 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950574
+ volume id:324 size:1075606712 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310
+ volume id:326 size:1076059936 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619
+ volume id:327 size:1076121224 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487
+ volume id:328 size:1074767928 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362
+ volume id:329 size:1076691776 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 replica_placement:100 version:3 modified_at_second:1614956485
+ volume id:331 size:1074957192 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259
+ volume id:333 size:1074270192 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697
+ volume id:335 size:1076235176 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970
+ volume id:336 size:1087853032 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963003
+ volume id:338 size:1076118360 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850
+ volume id:342 size:1080186296 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977518
+ volume id:343 size:1075345184 collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148
+ volume id:349 size:1075957824 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748
+ volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682
+ volume id:354 size:1085213992 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992207
+ volume id:356 size:1083304992 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992027
+ volume id:358 size:1085152312 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 version:3 modified_at_second:1614998824
+ volume id:359 size:1074211240 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711
+ volume id:362 size:1074074120 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407
+ volume id:363 size:1078859496 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004822
+ volume id:364 size:1081280816 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813
+ volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776
+ volume id:366 size:1075267272 collection:"collection2" file_count:21856 delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703
+ volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985
+ volume id:368 size:1074822016 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010878
+ volume id:369 size:1091472040 collection:"collection2" file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011019
+ volume id:370 size:1076040480 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877
+ volume id:371 size:1078806160 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010994
+ volume id:372 size:1076193312 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911
+ volume id:374 size:1085011080 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917
+ volume id:375 size:1076140688 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787
+ volume id:378 size:959273824 collection:"collection2" file_count:15031 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:379 size:1014108592 collection:"collection2" file_count:15360 delete_count:8 deleted_byte_count:2524591 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:380 size:1010760464 collection:"collection2" file_count:14920 delete_count:7 deleted_byte_count:134859 replica_placement:100 version:3 modified_at_second:1615632322
+ Disk hdd total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataNode 192.168.1.1:8080 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ Rack DefaultRack total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataCenter dc4 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataCenter dc5 hdd(volume:299/3000 active:299 free:2701 remote:0)
+ Rack DefaultRack hdd(volume:299/3000 active:299 free:2701 remote:0)
+ DataNode 192.168.1.5:8080 hdd(volume:299/3000 active:299 free:2701 remote:0)
+ Disk hdd(volume:299/3000 active:299 free:2701 remote:0)
+ volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342
+ volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1615629606
+ volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1615631673
+ volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1615630117
+ volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615629340
+ volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170024
+ volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482
+ volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762
+ volume id:29 size:281582688 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422
+ volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422
+ volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642
+ volume id:32 size:281343360 file_count:1497 replica_placement:100 version:3 modified_at_second:1615632362
+ volume id:33 size:1130226344 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789
+ volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366
+ volume id:35 size:1075241744 collection:"collection1" file_count:10523 delete_count:97 deleted_byte_count:46586217 replica_placement:100 version:3 modified_at_second:1615618790
+ volume id:36 size:1075118336 collection:"collection1" file_count:10341 delete_count:118 deleted_byte_count:24753278 replica_placement:100 version:3 modified_at_second:1615606148
+ volume id:37 size:1075895576 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594776
+ volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100 deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830
+ volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959
+ volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994
+ volume id:41 size:1076283592 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19311237 replica_placement:100 version:3 modified_at_second:1615596736
+ volume id:42 size:1093948352 collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548906
+ volume id:43 size:1116445864 collection:"collection1" file_count:7355 delete_count:57 deleted_byte_count:9727158 replica_placement:100 version:3 modified_at_second:1615566167
+ volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170
+ volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573498
+ volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569825
+ volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 replica_placement:100 version:3 modified_at_second:1615632397
+ volume id:48 size:1076778664 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690
+ volume id:49 size:1078775288 collection:"collection1" file_count:9631 delete_count:27 deleted_byte_count:5985628 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615575823
+ volume id:50 size:1076688288 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610876
+ volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786
+ volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:54 size:1045022288 collection:"collection1" file_count:9409 delete_count:29 deleted_byte_count:15102818 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630813
+ volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631453
+ volume id:56 size:1002412240 collection:"collection1" file_count:8762 delete_count:40 deleted_byte_count:65885831 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014
+ volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631775
+ volume id:58 size:908064192 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632343
+ volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238
+ volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439
+ volume id:61 size:975604544 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176
+ volume id:62 size:873845904 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133
+ volume id:63 size:956941176 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036
+ volume id:64 size:965638424 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631032
+ volume id:65 size:823283608 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307
+ volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631968
+ volume id:67 size:878713880 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:69 size:863913896 collection:"collection1" file_count:7291 delete_count:100 deleted_byte_count:25335024 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534
+ volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398
+ volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307
+ volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630985
+ volume id:73 size:929047544 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707
+ volume id:75 size:908045000 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430
+ volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284
+ volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632014
+ volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829
+ volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326
+ volume id:80 size:827912928 collection:"collection1" file_count:6916 delete_count:15 deleted_byte_count:5611604 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631159
+ volume id:81 size:880693168 collection:"collection1" file_count:7481 delete_count:238 deleted_byte_count:80880878 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631395
+ volume id:82 size:1041660512 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430
+ volume id:83 size:936194288 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029
+ volume id:84 size:871262320 collection:"collection1" file_count:8190 delete_count:14 deleted_byte_count:3150948 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161
+ volume id:86 size:1009434632 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812
+ volume id:87 size:922274624 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632438
+ volume id:88 size:1073767976 collection:"collection1" file_count:14994 delete_count:207 deleted_byte_count:82380696 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615541383
+ volume id:89 size:1044421824 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632208
+ volume id:90 size:891163760 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629392
+ volume id:91 size:936573952 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851
+ volume id:92 size:992439144 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566
+ volume id:93 size:1079602592 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556013
+ volume id:94 size:1030684704 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616
+ volume id:95 size:990877824 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631867
+ volume id:96 size:989294848 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778
+ volume id:98 size:1077836472 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691
+ volume id:99 size:1071718496 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631175
+ volume id:100 size:1083617472 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505914
+ volume id:101 size:1077109408 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864780 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994
+ volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626682
+ volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417
+ volume id:106 size:1075458680 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137
+ volume id:107 size:1073811776 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747428 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569
+ volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593231
+ volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487
+ volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615590786
+ volume id:111 size:1073826176 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232
+ volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823
+ volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822
+ volume id:114 size:1074762792 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826
+ volume id:115 size:1076192296 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285296
+ volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573712
+ volume id:118 size:1074064344 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516264
+ volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567
+ volume id:121 size:1078803320 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 replica_placement:100 version:3 modified_at_second:1615506626
+ volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585912
+ volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:126 size:1073867408 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585911
+ volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612990
+ volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670
+ volume id:129 size:1074704328 collection:"collection1" file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566
+ volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582327
+ volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898
+ volume id:132 size:1088539552 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619581
+ volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555873
+ volume id:135 size:1073906776 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816
+ volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036
+ volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947
+ volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994
+ volume id:142 size:1075733064 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598913
+ volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488584
+ volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451
+ volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825
+ volume id:146 size:1076337576 collection:"collection1" file_count:10492 delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005
+ volume id:147 size:1077130576 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604115
+ volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586390
+ volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494494
+ volume id:150 size:1076290328 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611567
+ volume id:152 size:1075941400 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:154 size:1081244696 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478469
+ volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:10076804 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628341
+ volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879
+ volume id:159 size:1074132368 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593
+ volume id:160 size:1075680952 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597780
+ volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478126
+ volume id:163 size:1074457192 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313
+ volume id:164 size:1074261248 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620084
+ volume id:165 size:1073891016 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471907
+ volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491019
+ volume id:168 size:1074718808 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664
+ volume id:169 size:1073863128 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832
+ volume id:170 size:1075747096 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579204
+ volume id:171 size:1081881312 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596485
+ volume id:172 size:1074787312 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385
+ volume id:173 size:1074154648 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2430948 replica_placement:100 version:3 modified_at_second:1615591904
+ volume id:175 size:1077742504 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559515
+ volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169
+ volume id:177 size:1074120120 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3719562 replica_placement:100 version:3 modified_at_second:1615516891
+ volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369
+ volume id:180 size:1078438536 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773242
+ volume id:181 size:1074571768 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745116
+ volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1416142 replica_placement:100 version:3 modified_at_second:1615568922
+ volume id:183 size:1076361448 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572982
+ volume id:184 size:1074594160 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578
+ volume id:185 size:1074099624 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506832
+ volume id:186 size:1074220864 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614933
+ volume id:187 size:1074395944 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590490
+ volume id:188 size:1074732792 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645
+ volume id:189 size:1074847896 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661
+ volume id:190 size:1074008912 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447161
+ volume id:191 size:1075493024 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357456
+ volume id:192 size:1075857400 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632
+ volume id:193 size:1076616768 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507875
+ volume id:194 size:1073985624 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559499
+ volume id:195 size:1074158312 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 replica_placement:100 version:3 modified_at_second:1615559331
+ volume id:196 size:1074594784 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501688
+ volume id:197 size:1075423296 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485253
+ volume id:198 size:1075104712 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623369
+ volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585984
+ volume id:200 size:1075630536 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252
+ volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497
+ volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728
+ volume id:204 size:1079766872 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565701
+ volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314
+ volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630989
+ volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061
+ volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:212 size:1078293448 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:213 size:1093588072 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383
+ volume id:215 size:1074798704 collection:"collection1" file_count:2819 delete_count:31 deleted_byte_count:10873569 replica_placement:100 version:3 modified_at_second:1615586386
+ volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638
+ volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637
+ volume id:219 size:1092298816 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316
+ volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 modified_at_second:1615579317
+ volume id:221 size:1106545456 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138
+ volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243
+ volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244
+ volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434684
+ volume id:226 size:1103504768 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601867
+ volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489
+ volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537
+ volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354
+ volume id:236 size:1089476136 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727
+ volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030
+ volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630841
+ volume id:240 size:424791528 collection:"collection1" file_count:733 delete_count:13 deleted_byte_count:7515220 replica_placement:100 version:3 modified_at_second:1615631670
+ volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 version:3 modified_at_second:1615632006
+ volume id:242 size:1075383392 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852116
+ volume id:243 size:1088174704 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852203
+ volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1615628825
+ volume id:246 size:1075998672 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852106
+ volume id:247 size:1075859808 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152
+ volume id:248 size:1084301208 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285
+ volume id:250 size:1080572168 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129
+ volume id:252 size:1075065264 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861200
+ volume id:253 size:1087328880 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861258
+ volume id:254 size:1074830736 collection:"collection2" file_count:14140 delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115
+ volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233
+ volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100
+ volume id:257 size:1082621720 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866402
+ volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:259 size:1075507848 collection:"collection2" file_count:18079 delete_count:2 deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381
+ volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226
+ volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:262 size:1078492584 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601
+ volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629
+ volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1614871543
+ volume id:266 size:1075617552 collection:"collection2" file_count:20075 delete_count:1 deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526
+ volume id:267 size:1075699376 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294
+ volume id:270 size:1076876424 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469
+ volume id:271 size:1076992704 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504
+ volume id:272 size:1076145912 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139
+ volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046
+ volume id:274 size:1075994184 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113
+ volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147
+ volume id:276 size:1076899880 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003
+ volume id:278 size:1078798632 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 modified_at_second:1614890292
+ volume id:280 size:1077432160 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262
+ volume id:281 size:1077581064 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237
+ volume id:282 size:1075232184 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304
+ volume id:283 size:1080178880 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896623
+ volume id:286 size:1077464816 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401
+ volume id:287 size:1074590536 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904875
+ volume id:288 size:1075406920 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738
+ volume id:289 size:1075284312 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977
+ volume id:290 size:1074723800 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924
+ volume id:292 size:1092010672 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 version:3 modified_at_second:1614912325
+ volume id:295 size:1074702320 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 modified_at_second:1614911929
+ volume id:296 size:1077824056 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301
+ volume id:297 size:1080229176 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481
+ volume id:298 size:1075410024 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474
+ volume id:302 size:1077559640 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500
+ volume id:304 size:1081038944 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925569
+ volume id:305 size:1074185376 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312
+ volume id:306 size:1074763952 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462
+ volume id:307 size:1076567912 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448
+ volume id:308 size:1074022336 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401
+ volume id:311 size:1088248344 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931463
+ volume id:312 size:1075037528 collection:"collection2" file_count:22524 replica_placement:100 version:3 modified_at_second:1614937831
+ volume id:313 size:1074875960 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755
+ volume id:316 size:1077720776 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838
+ volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862
+ volume id:322 size:1078471536 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950572
+ volume id:323 size:1074608056 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526
+ volume id:325 size:1080701232 collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525
+ volume id:326 size:1076059920 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619
+ volume id:327 size:1076121304 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487
+ volume id:328 size:1074767816 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362
+ volume id:329 size:1076691960 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 replica_placement:100 version:3 modified_at_second:1614956485
+ volume id:330 size:1080825760 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956476
+ volume id:331 size:1074957256 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259
+ volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869
+ volume id:333 size:1074270160 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697
+ volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978
+ volume id:335 size:1076235136 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970
+ volume id:337 size:1075646896 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937
+ volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969995
+ volume id:340 size:1079462152 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969977
+ volume id:341 size:1074448360 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858
+ volume id:343 size:1075345072 collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148
+ volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1614977511
+ volume id:347 size:1075145248 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727
+ volume id:348 size:1080623544 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984604
+ volume id:349 size:1075957672 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748
+ volume id:351 size:1078795120 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816
+ volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130
+ volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951
+ volume id:354 size:1085214104 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992208
+ volume id:357 size:1074276152 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792
+ volume id:359 size:1074211296 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711
+ volume id:360 size:1075532512 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770
+ volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668
+ volume id:362 size:1074074176 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407
+ volume id:363 size:1078859640 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004823
+ volume id:364 size:1081280880 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813
+ volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776
+ volume id:366 size:1075267272 collection:"collection2" file_count:21856 delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703
+ volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985
+ volume id:368 size:1074821960 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010877
+ volume id:369 size:1091472040 collection:"collection2" file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011021
+ volume id:370 size:1076040544 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877
+ volume id:371 size:1078806216 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010995
+ volume id:372 size:1076193344 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911
+ volume id:373 size:1080928088 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016878
+ volume id:374 size:1085011176 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917
+ volume id:376 size:1074845832 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916
+ volume id:377 size:957434264 collection:"collection2" file_count:14929 delete_count:1 deleted_byte_count:43099 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:379 size:1014108528 collection:"collection2" file_count:15362 delete_count:6 deleted_byte_count:2481613 replica_placement:100 version:3 modified_at_second:1615632323
+ Disk hdd total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660
+ DataNode 192.168.1.5:8080 total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660
+ Rack DefaultRack total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660
+ DataCenter dc5 total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660
+total size:775256653592 file_count:10478712 deleted_file:33754 deleted_bytes:10839266043
+`
diff --git a/weed/shell/command_volume_mark.go b/weed/shell/command_volume_mark.go
new file mode 100644
index 000000000..19b614310
--- /dev/null
+++ b/weed/shell/command_volume_mark.go
@@ -0,0 +1,55 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeMark{})
+}
+
+type commandVolumeMark struct {
+}
+
+func (c *commandVolumeMark) Name() string {
+ return "volume.mark"
+}
+
+func (c *commandVolumeMark) Help() string {
+ return `mark a volume writable or readonly on one volume server
+
+ volume.mark -node <volume server host:port> -volumeId <volume id> -writable or -readonly
+`
+}
+
+func (c *commandVolumeMark) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ volMarkCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volMarkCommand.Int("volumeId", 0, "the volume id")
+ nodeStr := volMarkCommand.String("node", "", "the volume server <host>:<port>")
+ writable := volMarkCommand.Bool("writable", false, "volume mark writable")
+ readonly := volMarkCommand.Bool("readonly", false, "volume mark readonly")
+ if err = volMarkCommand.Parse(args); err != nil {
+ return nil
+ }
+ markWritable := false
+ if (*writable && *readonly) || (!*writable && !*readonly) {
+ return fmt.Errorf("use -readonly or -writable")
+ } else if *writable {
+ markWritable = true
+ }
+
+ sourceVolumeServer := *nodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
+ return markVolumeWritable(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, markWritable)
+}
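For orientation, a minimal sketch (not part of this change) of driving the new markVolumeWritable helper programmatically; it assumes package shell scope, and the server address and volume id are illustrative placeholders:

	// equivalent to: volume.mark -node 192.168.1.5:8080 -volumeId 241 -readonly
	func markVolumeReadonlyExample(grpcDialOption grpc.DialOption) error {
		return markVolumeWritable(grpcDialOption, needle.VolumeId(241), "192.168.1.5:8080", false)
	}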
diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go
index 50a307492..bd588d0b5 100644
--- a/weed/shell/command_volume_mount.go
+++ b/weed/shell/command_volume_mount.go
@@ -2,7 +2,7 @@ package shell
import (
"context"
- "fmt"
+ "flag"
"io"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -25,7 +25,7 @@ func (c *commandVolumeMount) Name() string {
func (c *commandVolumeMount) Help() string {
return `mount a volume from one volume server
- volume.mount <volume server host:port> <volume id>
+ volume.mount -node <volume server host:port> -volumeId <volume id>
This command mounts a volume from one volume server.
@@ -34,25 +34,28 @@ func (c *commandVolumeMount) Help() string {
func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) != 2 {
- fmt.Fprintf(writer, "received args: %+v\n", args)
- return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- sourceVolumeServer, volumeIdString := args[0], args[1]
- volumeId, err := needle.NewVolumeId(volumeIdString)
- if err != nil {
- return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ volMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volMountCommand.Int("volumeId", 0, "the volume id")
+ nodeStr := volMountCommand.String("node", "", "the volume server <host>:<port>")
+ if err = volMountCommand.Parse(args); err != nil {
+ return nil
}
- ctx := context.Background()
- return mountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ sourceVolumeServer := *nodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
+ return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
-func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(volumeId),
})
return mountErr
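A minimal usage sketch (not part of this change), assuming package shell scope where mountVolume lives; the volume id and server address are placeholders:

	// mount volume 241 on the given volume server
	func mountVolumeExample(grpcDialOption grpc.DialOption) error {
		return mountVolume(grpcDialOption, needle.VolumeId(241), "192.168.1.5:8080")
	}

Dropping the ctx parameter matches the pattern applied across these commands: each gRPC call now constructs its own context.Background() at the call site.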
diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go
index 08d87c988..84f33db34 100644
--- a/weed/shell/command_volume_move.go
+++ b/weed/shell/command_volume_move.go
@@ -2,6 +2,7 @@ package shell
import (
"context"
+ "flag"
"fmt"
"io"
"log"
@@ -25,9 +26,10 @@ func (c *commandVolumeMove) Name() string {
}
func (c *commandVolumeMove) Help() string {
- return `<experimental> move a live volume from one volume server to another volume server
+ return `move a live volume from one volume server to another volume server
- volume.move <source volume server host:port> <target volume server host:port> <volume id>
+ volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id>
+ volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id> -disk [hdd|ssd|<tag>]
This command moves a live volume from one volume server to another volume server. Here are the steps:
@@ -39,46 +41,53 @@ func (c *commandVolumeMove) Help() string {
Now the master will mark this volume id as writable.
5. This command asks the source volume server to delete the source volume
+ The option "-disk [hdd|ssd|<tag>]" can be used to change the volume disk type.
+
`
}
func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) != 3 {
- fmt.Fprintf(writer, "received args: %+v\n", args)
- return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
- volumeId, err := needle.NewVolumeId(volumeIdString)
- if err != nil {
- return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ volMoveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volMoveCommand.Int("volumeId", 0, "the volume id")
+ sourceNodeStr := volMoveCommand.String("source", "", "the source volume server <host>:<port>")
+ targetNodeStr := volMoveCommand.String("target", "", "the target volume server <host>:<port>")
+ diskTypeStr := volMoveCommand.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+ if err = volMoveCommand.Parse(args); err != nil {
+ return nil
}
+ sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
if sourceVolumeServer == targetVolumeServer {
return fmt.Errorf("source and target volume servers are the same!")
}
- ctx := context.Background()
- return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second)
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second, *diskTypeStr)
}
// LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests.
-func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) {
+func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration, diskType string) (err error) {
log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
- lastAppendAtNs, err := copyVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, diskType)
if err != nil {
return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
}
log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
- if err = tailVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil {
+ if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil {
return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
}
log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer)
- if err = deleteVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer); err != nil {
+ if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil {
return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err)
}
@@ -86,12 +95,50 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI
return nil
}
-func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) {
+func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, diskType string) (lastAppendAtNs uint64, err error) {
+
+ // check whether the volume is already read-only; if it is not, mark it
+ // read-only for the duration of the copy and restore it to writable
+ // before returning
+ var shouldMarkWritable bool
+ defer func() {
+ if !shouldMarkWritable {
+ return
+ }
+
+ clientErr := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, writableErr := volumeServerClient.VolumeMarkWritable(context.Background(), &volume_server_pb.VolumeMarkWritableRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return writableErr
+ })
+ if clientErr != nil {
+ log.Printf("failed to mark volume %d as writable after copy from %s: %v", volumeId, sourceVolumeServer, clientErr)
+ }
+ }()
+
+ err = operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, statusErr := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{
+ VolumeId: uint32(volumeId),
+ })
+ if statusErr == nil && !resp.IsReadOnly {
+ shouldMarkWritable = true
+ _, readonlyErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return readonlyErr
+ }
+ return statusErr
+ })
+ if err != nil {
+ return
+ }
err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
+ resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
VolumeId: uint32(volumeId),
SourceDataNode: sourceVolumeServer,
+ DiskType: diskType,
})
if replicateErr == nil {
lastAppendAtNs = resp.LastAppendAtNs
@@ -102,10 +149,10 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne
return
}
-func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) {
+func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) {
return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{
+ _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{
VolumeId: uint32(volumeId),
SinceNs: lastAppendAtNs,
IdleTimeoutSeconds: uint32(idleTimeout.Seconds()),
@@ -116,11 +163,26 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne
}
-func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{
+ _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{
VolumeId: uint32(volumeId),
})
return deleteErr
})
}
+
+func markVolumeWritable(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string, writable bool) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ if writable {
+ _, err = volumeServerClient.VolumeMarkWritable(context.Background(), &volume_server_pb.VolumeMarkWritableRequest{
+ VolumeId: uint32(volumeId),
+ })
+ } else {
+ _, err = volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
+ VolumeId: uint32(volumeId),
+ })
+ }
+ return err
+ })
+}
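
The copy path above now brackets VolumeCopy with a read-only window on the source volume: the volume is marked read-only before the copy starts, and the deferred call marks it writable again afterwards (the real code additionally remembers whether the volume was writable to begin with). A minimal sketch of that bracket, reusing the markVolumeWritable helper introduced above with a hypothetical doWork callback standing in for the copy:

	// withVolumeReadonly runs doWork while the volume is read-only and
	// restores writability afterwards, even if doWork fails.
	func withVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, server string, doWork func() error) error {
		if err := markVolumeWritable(grpcDialOption, volumeId, server, false); err != nil {
			return err
		}
		defer func() {
			if err := markVolumeWritable(grpcDialOption, volumeId, server, true); err != nil {
				log.Printf("failed to restore volume %d writable on %s: %v", volumeId, server, err)
			}
		}()
		return doWork()
	}
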
diff --git a/weed/shell/command_volume_server_evacuate.go b/weed/shell/command_volume_server_evacuate.go
new file mode 100644
index 000000000..f21d0334c
--- /dev/null
+++ b/weed/shell/command_volume_server_evacuate.go
@@ -0,0 +1,221 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "io"
+ "os"
+ "sort"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeServerEvacuate{})
+}
+
+type commandVolumeServerEvacuate struct {
+}
+
+func (c *commandVolumeServerEvacuate) Name() string {
+ return "volumeServer.evacuate"
+}
+
+func (c *commandVolumeServerEvacuate) Help() string {
+ return `move out all data on a volume server
+
+ volumeServer.evacuate -node <host:port> [-skipNonMoveable] [-force]
+
+ This command moves all data away from the volume server.
+ The volumes will be redistributed to the other volume servers.
+
+ Usually this is used to prepare for shutting down or upgrading the volume server.
+
+ Sometimes a volume can not be moved because there is no
+ good destination that meets the replication requirement.
+ E.g. a volume with replication 001 in a cluster of only 2 volume servers can not be moved.
+ You can use "-skipNonMoveable" to skip such volumes and move the rest.
+
+`
+}
+
+func (c *commandVolumeServerEvacuate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ vsEvacuateCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeServer := vsEvacuateCommand.String("node", "", "<host>:<port> of the volume server")
+ skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that can not be moved")
+ applyChange := vsEvacuateCommand.Bool("force", false, "actually apply the changes")
+ if err = vsEvacuateCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *volumeServer == "" {
+ return fmt.Errorf("need to specify volume server by -node=<host>:<port>")
+ }
+
+ return volumeServerEvacuate(commandEnv, *volumeServer, *skipNonMoveable, *applyChange, writer)
+
+}
+
+func volumeServerEvacuate(commandEnv *CommandEnv, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) (err error) {
+ // 1. confirm the volume server is part of the cluster
+ // 2. collect all other volume servers, sort by empty slots
+ // 3. move to any other volume server as long as it satisfy the replication requirements
+
+ // list all the volumes
+ // collect topology information
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+
+ if err := evacuateNormalVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
+ return err
+ }
+
+ if err := evacuateEcVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func evacuateNormalVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
+ // find this volume server
+ volumeServers := collectVolumeServersByDc(topologyInfo, "")
+ thisNode, otherNodes := nodesOtherThan(volumeServers, volumeServer)
+ if thisNode == nil {
+ return fmt.Errorf("%s is not found in this cluster", volumeServer)
+ }
+
+ // move away normal volumes
+ volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+ for _, diskInfo := range thisNode.info.DiskInfos {
+ for _, vol := range diskInfo.VolumeInfos {
+ hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
+ if err != nil {
+ return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
+ }
+ if !hasMoved {
+ if skipNonMoveable {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
+ fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
+ } else {
+ return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func evacuateEcVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
+ // find this ec volume server
+ ecNodes, _ := collectEcVolumeServersByDc(topologyInfo, "")
+ thisNode, otherNodes := ecNodesOtherThan(ecNodes, volumeServer)
+ if thisNode == nil {
+ return fmt.Errorf("%s is not found in this cluster\n", volumeServer)
+ }
+
+ // move away ec volumes
+ for _, diskInfo := range thisNode.info.DiskInfos {
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
+ if err != nil {
+ return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
+ }
+ if !hasMoved {
+ if skipNonMoveable {
+ fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
+ } else {
+ return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) {
+
+ for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
+
+ sort.Slice(otherNodes, func(i, j int) bool {
+ return otherNodes[i].localShardIdCount(ecShardInfo.Id) < otherNodes[j].localShardIdCount(ecShardInfo.Id)
+ })
+
+ for i := 0; i < len(otherNodes); i++ {
+ emptyNode := otherNodes[i]
+ collectionPrefix := ""
+ if ecShardInfo.Collection != "" {
+ collectionPrefix = ecShardInfo.Collection + "_"
+ }
+ fmt.Fprintf(os.Stdout, "moving ec volume %s%d.%d %s => %s\n", collectionPrefix, ecShardInfo.Id, shardId, thisNode.info.Id, emptyNode.info.Id)
+ err = moveMountedShardToEcNode(commandEnv, thisNode, ecShardInfo.Collection, needle.VolumeId(ecShardInfo.Id), shardId, emptyNode, applyChange)
+ if err != nil {
+ return
+ } else {
+ hasMoved = true
+ break
+ }
+ }
+ if !hasMoved {
+ return
+ }
+ }
+
+ return
+}
+
+func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) {
+ fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType))
+ for _, n := range otherNodes {
+ n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool {
+ return v.DiskType == vol.DiskType
+ })
+ }
+ sort.Slice(otherNodes, func(i, j int) bool {
+ return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn)
+ })
+
+ for i := 0; i < len(otherNodes); i++ {
+ emptyNode := otherNodes[i]
+ hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, thisNode, vol, emptyNode, applyChange)
+ if err != nil {
+ return
+ }
+ if hasMoved {
+ break
+ }
+ }
+ return
+}
+
+func nodesOtherThan(volumeServers []*Node, thisServer string) (thisNode *Node, otherNodes []*Node) {
+ for _, node := range volumeServers {
+ if node.info.Id == thisServer {
+ thisNode = node
+ continue
+ }
+ otherNodes = append(otherNodes, node)
+ }
+ return
+}
+
+func ecNodesOtherThan(volumeServers []*EcNode, thisServer string) (thisNode *EcNode, otherNodes []*EcNode) {
+ for _, node := range volumeServers {
+ if node.info.Id == thisServer {
+ thisNode = node
+ continue
+ }
+ otherNodes = append(otherNodes, node)
+ }
+ return
+}
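
Both evacuation paths above share the same shape: sort the candidate servers, then try each one until a move succeeds or the list is exhausted. A condensed sketch of that first-success scan, with a hypothetical tryMove callback standing in for maybeMoveOneVolume or moveMountedShardToEcNode:

	// tryFirst attempts tryMove against each candidate in order; it stops
	// at the first successful move, and any error aborts the scan.
	func tryFirst(candidates []*Node, tryMove func(*Node) (hasMoved bool, err error)) (hasMoved bool, err error) {
		for _, candidate := range candidates {
			hasMoved, err = tryMove(candidate)
			if err != nil || hasMoved {
				return
			}
		}
		return
	}
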
diff --git a/weed/shell/command_volume_server_evacuate_test.go b/weed/shell/command_volume_server_evacuate_test.go
new file mode 100644
index 000000000..5753af78b
--- /dev/null
+++ b/weed/shell/command_volume_server_evacuate_test.go
@@ -0,0 +1,17 @@
+package shell
+
+import (
+ "os"
+ "testing"
+)
+
+func TestVolumeServerEvacuate(t *testing.T) {
+ topologyInfo := parseOutput(topoData)
+
+ volumeServer := "192.168.1.4:8080"
+
+ if err := evacuateNormalVolumes(nil, topologyInfo, volumeServer, true, false, os.Stdout); err != nil {
+ t.Errorf("evacuate: %v", err)
+ }
+
+}
diff --git a/weed/shell/command_volume_server_leave.go b/weed/shell/command_volume_server_leave.go
new file mode 100644
index 000000000..2a2e56e86
--- /dev/null
+++ b/weed/shell/command_volume_server_leave.go
@@ -0,0 +1,67 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "google.golang.org/grpc"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeServerLeave{})
+}
+
+type commandVolumeServerLeave struct {
+}
+
+func (c *commandVolumeServerLeave) Name() string {
+ return "volumeServer.leave"
+}
+
+func (c *commandVolumeServerLeave) Help() string {
+ return `stop a volume server from sending heartbeats to the master
+
+ volumeServer.leave -node <volume server host:port>
+
+ This command enables gracefully shutting down the volume server.
+ The volume server will stop sending heartbeats to the master.
+ After draining the traffic for a few seconds, you can safely shut down the volume server.
+
+ This operation is not reversible unless the volume server is restarted.
+`
+}
+
+func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ vsLeaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeServer := vsLeaveCommand.String("node", "", "<host>:<port> of the volume server")
+ if err = vsLeaveCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *volumeServer == "" {
+ return fmt.Errorf("need to specify volume server by -node=<host>:<port>")
+ }
+
+ return volumeServerLeave(commandEnv.option.GrpcDialOption, *volumeServer, writer)
+
+}
+
+func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer string, writer io.Writer) (err error) {
+ return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, leaveErr := volumeServerClient.VolumeServerLeave(context.Background(), &volume_server_pb.VolumeServerLeaveRequest{})
+ if leaveErr != nil {
+ fmt.Fprintf(writer, "ask volume server %s to leave: %v\n", volumeServer, leaveErr)
+ } else {
+ fmt.Fprintf(writer, "stopped heartbeat in volume server %s. After a few seconds to drain traffic, it will be safe to stop the volume server.\n", volumeServer)
+ }
+ return leaveErr
+ })
+}
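
Combined with volumeServer.evacuate above, this enables a graceful decommission. A hedged sketch chaining the two helpers from this patch; commandEnv and the server address are assumed to be prepared by the caller, and the shell's exclusive lock must already be held:

	func decommissionVolumeServer(commandEnv *CommandEnv, server string, writer io.Writer) error {
		// move all volumes and ec shards off the server first
		// (skipNonMoveable=true, applyChange=true)
		if err := volumeServerEvacuate(commandEnv, server, true, true, writer); err != nil {
			return err
		}
		// then stop its heartbeats so the master stops routing writes to it
		return volumeServerLeave(commandEnv.option.GrpcDialOption, server, writer)
	}
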
diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go
index 88e2e8b92..33166ce65 100644
--- a/weed/shell/command_volume_tier_download.go
+++ b/weed/shell/command_volume_tier_download.go
@@ -26,7 +26,7 @@ func (c *commandVolumeTierDownload) Name() string {
}
func (c *commandVolumeTierDownload) Help() string {
- return `move the dat file of a volume to a remote tier
+ return `download the dat file of a volume from a remote tier
volume.tier.download [-collection=""]
volume.tier.download [-collection=""] -volumeId=<volume_id>
@@ -42,6 +42,10 @@ func (c *commandVolumeTierDownload) Help() string {
func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := tierCommand.Int("volumeId", 0, "the volume id")
collection := tierCommand.String("collection", "", "the collection name")
@@ -49,18 +53,17 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// collect topology information
- topologyInfo, err := collectTopologyInfo(ctx, commandEnv)
+ topologyInfo, _, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
// volumeId is provided
if vid != 0 {
- return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid)
+ return doVolumeTierDownload(commandEnv, writer, *collection, vid)
}
// apply to all volumes in the collection
@@ -71,7 +74,7 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr
}
fmt.Printf("tier download volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil {
+ if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil {
return err
}
}
@@ -83,9 +86,11 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s
vidMap := make(map[uint32]bool)
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
- for _, v := range dn.VolumeInfos {
- if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
- vidMap[v.Id] = true
+ for _, diskInfo := range dn.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
+ vidMap[v.Id] = true
+ }
}
}
})
@@ -97,7 +102,7 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s
return
}
-func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) {
+func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
@@ -107,7 +112,7 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io
// TODO parallelize this
for _, loc := range locations {
// copy the .dat file from remote tier to local
- err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url)
+ err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url)
if err != nil {
return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err)
}
@@ -116,10 +121,10 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io
return nil
}
-func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error {
+func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error {
err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
+ stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -145,14 +150,14 @@ func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOpti
return downloadErr
}
- _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
VolumeId: uint32(volumeId),
})
if unmountErr != nil {
return unmountErr
}
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(volumeId),
})
if mountErr != nil {
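
The collectRemoteVolumes hunk above reflects a topology change that recurs throughout this patch: volumes now hang off per-disk-type DiskInfos instead of directly off the data node. A small sketch of the nested walk the changed call sites repeat:

	// eachVolume visits every volume on a data node under the new layout,
	// where dn.DiskInfos groups volumes by disk type.
	func eachVolume(dn *master_pb.DataNodeInfo, fn func(v *master_pb.VolumeInformationMessage)) {
		for _, diskInfo := range dn.DiskInfos {
			for _, v := range diskInfo.VolumeInfos {
				fn(v)
			}
		}
	}
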
diff --git a/weed/shell/command_volume_tier_move.go b/weed/shell/command_volume_tier_move.go
new file mode 100644
index 000000000..d6a49d6e1
--- /dev/null
+++ b/weed/shell/command_volume_tier_move.go
@@ -0,0 +1,177 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeTierMove{})
+}
+
+type commandVolumeTierMove struct {
+}
+
+func (c *commandVolumeTierMove) Name() string {
+ return "volume.tier.move"
+}
+
+func (c *commandVolumeTierMove) Help() string {
+ return `change a volume from one disk type to another
+
+ volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h] [-force]
+
+ Even if the volume is replicated, only one replica will be moved to the new disk type, and the remaining replicas will be dropped.
+ So "volume.fix.replication" and "volume.balance" should be run afterwards.
+
+`
+}
+
+func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ collection := tierCommand.String("collection", "", "the collection name")
+ fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume is at least this percentage of the max volume size")
+ quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period")
+ source := tierCommand.String("fromDiskType", "", "the source disk type")
+ target := tierCommand.String("toDiskType", "", "the target disk type")
+ applyChange := tierCommand.Bool("force", false, "actually apply the changes")
+ if err = tierCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ fromDiskType := types.ToDiskType(*source)
+ toDiskType := types.ToDiskType(*target)
+
+ if fromDiskType == toDiskType {
+ return fmt.Errorf("source tier %s is the same as target tier %s", fromDiskType, toDiskType)
+ }
+
+ // collect topology information
+ topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+
+ // collect all volumes that should change
+ volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collection, *fullPercentage, *quietPeriod)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("tier move volumes: %v\n", volumeIds)
+
+ _, allLocations := collectVolumeReplicaLocations(topologyInfo)
+ for _, vid := range volumeIds {
+ if err = doVolumeTierMove(commandEnv, writer, *collection, vid, toDiskType, allLocations, *applyChange); err != nil {
+ fmt.Printf("tier move volume %d: %v\n", vid, err)
+ }
+ }
+
+ return nil
+}
+
+func isOneOf(server string, locations []wdclient.Location) bool {
+ for _, loc := range locations {
+ if server == loc.Url {
+ return true
+ }
+ }
+ return false
+}
+
+func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location, applyChanges bool) (err error) {
+ // find volume location
+ locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+ if !found {
+ return fmt.Errorf("volume %d not found", vid)
+ }
+
+ // find one server with the most empty volume slots with target disk type
+ hasFoundTarget := false
+ keepDataNodesSorted(allLocations, toDiskType)
+ fn := capacityByFreeVolumeCount(toDiskType)
+ for _, dst := range allLocations {
+ if fn(dst.dataNode) > 0 && !hasFoundTarget {
+ // ask the volume server to replicate the volume
+ if isOneOf(dst.dataNode.Id, locations) {
+ continue
+ }
+ sourceVolumeServer := ""
+ for _, loc := range locations {
+ if loc.Url != dst.dataNode.Id {
+ sourceVolumeServer = loc.Url
+ }
+ }
+ if sourceVolumeServer == "" {
+ continue
+ }
+ fmt.Fprintf(writer, "moving volume %d from %s to %s with disk type %s ...\n", vid, sourceVolumeServer, dst.dataNode.Id, toDiskType.ReadableString())
+ hasFoundTarget = true
+
+ if !applyChanges {
+ break
+ }
+
+ // mark all replicas as read only
+ if err = markVolumeReadonly(commandEnv.option.GrpcDialOption, vid, locations); err != nil {
+ return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
+ }
+ if err = LiveMoveVolume(commandEnv.option.GrpcDialOption, vid, sourceVolumeServer, dst.dataNode.Id, 5*time.Second, toDiskType.ReadableString()); err != nil {
+ return fmt.Errorf("move volume %d %s => %s : %v", vid, locations[0].Url, dst.dataNode.Id, err)
+ }
+
+ // remove the remaining replicas
+ for _, loc := range locations {
+ if loc.Url != dst.dataNode.Id {
+ if err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.Url); err != nil {
+ fmt.Fprintf(writer, "failed to delete volume %d on %s\n", vid, loc.Url)
+ }
+ }
+ }
+ }
+ }
+
+ if !hasFoundTarget {
+ fmt.Fprintf(writer, "can not find disk type %s for volume %d\n", toDiskType.ReadableString(), vid)
+ }
+
+ return nil
+}
+
+func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
+
+ quietSeconds := int64(quietPeriod / time.Second)
+ nowUnixSeconds := time.Now().Unix()
+
+ fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)
+
+ vidMap := make(map[uint32]bool)
+ eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, diskInfo := range dn.DiskInfos {
+ for _, v := range diskInfo.VolumeInfos {
+ if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {
+ if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
+ vidMap[v.Id] = true
+ }
+ }
+ }
+ }
+ })
+
+ for vid := range vidMap {
+ vids = append(vids, needle.VolumeId(vid))
+ }
+
+ return
+}
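
For a concrete feel of the fullPercent cutoff in collectVolumeIdsForTierChange: assuming the master's default 30000 MB volume size limit (an assumption, not stated in this patch) and the default -fullPercent=95, only volumes above roughly 27.8 GiB qualify. A runnable sketch of the arithmetic:

	package main

	import "fmt"

	func main() {
		volumeSizeLimitMb := uint64(30000) // assumed master default
		fullPercentage := 95.0             // the -fullPercent default
		thresholdBytes := fullPercentage / 100 * float64(volumeSizeLimitMb) * 1024 * 1024
		fmt.Printf("volumes larger than %.0f bytes (~%.1f GiB) qualify\n",
			thresholdBytes, thresholdBytes/(1<<30))
		// prints: volumes larger than 29884416000 bytes (~27.8 GiB) qualify
	}
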
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
index b3a0d9fe8..f92cdc3e4 100644
--- a/weed/shell/command_volume_tier_upload.go
+++ b/weed/shell/command_volume_tier_upload.go
@@ -26,7 +26,7 @@ func (c *commandVolumeTierUpload) Name() string {
}
func (c *commandVolumeTierUpload) Help() string {
- return `move the dat file of a volume to a remote tier
+ return `upload the dat file of a volume to a remote tier
volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h]
volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<storage_backend> [-keepLocalDatFile]
@@ -56,6 +56,10 @@ func (c *commandVolumeTierUpload) Help() string {
func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := tierCommand.Int("volumeId", 0, "the volume id")
collection := tierCommand.String("collection", "", "the collection name")
@@ -67,23 +71,22 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// volumeId is provided
if vid != 0 {
- return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+ return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
}
// apply to all volumes in the collection
// reusing collectVolumeIdsForEcEncode for now
- volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
if err != nil {
return err
}
fmt.Printf("tier upload volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
+ if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
return err
}
}
@@ -91,20 +94,20 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ
return nil
}
-func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
+func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
return fmt.Errorf("volume %d not found", vid)
}
- err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
if err != nil {
return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
}
// copy the .dat file to remote tier
- err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
+ err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
if err != nil {
return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err)
}
@@ -112,10 +115,10 @@ func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.W
return nil
}
-func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
+func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
+ stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
DestinationBackendName: dest,
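
The signature changes here follow a pattern applied across the whole patch: helpers no longer accept a caller-supplied context.Context, and each gRPC call creates its own context.Background(). A minimal self-contained sketch of the shape this converges on, with doRPC standing in for any volume-server call:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// doRPC stands in for a volume-server gRPC call that honors cancellation.
	func doRPC(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // pretend work
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	// helper creates its context per call, as uploadDatToRemoteTier now
	// does, instead of threading one through every shell-command signature.
	func helper() error {
		return doRPC(context.Background())
	}

	func main() {
		fmt.Println(helper())
	}
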
diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go
index 8096f34d8..f7e5a501b 100644
--- a/weed/shell/command_volume_unmount.go
+++ b/weed/shell/command_volume_unmount.go
@@ -2,7 +2,7 @@ package shell
import (
"context"
- "fmt"
+ "flag"
"io"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -25,7 +25,7 @@ func (c *commandVolumeUnmount) Name() string {
func (c *commandVolumeUnmount) Help() string {
return `unmount a volume from one volume server
- volume.unmount <volume server host:port> <volume id>
+ volume.unmount -node <volume server host:port> -volumeId <volume id>
This command unmounts a volume from one volume server.
@@ -34,25 +34,28 @@ func (c *commandVolumeUnmount) Help() string {
func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- if len(args) != 2 {
- fmt.Fprintf(writer, "received args: %+v\n", args)
- return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
}
- sourceVolumeServer, volumeIdString := args[0], args[1]
- volumeId, err := needle.NewVolumeId(volumeIdString)
- if err != nil {
- return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ volUnmountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := volUnmountCommand.Int("volumeId", 0, "the volume id")
+ nodeStr := volUnmountCommand.String("node", "", "the volume server <host>:<port>")
+ if err = volUnmountCommand.Parse(args); err != nil {
+ return nil
}
- ctx := context.Background()
- return unmountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ sourceVolumeServer := *nodeStr
+
+ volumeId := needle.VolumeId(*volumeIdInt)
+
+ return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
-func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
VolumeId: uint32(volumeId),
})
return unmountErr
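
The unmount command shows the argument-handling migration applied to most commands in this patch: positional arguments are replaced by a per-command flag.FlagSet, using ContinueOnError so that a bad flag prints usage instead of terminating the shell. A runnable sketch of the idiom, with placeholder argument values:

	package main

	import (
		"flag"
		"fmt"
	)

	func main() {
		// one FlagSet per command, named after the command
		cmdFlags := flag.NewFlagSet("volume.unmount", flag.ContinueOnError)
		volumeId := cmdFlags.Int("volumeId", 0, "the volume id")
		node := cmdFlags.String("node", "", "the volume server <host>:<port>")
		// placeholder tokens; the shell passes everything after the command name
		if err := cmdFlags.Parse([]string{"-node", "127.0.0.1:8080", "-volumeId", "7"}); err != nil {
			return // the real commands return nil here so the shell keeps running
		}
		fmt.Println(*node, *volumeId) // 127.0.0.1:8080 7
	}
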
diff --git a/weed/shell/command_volume_vacuum.go b/weed/shell/command_volume_vacuum.go
new file mode 100644
index 000000000..56f85f4fe
--- /dev/null
+++ b/weed/shell/command_volume_vacuum.go
@@ -0,0 +1,53 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandVacuum{})
+}
+
+type commandVacuum struct {
+}
+
+func (c *commandVacuum) Name() string {
+ return "volume.vacuum"
+}
+
+func (c *commandVacuum) Help() string {
+ return `compact volumes whose deleted entries exceed the garbage threshold
+
+ volume.vacuum [-garbageThreshold=0.3]
+
+`
+}
+
+func (c *commandVacuum) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ volumeVacuumCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ garbageThreshold := volumeVacuumCommand.Float64("garbageThreshold", 0.3, "vacuum when the ratio of deleted entries is more than this threshold")
+ if err = volumeVacuumCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err = client.VacuumVolume(context.Background(), &master_pb.VacuumVolumeRequest{
+ GarbageThreshold: float32(*garbageThreshold),
+ })
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ return nil
+}
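
The -garbageThreshold value is a ratio, not a byte count: the master vacuums a volume when its deleted bytes exceed that fraction of the volume's data size. A sketch of the comparison, with illustrative field names rather than the actual master_pb types:

	package main

	import "fmt"

	// garbageRatio models the fraction the master compares against the
	// -garbageThreshold flag; field names here are illustrative.
	func garbageRatio(deletedByteCount, size uint64) float64 {
		if size == 0 {
			return 0
		}
		return float64(deletedByteCount) / float64(size)
	}

	func main() {
		// a 10 GiB volume carrying 4 GiB of deleted entries exceeds the 0.3 default
		fmt.Println(garbageRatio(4<<30, 10<<30) > 0.3) // true
	}
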
diff --git a/weed/shell/commands.go b/weed/shell/commands.go
index a6a0f7dec..0e285214b 100644
--- a/weed/shell/commands.go
+++ b/weed/shell/commands.go
@@ -1,19 +1,19 @@
package shell
import (
- "context"
"fmt"
"io"
"net/url"
- "path/filepath"
"strconv"
"strings"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
+ "github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks"
)
type ShellOptions struct {
@@ -29,6 +29,7 @@ type CommandEnv struct {
env map[string]string
MasterClient *wdclient.MasterClient
option ShellOptions
+ locker *exclusive_locks.ExclusiveLocker
}
type command interface {
@@ -42,55 +43,67 @@ var (
)
func NewCommandEnv(options ShellOptions) *CommandEnv {
- return &CommandEnv{
- env: make(map[string]string),
- MasterClient: wdclient.NewMasterClient(context.Background(),
- options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")),
- option: options,
+ ce := &CommandEnv{
+ env: make(map[string]string),
+ MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(*options.Masters, ",")),
+ option: options,
}
+ ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient)
+ return ce
}
-func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) {
+func (ce *CommandEnv) parseUrl(input string) (path string, err error) {
if strings.HasPrefix(input, "http") {
- return parseFilerUrl(input)
+ err = fmt.Errorf("http://<filer>:<port> prefix is not supported any more")
+ return
}
if !strings.HasPrefix(input, "/") {
- input = filepath.ToSlash(filepath.Join(ce.option.Directory, input))
+ input = util.Join(ce.option.Directory, input)
}
- return ce.option.FilerHost, ce.option.FilerPort, input, err
+ return input, err
}
-func (ce *CommandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool {
+func (ce *CommandEnv) isDirectory(path string) bool {
- return ce.checkDirectory(ctx, filerServer, filerPort, path) == nil
+ return ce.checkDirectory(path) == nil
}
-func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error {
+func (ce *CommandEnv) confirmIsLocked() error {
- dir, name := filer2.FullPath(path).DirAndName()
+ if ce.locker.IsLocking() {
+ return nil
+ }
- return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+ return fmt.Errorf("need to lock to continue")
- resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir,
- Name: name,
- })
- if lookupErr != nil {
- return lookupErr
- }
+}
- if resp.Entry == nil {
- return fmt.Errorf("entry not found")
- }
+func (ce *CommandEnv) checkDirectory(path string) error {
- if !resp.Entry.IsDirectory {
- return fmt.Errorf("not a directory")
- }
+ dir, name := util.FullPath(path).DirAndName()
- return nil
- })
+ exists, err := filer_pb.Exists(ce, dir, name, true)
+
+ if !exists {
+ return fmt.Errorf("%s is not a directory", path)
+ }
+
+ return err
+
+}
+
+var _ = filer_pb.FilerClient(&CommandEnv{})
+
+func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress := fmt.Sprintf("%s:%d", ce.option.FilerHost, ce.option.FilerPort+10000)
+ return pb.WithGrpcFilerClient(filerGrpcAddress, ce.option.GrpcDialOption, fn)
+
+}
+func (ce *CommandEnv) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
}
func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) {
@@ -107,7 +120,7 @@ func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path
}
path = u.Path
} else {
- err = fmt.Errorf("path should have full url http://<filer_server>:<port>/path/to/dirOrFile : %s", entryPath)
+ err = fmt.Errorf("path should have full url /path/to/dirOrFile : %s", entryPath)
}
return
}
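
The new confirmIsLocked gate is what the many `if err = commandEnv.confirmIsLocked(); err != nil` hunks throughout this patch hook into: destructive commands refuse to run unless the shell holds the exclusive lock taken via the ExclusiveLocker bound to the master client. A skeleton of how a command adopts it, with commandSomething as a hypothetical example:

	func (c *commandSomething) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
		// refuse to mutate cluster state unless "lock" has been run in this shell
		if err = commandEnv.confirmIsLocked(); err != nil {
			return
		}
		// ... parse flags and perform the change ...
		return nil
	}
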
diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go
index a4f17e0fa..1dd611ca5 100644
--- a/weed/shell/shell_liner.go
+++ b/weed/shell/shell_liner.go
@@ -2,13 +2,13 @@ package shell
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"io"
"os"
"path"
"regexp"
- "strings"
-
"sort"
+ "strings"
"github.com/peterh/liner"
)
@@ -20,8 +20,15 @@ var (
func RunShell(options ShellOptions) {
+ sort.Slice(Commands, func(i, j int) bool {
+ return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0
+ })
+
line = liner.NewLiner()
defer line.Close()
+ grace.OnInterrupt(func() {
+ line.Close()
+ })
line.SetCtrlCAborts(true)
@@ -46,51 +53,57 @@ func RunShell(options ShellOptions) {
return
}
- cmds := reg.FindAllString(cmd, -1)
- if len(cmds) == 0 {
- continue
- } else {
- line.AppendHistory(cmd)
+ for _, c := range strings.Split(cmd, ";") {
+ if processEachCmd(reg, c, commandEnv) {
+ return
+ }
+ }
+ }
+}
- args := make([]string, len(cmds[1:]))
+func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {
+ cmds := reg.FindAllString(cmd, -1)
+ if len(cmds) == 0 {
+ return false
+ } else {
+ line.AppendHistory(cmd)
- for i := range args {
- args[i] = strings.Trim(string(cmds[1+i]), "\"'")
- }
+ args := make([]string, len(cmds[1:]))
- cmd := strings.ToLower(cmds[0])
- if cmd == "help" || cmd == "?" {
- printHelp(cmds)
- } else if cmd == "exit" || cmd == "quit" {
- return
- } else {
- foundCommand := false
- for _, c := range Commands {
- if c.Name() == cmd {
- if err := c.Do(args, commandEnv, os.Stdout); err != nil {
- fmt.Fprintf(os.Stderr, "error: %v\n", err)
- }
- foundCommand = true
+ for i := range args {
+ args[i] = strings.Trim(string(cmds[1+i]), "\"'")
+ }
+
+ cmd := cmds[0]
+ if cmd == "help" || cmd == "?" {
+ printHelp(cmds)
+ } else if cmd == "exit" || cmd == "quit" {
+ return true
+ } else {
+ foundCommand := false
+ for _, c := range Commands {
+ if c.Name() == cmd || c.Name() == "fs."+cmd {
+ if err := c.Do(args, commandEnv, os.Stdout); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
}
- }
- if !foundCommand {
- fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd)
+ foundCommand = true
}
}
-
+ if !foundCommand {
+ fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd)
+ }
}
+
}
+ return false
}
func printGenericHelp() {
msg :=
- `Type: "help <command>" for help on <command>
+ `Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
`
fmt.Print(msg)
- sort.Slice(Commands, func(i, j int) bool {
- return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0
- })
for _, c := range Commands {
helpTexts := strings.SplitN(c.Help(), "\n", 2)
fmt.Printf(" %-30s\t# %s \n", c.Name(), helpTexts[0])
@@ -106,10 +119,6 @@ func printHelp(cmds []string) {
} else {
cmd := strings.ToLower(args[0])
- sort.Slice(Commands, func(i, j int) bool {
- return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0
- })
-
for _, c := range Commands {
if c.Name() == cmd {
fmt.Printf(" %s\t# %s\n", c.Name(), c.Help())