| field | value | date |
|---|---|---|
| author | bingoohuang <bingoo.huang@gmail.com> | 2019-12-30 13:05:50 +0800 |
| committer | GitHub <noreply@github.com> | 2019-12-30 13:05:50 +0800 |
| commit | 70da715d8d917527291b35fb069fac077d17b868 (patch) | |
| tree | b89bad02094cc7131bc2c9f64df13e15f9de9914 /weed/shell | |
| parent | 93a7df500ffeed766e395907e860b1733040ff23 (diff) | |
| parent | 09043c8e5a3b43add589344d28d4f57e90c83f70 (diff) | |
| download | seaweedfs-70da715d8d917527291b35fb069fac077d17b868.tar.xz, seaweedfs-70da715d8d917527291b35fb069fac077d17b868.zip | |
Merge pull request #4 from chrislusf/master
Syncing to the original repository
Diffstat (limited to 'weed/shell')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/shell/command_ec_balance.go | 11 |
| -rw-r--r-- | weed/shell/command_ec_common.go | 40 |
| -rw-r--r-- | weed/shell/command_ec_decode.go | 265 |
| -rw-r--r-- | weed/shell/command_ec_encode.go | 53 |
| -rw-r--r-- | weed/shell/command_ec_rebuild.go | 4 |
| -rw-r--r-- | weed/shell/command_ec_test.go | 13 |
| -rw-r--r-- | weed/shell/command_fs_cat.go | 4 |
| -rw-r--r-- | weed/shell/command_fs_du.go | 95 |
| -rw-r--r-- | weed/shell/command_fs_ls.go | 99 |
| -rw-r--r-- | weed/shell/command_fs_meta_cat.go | 75 |
| -rw-r--r-- | weed/shell/command_fs_meta_notify.go | 45 |
| -rw-r--r-- | weed/shell/command_fs_meta_save.go | 195 |
| -rw-r--r-- | weed/shell/command_fs_mv.go | 3 |
| -rw-r--r-- | weed/shell/command_fs_tree.go | 87 |
| -rw-r--r-- | weed/shell/command_volume_balance.go | 15 |
| -rw-r--r-- | weed/shell/command_volume_fix_replication.go | 15 |
| -rw-r--r-- | weed/shell/command_volume_list.go | 12 |
| -rw-r--r-- | weed/shell/command_volume_tier_download.go | 167 |
| -rw-r--r-- | weed/shell/command_volume_tier_upload.go | 148 |
| -rw-r--r-- | weed/shell/commands.go | 24 |
20 files changed, 1004 insertions, 366 deletions
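
A short illustrative sketch, not part of this commit: most of the `fs.*` command rewrites in the hunks below replace hand-rolled `ListEntries` pagination with the callback helper `filer2.ReadDirAllEntries`, whose signature is taken from those hunks. The package name `sketch` and the `countEntries` helper are hypothetical, added only to show the shared usage pattern.

```go
package sketch

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// countEntries lists a single directory level through the filer and tallies
// files versus sub-directories — the same traversal shape used by the
// reworked fs.du / fs.ls / fs.tree commands in the hunks below.
func countEntries(ctx context.Context, client filer2.FilerClient, dir string) (files, dirs int, err error) {
	err = filer2.ReadDirAllEntries(ctx, client, dir, "", func(entry *filer_pb.Entry, isLast bool) {
		if entry.IsDirectory {
			dirs++ // callers that need sizes recurse here, as duTraverseDirectory does
		} else {
			files++ // regular file entry; entry.Chunks carries its block list
		}
	})
	return files, dirs, err
}
```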
diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 47ae7bad3..96599372e 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -207,7 +207,7 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti if len(ecNodes) <= 1 { continue } - sortEcNodes(ecNodes) + sortEcNodesByFreeslotsAscending(ecNodes) fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id) if !applyBalancing { continue @@ -266,6 +266,10 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c for shardId, ecNode := range ecShardsToMove { rackId := pickOneRack(racks, rackToShardCount, averageShardsPerEcRack) + if rackId == "" { + fmt.Printf("ec shard %d.%d at %s can not find a destination rack\n", vid, shardId, ecNode.info.Id) + continue + } var possibleDestinationEcNodes []*EcNode for _, n := range racks[rackId].ecNodes { possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) @@ -436,10 +440,9 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack return nil } -func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, expectedTotalEcShards int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { - sortEcNodes(possibleDestinationEcNodes) - averageShardsPerEcNode := ceilDivide(expectedTotalEcShards, len(possibleDestinationEcNodes)) + sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) for _, destEcNode := range possibleDestinationEcNodes { if destEcNode.info.Id == existingLocation.info.Id { diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index d0fe16a68..2beed4742 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -22,7 +22,7 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist if applyBalancing { // ask destination node to copy shard and the ecx file from source node, and mount it - copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, uint32(shardId), 1, vid, collection, existingLocation.info.Id) + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) if err != nil { return err } @@ -51,13 +51,9 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist } func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServer *EcNode, startFromShardId uint32, shardCount int, + targetServer *EcNode, shardIdsToCopy []uint32, volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { - var shardIdsToCopy []uint32 - for shardId := startFromShardId; shardId < startFromShardId+uint32(shardCount); shardId++ { - shardIdsToCopy = append(shardIdsToCopy, shardId) - } fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) err = 
operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { @@ -70,6 +66,8 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption Collection: collection, ShardIds: shardIdsToCopy, CopyEcxFile: true, + CopyEcjFile: true, + CopyVifFile: true, SourceDataNode: existingLocation, }) if copyErr != nil { @@ -112,12 +110,18 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, } } -func sortEcNodes(ecNodes []*EcNode) { +func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) { sort.Slice(ecNodes, func(i, j int) bool { return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot }) } +func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot + }) +} + type CandidateEcNode struct { ecNode *EcNode shardCount int @@ -156,7 +160,7 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (cou } func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { - return int(dn.FreeVolumeCount)*10 - countShards(dn.EcShardInfos) + return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos) } type RackId string @@ -191,18 +195,18 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen if selectedDataCenter != "" && selectedDataCenter != dc { return } - if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 { - ecNodes = append(ecNodes, &EcNode{ - info: dn, - dc: dc, - rack: rack, - freeEcSlot: int(freeEcSlots), - }) - totalFreeEcSlots += freeEcSlots - } + + freeEcSlots := countFreeShardSlots(dn) + ecNodes = append(ecNodes, &EcNode{ + info: dn, + dc: dc, + rack: rack, + freeEcSlot: int(freeEcSlots), + }) + totalFreeEcSlots += freeEcSlots }) - sortEcNodes(ecNodes) + sortEcNodesByFreeslotsDecending(ecNodes) return } diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go new file mode 100644 index 000000000..1f9ad2ff9 --- /dev/null +++ b/weed/shell/command_ec_decode.go @@ -0,0 +1,265 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcDecode{}) +} + +type commandEcDecode struct { +} + +func (c *commandEcDecode) Name() string { + return "ec.decode" +} + +func (c *commandEcDecode) Help() string { + return `decode a erasure coded volume into a normal volume + + ec.decode [-collection=""] [-volumeId=<volume_id>] + +` +} + +func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := encodeCommand.Int("volumeId", 0, "the volume id") + collection := encodeCommand.String("collection", "", "the collection name") + if err = encodeCommand.Parse(args); err != nil { + return nil + } + + ctx := context.Background() + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doEcDecode(ctx, commandEnv, topologyInfo, 
*collection, vid) + } + + // apply to all volumes in the collection + volumeIds := collectEcShardIds(topologyInfo, *collection) + fmt.Printf("ec encode volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { + // find volume location + nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid) + + fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits) + + // collect ec shards to the server with most space + targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid) + if err != nil { + return fmt.Errorf("collectEcShards for volume %d: %v", vid, err) + } + + // generate a normal volume + err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) + if err != nil { + return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // delete the previous ec shards + err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) + if err != nil { + return fmt.Errorf("delete ec shards for volume %d: %v", vid, err) + } + + return nil +} + +func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { + + // mount volume + if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + VolumeId: uint32(vid), + }) + return mountErr + }); err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // unmount ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err) + } + } + // delete ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) + } + } + + return nil +} + +func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{ + VolumeId: uint32(vid), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func 
collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { + + maxShardCount := 0 + var exisitngEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount() + if toBeCopiedShardCount > maxShardCount { + maxShardCount = toBeCopiedShardCount + targetNodeLocation = loc + exisitngEcIndexBits = ecIndexBits + } + } + + fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits) + + var copiedEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + if loc == targetNodeLocation { + continue + } + + needToCopyEcIndexBits := ecIndexBits.Minus(exisitngEcIndexBits).MinusParityShards() + if needToCopyEcIndexBits.ShardIdCount() == 0 { + continue + } + + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) + + _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(vid), + Collection: collection, + ShardIds: needToCopyEcIndexBits.ToUint32Slice(), + CopyEcxFile: false, + CopyEcjFile: true, + CopyVifFile: true, + SourceDataNode: loc, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr) + } + + return nil + }) + + if err != nil { + break + } + + copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits) + + } + + nodeToEcIndexBits[targetNodeLocation] = exisitngEcIndexBits.Plus(copiedEcIndexBits) + + return targetNodeLocation, err + +} + +func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + return resp.TopologyInfo, nil + +} + +func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) { + + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection && v.Id == uint32(vid) { + ecShardInfos = append(ecShardInfos, v) + } + } + }) + + return +} + +func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits { + + nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if 
v.Id == uint32(vid) { + nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + } + } + }) + + return nodeToEcIndexBits +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index 8ad0d51c8..58527abf2 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -8,13 +8,14 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/grpc" ) func init() { @@ -87,15 +88,17 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { // find volume location - locations := commandEnv.MasterClient.GetLocations(uint32(vid)) - if len(locations) == 0 { + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { return fmt.Errorf("volume %d not found", vid) } + // fmt.Printf("found ec %d shards on %v\n", vid, locations) + // mark the volume as readonly err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { - return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // generate ec shards @@ -163,10 +166,10 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } // calculate how many shards to allocate for these servers - allocated := balancedEcDistribution(allocatedDataNodes) + allocatedEcIds := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocated, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) if err != nil { return err } @@ -196,31 +199,29 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*EcNode, allocated []int, + targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { // parallelize shardIdChan := make(chan []uint32, len(targetServers)) var wg sync.WaitGroup - startFromShardId := uint32(0) for i, server := range targetServers { - if allocated[i] <= 0 { + if len(allocatedEcIds[i]) <= 0 { continue } wg.Add(1) - go func(server *EcNode, startFromShardId uint32, shardCount int) { + go func(server *EcNode, allocatedEcShardIds []uint32) { defer wg.Done() copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, - startFromShardId, shardCount, volumeId, collection, existingLocation.Url) + allocatedEcShardIds, volumeId, collection, existingLocation.Url) if copyErr != nil { err = copyErr } else { shardIdChan <- copiedShardIds server.addEcVolumeShards(volumeId, collection, copiedShardIds) } - }(server, 
startFromShardId, allocated[i]) - startFromShardId += uint32(allocated[i]) + }(server, allocatedEcIds[i]) } wg.Wait() close(shardIdChan) @@ -236,18 +237,18 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia return } -func balancedEcDistribution(servers []*EcNode) (allocated []int) { - allocated = make([]int, len(servers)) - allocatedCount := 0 - for allocatedCount < erasure_coding.TotalShardsCount { - for i, server := range servers { - if server.freeEcSlot-allocated[i] > 0 { - allocated[i] += 1 - allocatedCount += 1 - } - if allocatedCount >= erasure_coding.TotalShardsCount { - break - } +func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { + allocated = make([][]uint32, len(servers)) + allocatedShardIdIndex := uint32(0) + serverIndex := 0 + for allocatedShardIdIndex < erasure_coding.TotalShardsCount { + if servers[serverIndex].freeEcSlot > 0 { + allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex) + allocatedShardIdIndex++ + } + serverIndex++ + if serverIndex >= len(servers) { + serverIndex = 0 } } @@ -281,7 +282,7 @@ func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, se } }) - for vid, _ := range vidMap { + for vid := range vidMap { vids = append(vids, needle.VolumeId(vid)) } diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 63b7c4088..2e2fca743 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -111,7 +111,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount) } - sortEcNodes(allEcNodes) + sortEcNodesByFreeslotsDecending(allEcNodes) if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount { return fmt.Errorf("disk space is not enough") @@ -215,6 +215,8 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder Collection: collection, ShardIds: []uint32{uint32(shardId)}, CopyEcxFile: needEcxFile, + CopyEcjFile: needEcxFile, + CopyVifFile: needEcxFile, SourceDataNode: ecNodes[0].info.Id, }) return copyErr diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index 9e578ed28..c233d25d0 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -2,12 +2,25 @@ package shell import ( "context" + "fmt" "testing" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) +func TestCommandEcDistribution(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100), + newEcNode("dc1", "rack2", "dn2", 100), + } + + allocated := balancedEcDistribution(allEcNodes) + + fmt.Printf("allocated: %+v", allocated) +} + func TestCommandEcBalanceSmall(t *testing.T) { allEcNodes := []*EcNode{ diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 66ced46c5..9db36e9d1 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -24,12 +24,8 @@ func (c *commandFsCat) Name() string { func (c *commandFsCat) Help() string { return `stream the file content on to the screen - fs.cat /dir/ fs.cat /dir/file_name - fs.cat /dir/file_prefix - fs.cat http://<filer_server>:<port>/dir/ fs.cat http://<filer_server>:<port>/dir/file_name - fs.cat http://<filer_server>:<port>/dir/file_prefix ` } diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 5e634c82a..1d7d79686 100644 --- a/weed/shell/command_fs_du.go +++ 
b/weed/shell/command_fs_du.go @@ -3,11 +3,13 @@ package shell import ( "context" "fmt" + "io" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "io" ) func init() { @@ -43,67 +45,41 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer path = path + "/" } + var blockCount, byteCount uint64 dir, name := filer2.FullPath(path).DirAndName() + blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - _, _, err = paginateDirectory(ctx, writer, client, dir, name, 1000) - - return err + if name == "" && err == nil { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) + } - }) + return } -func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int) (blockCount uint64, byteCount uint64, err error) { - - paginatedCount := -1 - startFromFileName := "" - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } +func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) { - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - numBlock, numByte, err := paginateDirectory(ctx, writer, client, subDir, "", paginateSize) - if err == nil { - blockCount += numBlock - byteCount += numByte - } - } else { - blockCount += uint64(len(entry.Chunks)) - byteCount += filer2.TotalSize(entry.Chunks) + err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name } - startFromFileName = entry.Name - - if name != "" && !entry.IsDirectory { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "") + if err == nil { + blockCount += numBlock + byteCount += numByte } + } else { + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) } - } - - if name == "" { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) - } + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + } + }) return - } func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { @@ -115,3 +91,20 @@ func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, }, filerGrpcAddress, env.option.GrpcDialOption) } + +type commandFilerClient struct { + env *CommandEnv + filerServer string + filerPort int64 +} + +func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) 
*commandFilerClient { + return &commandFilerClient{ + env: env, + filerServer: filerServer, + filerPort: filerPort, + } +} +func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { + return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn) +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 6979635e1..01842083b 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -3,13 +3,14 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "os" "os/user" "strconv" "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -66,83 +67,51 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer } dir, name := filer2.FullPath(path).DirAndName() + entryCount := 0 - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - return paginateOneDirectory(ctx, writer, client, dir, name, 1000, isLongFormat, showHidden) - - }) - -} - -func paginateOneDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, isLongFormat, showHidden bool) (err error) { + err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), dir, name, func(entry *filer_pb.Entry, isLast bool) { - entryCount := 0 - paginatedCount := -1 - startFromFileName := "" - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr + if !showHidden && strings.HasPrefix(entry.Name, ".") { return } - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { + entryCount++ - if !showHidden && strings.HasPrefix(entry.Name, ".") { - continue - } - - entryCount++ - - if isLongFormat { - fileMode := os.FileMode(entry.Attributes.FileMode) - userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName - if userName == "" { - if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { - userName = user.Username - } - } - groupName := "" - if len(groupNames) > 0 { - groupName = groupNames[0] + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username } - if groupName == "" { - if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { - groupName = group.Name - } - } - - if dir == "/" { - // just for printing - dir = "" + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name } - fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", - fileMode, len(entry.Chunks), - userName, groupName, - filer2.TotalSize(entry.Chunks), dir, entry.Name) - } else { - fmt.Fprintf(writer, "%s\n", entry.Name) } - startFromFileName = entry.Name - + if dir == "/" { + // just for 
printing + dir = "" + } + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) } - } - if isLongFormat { + }) + + if isLongFormat && err == nil { fmt.Fprintf(writer, "total %d\n", entryCount) } return - } diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go new file mode 100644 index 000000000..5908b0a3c --- /dev/null +++ b/weed/shell/command_fs_meta_cat.go @@ -0,0 +1,75 @@ +package shell + +import ( + "context" + "fmt" + "io" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandFsMetaCat{}) +} + +type commandFsMetaCat struct { +} + +func (c *commandFsMetaCat) Name() string { + return "fs.meta.cat" +} + +func (c *commandFsMetaCat) Help() string { + return `print out the meta data content for a file or directory + + fs.meta.cat /dir/ + fs.meta.cat /dir/file_name + fs.meta.cat http://<filer_server>:<port>/dir/ + fs.meta.cat http://<filer_server>:<port>/dir/file_name +` +} + +func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + ctx := context.Background() + + dir, name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + if err != nil { + return err + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(respLookupEntry.Entry) + if marshalErr != nil { + return fmt.Errorf("marshal meta: %v", marshalErr) + } + + fmt.Fprintf(writer, "%s\n", text) + + return nil + + }) + +} diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index 13b272fbf..a898df7a0 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -5,11 +5,12 @@ import ( "fmt" "io" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) func init() { @@ -46,33 +47,33 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i ctx := context.Background() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + var dirCount, fileCount uint64 - var dirCount, fileCount uint64 + err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { - - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } - - return notification.Queue.SendMessage( - string(parentPath.Child(entry.Name)), - &filer_pb.EventNotification{ - NewEntry: entry, - }, - ) + if entry.IsDirectory { + dirCount++ + } else { + fileCount++ + } - }) + notifyErr := 
notification.Queue.SendMessage( + string(parentPath.Child(entry.Name)), + &filer_pb.EventNotification{ + NewEntry: entry, + }, + ) - if err == nil { - fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + if notifyErr != nil { + fmt.Fprintf(writer, "fail to notify new entry event for %s: %v\n", parentPath.Child(entry.Name), notifyErr) } - return err - }) + if err == nil { + fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + } + + return err + } diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index 6ca395fae..ed070350f 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -2,15 +2,19 @@ package shell import ( "context" + "flag" "fmt" "io" "os" + "sync" + "sync/atomic" "time" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -27,10 +31,11 @@ func (c *commandFsMetaSave) Name() string { func (c *commandFsMetaSave) Help() string { return `save all directory and file meta data to a local file for metadata backup. - fs.meta.save / # save from the root - fs.meta.save /path/to/save # save from the directory /path/to/save - fs.meta.save . # save from current directory - fs.meta.save # save from current directory + fs.meta.save / # save from the root + fs.meta.save -v -o t.meta / # save from the root, output to t.meta file. + fs.meta.save /path/to/save # save from the directory /path/to/save + fs.meta.save . # save from current directory + fs.meta.save # save from current directory The meta data will be saved into a local <filer_host>-<port>-<time>.meta file. 
These meta data can be later loaded by fs.meta.load command, @@ -42,109 +47,139 @@ func (c *commandFsMetaSave) Help() string { func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) - if err != nil { - return err + fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files") + outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file") + if err = fsMetaSaveCommand.Parse(args); err != nil { + return nil } - ctx := context.Background() + filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) + if parseErr != nil { + return parseErr + } - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + ctx := context.Background() - t := time.Now() - fileName := fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", + t := time.Now() + fileName := *outputFileName + if fileName == "" { + fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + } - dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil - } - defer dst.Close() - - var dirCount, fileCount uint64 + dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("failed to create file %s: %v", fileName, openErr) + } + defer dst.Close() + var wg sync.WaitGroup + wg.Add(1) + outputChan := make(chan []byte, 1024) + go func() { sizeBuf := make([]byte, 4) + for b := range outputChan { + util.Uint32toBytes(sizeBuf, uint32(len(b))) + dst.Write(sizeBuf) + dst.Write(b) + } + wg.Done() + }() - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { + var dirCount, fileCount uint64 - protoMessage := &filer_pb.FullEntry{ - Dir: string(parentPath), - Entry: entry, - } + err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - bytes, err := proto.Marshal(protoMessage) - if err != nil { - return fmt.Errorf("marshall error: %v", err) - } + protoMessage := &filer_pb.FullEntry{ + Dir: string(parentPath), + Entry: entry, + } - util.Uint32toBytes(sizeBuf, uint32(len(bytes))) + bytes, err := proto.Marshal(protoMessage) + if err != nil { + fmt.Fprintf(writer, "marshall error: %v\n", err) + return + } - dst.Write(sizeBuf) - dst.Write(bytes) + outputChan <- bytes - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } + if entry.IsDirectory { + atomic.AddUint64(&dirCount, 1) + } else { + atomic.AddUint64(&fileCount, 1) + } + if *verbose { println(parentPath.Child(entry.Name)) - - return nil - - }) - - if err == nil { - fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) - fmt.Fprintf(writer, "\nmeta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) } - return err - }) -} -func doTraverse(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry) error) (err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 
|| paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: string(parentPath), - Prefix: "", - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } + close(outputChan) - paginatedCount = len(resp.Entries) + wg.Wait() - for _, entry := range resp.Entries { + if err == nil { + fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount) + fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) + } - if err = fn(parentPath, entry); err != nil { - return err - } + return err - if entry.IsDirectory { - subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) - if parentPath == "/" { - subDir = "/" + entry.Name +} +func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, + parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { + + K := 5 + + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue } - if err = doTraverse(ctx, writer, client, filer2.FullPath(subDir), fn); err != nil { - return err + dir := t.(filer2.FullPath) + processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr } + jobQueueWg.Done() } - startFromFileName = entry.Name - - } + }() } - + jobQueueWg.Wait() + isTerminating = true return +} + +func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, + parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, + fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { + + return filer2.ReadDirAllEntries(ctx, filerClient, string(parentPath), "", func(entry *filer_pb.Entry, isLast bool) { + + fn(parentPath, entry) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + jobQueueWg.Add(1) + queue.Enqueue(filer2.FullPath(subDir)) + } + }) } diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 130bfe4e7..67606ab53 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -49,12 +49,10 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer ctx := context.Background() - sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName() destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { // collect destination entry info @@ -77,7 +75,6 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer targetName = destinationName } - request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: sourceDir, OldName: sourceName, diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 8474e43ea..a4524f341 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -3,10 +3,11 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "strings" + + 
"github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -38,75 +39,45 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ ctx := context.Background() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name, newPrefix(), -1) - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, dir, name, newPrefix(), -1) - - if terr == nil { - fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) - } - - return terr + if terr == nil { + fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) + } - }) + return terr } -func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } - paginatedCount = len(resp.Entries) - if paginatedCount > 0 { - prefix.addMarker(level) - } +func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { - for i, entry := range resp.Entries { + prefix.addMarker(level) - if level < 0 && name != "" { - if entry.Name != name { - break - } + err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + if level < 0 && name != "" { + if entry.Name != name { + return } + } - // 0.1% wrong prefix here, but fixing it would need to paginate to the next batch first - isLast := paginatedCount < paginateSize && i == paginatedCount-1 - fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) - - if entry.IsDirectory { - directoryCount++ - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, subDir, "", prefix, level+1) - directoryCount += dirCount - fileCount += fCount - err = terr - } else { - fileCount++ - } - startFromFileName = entry.Name + fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) + if entry.IsDirectory { + directoryCount++ + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1) + directoryCount += dirCount + fileCount += fCount + err = terr + } else { + fileCount++ } - } + }) return - } type Prefix struct { diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index d7ef0d005..bed4f4306 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -27,7 +27,7 @@ func (c *commandVolumeBalance) Name() string { func (c *commandVolumeBalance) Help() string { return `balance all volumes among volume servers - volume.balance [-c ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] + 
volume.balance [-collection ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] Algorithm: @@ -79,8 +79,10 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc) - for _, volumeServers := range typeToNodes { + + for maxVolumeCount, volumeServers := range typeToNodes { if len(volumeServers) < 2 { + fmt.Printf("only 1 node is configured max %d volumes, skipping balancing\n", maxVolumeCount) continue } if *collection == "EACH_COLLECTION" { @@ -93,8 +95,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return err } } - } else if *collection == "ALL" { - if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL", *applyBalancing); err != nil { + } else if *collection == "ALL_COLLECTIONS" { + if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil { return err } } else { @@ -108,6 +110,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error { + var nodes []*Node for _, dn := range dataNodeInfos { nodes = append(nodes, &Node{ @@ -118,7 +121,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat // balance writable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } @@ -133,7 +136,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat // balance readable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 4c7a794c0..6f35dd5d2 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -3,13 +3,14 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" "io" "math/rand" "sort" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func init() { @@ -78,7 +79,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, underReplicatedVolumeLocations := make(map[uint32][]location) for vid, locations := range replicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) if replicaPlacement.GetCopyCount() > len(locations) { underReplicatedVolumeLocations[vid] = locations } @@ -97,7 +98,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, for vid, locations := range 
underReplicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) foundNewLocation := false for _, dst := range allLocations { // check whether data nodes satisfy the constraints @@ -145,7 +146,7 @@ func keepDataNodesSorted(dataNodes []location) { }) } -func satisfyReplicaPlacement(replicaPlacement *storage.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { +func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { existingDataCenters := make(map[string]bool) existingRacks := make(map[string]bool) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 134580ffe..c6c79d150 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -46,7 +46,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io. } func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics { - fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, volumeSizeLimitMb) + fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb) sort.Slice(t.DataCenterInfos, func(i, j int) bool { return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id }) @@ -58,7 +58,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi return s } func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics { - fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.RackInfos, func(i, j int) bool { return t.RackInfos[i].Id < t.RackInfos[j].Id @@ -70,7 +70,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti return s } func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { - fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.DataNodeInfos, func(i, j int) bool { return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id @@ -82,7 +82,7 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { return s } func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { - fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.VolumeInfos, 
func(i, j int) bool { return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id @@ -98,7 +98,7 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { } func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics { fmt.Fprintf(writer, " volume %+v \n", t) - return newStatiscis(t) + return newStatistics(t) } type statistics struct { @@ -108,7 +108,7 @@ type statistics struct { DeletedBytes uint64 } -func newStatiscis(t *master_pb.VolumeInformationMessage) statistics { +func newStatistics(t *master_pb.VolumeInformationMessage) statistics { return statistics{ Size: t.Size, FileCount: t.FileCount, diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go new file mode 100644 index 000000000..88e2e8b92 --- /dev/null +++ b/weed/shell/command_volume_tier_download.go @@ -0,0 +1,167 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeTierDownload{}) +} + +type commandVolumeTierDownload struct { +} + +func (c *commandVolumeTierDownload) Name() string { + return "volume.tier.download" +} + +func (c *commandVolumeTierDownload) Help() string { + return `move the dat file of a volume to a remote tier + + volume.tier.download [-collection=""] + volume.tier.download [-collection=""] -volumeId=<volume_id> + + e.g.: + volume.tier.download -volumeId=7 + volume.tier.download -volumeId=7 + + This command will download the dat file of a volume from a remote tier to a volume server in local cluster. 
+ +` +} + +func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := tierCommand.Int("volumeId", 0, "the volume id") + collection := tierCommand.String("collection", "", "the collection name") + if err = tierCommand.Parse(args); err != nil { + return nil + } + + ctx := context.Background() + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid) + } + + // apply to all volumes in the collection + // reusing collectVolumeIdsForEcEncode for now + volumeIds := collectRemoteVolumes(topologyInfo, *collection) + if err != nil { + return err + } + fmt.Printf("tier download volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.VolumeInfos { + if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + // TODO parallelize this + for _, loc := range locations { + // copy the .dat file from remote tier to local + err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) + if err != nil { + return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err) + } + } + + return nil +} + +func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { + + err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + + var lastProcessed int64 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 + + fmt.Fprintf(writer, "downloaded %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed) + + lastProcessed = resp.Processed + } + if downloadErr != nil { + return downloadErr + } + + _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + VolumeId: uint32(volumeId), + }) + if unmountErr != nil { + return unmountErr + } + + _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ 
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
new file mode 100644
index 000000000..b3a0d9fe8
--- /dev/null
+++ b/weed/shell/command_volume_tier_upload.go
@@ -0,0 +1,148 @@
+package shell
+
+import (
+    "context"
+    "flag"
+    "fmt"
+    "io"
+    "time"
+
+    "google.golang.org/grpc"
+
+    "github.com/chrislusf/seaweedfs/weed/operation"
+    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+    Commands = append(Commands, &commandVolumeTierUpload{})
+}
+
+type commandVolumeTierUpload struct {
+}
+
+func (c *commandVolumeTierUpload) Name() string {
+    return "volume.tier.upload"
+}
+
+func (c *commandVolumeTierUpload) Help() string {
+    return `move the dat file of a volume to a remote tier
+
+    volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h]
+    volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<storage_backend> [-keepLocalDatFile]
+
+    e.g.:
+    volume.tier.upload -volumeId=7 -dest=s3
+    volume.tier.upload -volumeId=7 -dest=s3.default
+
+    The <storage_backend> is defined in master.toml.
+    For example, "s3.default" in [storage.backend.s3.default]
+
+    This command will move the dat file of a volume to a remote tier.
+
+    SeaweedFS enables scalable and fast local access to lots of files,
+    while cloud storage is slower but more cost efficient. How can the two be combined?
+
+    Usually data follows the 80/20 rule: only 20% of it is frequently accessed.
+    We can offload the old volumes to the cloud.
+
+    With this, SeaweedFS can be both fast and scalable, with virtually infinite storage space.
+    Just add more local SeaweedFS volume servers to increase the throughput.
+
+    The index file stays local, and the same O(1) disk read applies to the remote file.
+
+`
+}
+
+func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+    tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    volumeId := tierCommand.Int("volumeId", 0, "the volume id")
+    collection := tierCommand.String("collection", "", "the collection name")
+    fullPercentage := tierCommand.Float64("fullPercent", 95, "select volumes that are filled to this percentage of the max volume size")
+    quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes during this period")
+    dest := tierCommand.String("dest", "", "the target tier name")
+    keepLocalDatFile := tierCommand.Bool("keepLocalDatFile", false, "whether to keep the local dat file after uploading")
+    if err = tierCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    ctx := context.Background()
+    vid := needle.VolumeId(*volumeId)
+
+    // a specific volumeId is provided
+    if vid != 0 {
+        return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+    }
+
+    // otherwise apply to all matching volumes in the collection,
+    // reusing collectVolumeIdsForEcEncode for now
+    volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+    if err != nil {
+        return err
+    }
+    fmt.Printf("tier upload volumes: %v\n", volumeIds)
+    for _, vid := range volumeIds {
+        if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
+    // find volume locations
+    locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+    if !found {
+        return fmt.Errorf("volume %d not found", vid)
+    }
+
+    // mark the volume readonly so no writes land while the dat file moves
+    err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, vid, locations)
+    if err != nil {
+        return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
+    }
+
+    // copy the .dat file to the remote tier
+    err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, vid, collection, locations[0].Url, dest, keepLocalDatFile)
+    if err != nil {
+        return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err)
+    }
+
+    return nil
+}
+
+func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
+
+    err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+        stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
+            VolumeId:               uint32(volumeId),
+            Collection:             collection,
+            DestinationBackendName: dest,
+            KeepLocalDatFile:       keepLocalDatFile,
+        })
+        // check the RPC error before reading from the stream; otherwise a
+        // failed call would leave stream nil and Recv would panic
+        if copyErr != nil {
+            return copyErr
+        }
+
+        var lastProcessed int64
+        for {
+            resp, recvErr := stream.Recv()
+            if recvErr != nil {
+                if recvErr == io.EOF {
+                    break
+                }
+                return recvErr
+            }
+
+            // as in the download path, report a per-update MiB delta
+            processedMiB := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0
+
+            fmt.Fprintf(writer, "copied %.2f%%, %d bytes, %.2f MiB since last report\n", resp.ProcessedPercentage, resp.Processed, processedMiB)
+
+            lastProcessed = resp.Processed
+        }
+
+        return nil
+    })
+
+    return err
+
+}
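Both new files register themselves by appending to the Commands slice in init, which is how every shell command in this package is discovered. A minimal sketch of driving that registry programmatically, assuming it sits in package shell next to the registry; the runCommand helper is hypothetical:

package shell

import (
    "fmt"
    "io"
)

// runCommand matches a registered command by Name and delegates to Do,
// mirroring what an interactive shell loop over Commands would do.
func runCommand(name string, args []string, commandEnv *CommandEnv, writer io.Writer) error {
    for _, c := range Commands {
        if c.Name() == name {
            return c.Do(args, commandEnv, writer)
        }
    }
    return fmt.Errorf("unknown command: %s", name)
}

Matching on Name() keeps dispatch trivial, and commands stay decoupled: adding volume.tier.upload required no changes outside its own file.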
diff --git a/weed/shell/commands.go b/weed/shell/commands.go
index b642ec253..a6a0f7dec 100644
--- a/weed/shell/commands.go
+++ b/weed/shell/commands.go
@@ -9,10 +9,11 @@ import (
     "strconv"
     "strings"
 
+    "google.golang.org/grpc"
+
     "github.com/chrislusf/seaweedfs/weed/filer2"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/wdclient"
-    "google.golang.org/grpc"
 )
 
 type ShellOptions struct {
@@ -71,26 +72,19 @@ func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, fi
     return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
 
-        resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
-            Directory:          dir,
-            Prefix:             name,
-            StartFromFileName:  name,
-            InclusiveStartFrom: true,
-            Limit:              1,
+        resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
+            Directory: dir,
+            Name:      name,
         })
-        if listErr != nil {
-            return listErr
+        if lookupErr != nil {
+            return lookupErr
         }
-        if len(resp.Entries) == 0 {
+        if resp.Entry == nil {
             return fmt.Errorf("entry not found")
         }
-        if resp.Entries[0].Name != name {
-            return fmt.Errorf("not a valid directory, found %s", resp.Entries[0].Name)
-        }
-
-        if !resp.Entries[0].IsDirectory {
+        if !resp.Entry.IsDirectory {
             return fmt.Errorf("not a directory")
         }
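The checkDirectory rewrite above replaces a one-entry ListEntries scan plus a name comparison with a single point lookup. The same pattern in isolation, using only the request and response fields visible in this hunk; the isDirectory helper name is hypothetical:

package shell

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// isDirectory performs one LookupDirectoryEntry RPC and inspects the single
// returned entry, instead of listing and re-checking the entry name.
func isDirectory(ctx context.Context, client filer_pb.SeaweedFilerClient, dir, name string) error {
    resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
        Directory: dir,
        Name:      name,
    })
    if err != nil {
        return err
    }
    if resp.Entry == nil {
        return fmt.Errorf("entry not found")
    }
    if !resp.Entry.IsDirectory {
        return fmt.Errorf("%s/%s is not a directory", dir, name)
    }
    return nil
}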

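End to end, the two new commands complement each other; a tiering round trip from an interactive weed shell session might look like the following (flag values are illustrative, and the s3.default backend must already be defined in master.toml):

> volume.tier.upload -collection=pictures -fullPercent=95 -quietFor=24h -dest=s3.default
> volume.tier.download -collection=pictures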