Diffstat (limited to 'weed/shell/command_ec_encode.go')
-rw-r--r--	weed/shell/command_ec_encode.go	35
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 014b9bab7..33eef9ca7 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"io"
 	"sync"
 	"time"
@@ -54,10 +55,6 @@ func (c *commandEcEncode) Help() string {
 func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	if err = commandEnv.confirmIsLocked(); err != nil {
-		return
-	}
-
 	encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
 	collection := encodeCommand.String("collection", "", "the collection name")
@@ -68,6 +65,10 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 		return nil
 	}
 
+	if err = commandEnv.confirmIsLocked(); err != nil {
+		return
+	}
+
 	vid := needle.VolumeId(*volumeId)
 
 	// volumeId is provided
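
The two hunks above swap the order of locking and flag parsing: confirmIsLocked now runs only after encodeCommand.Parse succeeds, so printing help or rejecting a bad flag no longer requires the shell's exclusive lock. A minimal runnable sketch of that ordering, with confirmIsLocked stubbed out (the real method lives on CommandEnv):

package main

import (
	"flag"
	"fmt"
)

// confirmIsLocked stands in for the real CommandEnv method; here it
// always succeeds so the example runs standalone.
func confirmIsLocked() error { return nil }

func run(args []string) error {
	fs := flag.NewFlagSet("ec.encode", flag.ContinueOnError)
	volumeId := fs.Int("volumeId", 0, "the volume id")
	if err := fs.Parse(args); err != nil {
		return nil // -h or a bad flag: bail out before locking
	}
	if err := confirmIsLocked(); err != nil {
		return err // only take the lock once real work is certain
	}
	fmt.Println("would encode volume", *volumeId)
	return nil
}

func main() {
	_ = run([]string{"-volumeId", "7"})
}
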
@@ -106,7 +107,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId,
 	}
 
 	// generate ec shards
-	err = generateEcShards(commandEnv.option.GrpcDialOption, vid, collection, locations[0].Url)
+	err = generateEcShards(commandEnv.option.GrpcDialOption, vid, collection, locations[0].ServerAddress())
 	if err != nil {
 		return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err)
 	}
@@ -120,7 +121,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId,
 	return nil
 }
 
-func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
+func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {
 
 	fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)
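
generateEcShards and its peers now take a typed pb.ServerAddress instead of a raw URL string, so each call site can derive the gRPC endpoint consistently rather than juggling bare host:port strings. The sketch below is an illustrative stand-in for that idea, not SeaweedFS's actual implementation; it assumes the project's default convention that the gRPC port is the HTTP port plus 10000:

package main

import (
	"fmt"
	"net"
	"strconv"
)

// ServerAddress is an illustrative stand-in for pb.ServerAddress:
// a typed host:port string that can derive its gRPC endpoint.
type ServerAddress string

// ToGrpcAddress assumes the default convention of
// gRPC port = HTTP port + 10000.
func (sa ServerAddress) ToGrpcAddress() string {
	host, port, err := net.SplitHostPort(string(sa))
	if err != nil {
		return string(sa)
	}
	p, _ := strconv.Atoi(port)
	return net.JoinHostPort(host, strconv.Itoa(p+10000))
}

func main() {
	addr := ServerAddress("127.0.0.1:8080")
	fmt.Println(addr.ToGrpcAddress()) // prints 127.0.0.1:18080
}
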
@@ -161,13 +162,13 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 	}
 
 	// unmount the to be deleted shards
-	err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
+	err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].ServerAddress(), copiedShardIds)
 	if err != nil {
 		return err
 	}
 
 	// ask the source volume server to clean up copied ec shards
-	err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
+	err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].ServerAddress(), copiedShardIds)
 	if err != nil {
 		return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err)
 	}
@@ -175,7 +176,7 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 	// ask the source volume server to delete the original volume
 	for _, location := range existingLocations {
 		fmt.Printf("delete volume %d from %s\n", volumeId, location.Url)
-		err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url)
+		err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.ServerAddress())
 		if err != nil {
 			return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err)
 		}
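
Order matters across the two hunks above: the copied shards are unmounted on the source, then deleted there, and only then is the original volume removed from every replica location, with each step returning early on failure. A self-contained toy model of that fail-fast sequencing (step names follow the diff; the bodies are stubs):

package main

import "fmt"

// step pairs a description with a stubbed action; in the real code each
// action is a gRPC call to a volume server.
type step struct {
	name string
	run  func() error
}

func main() {
	steps := []step{
		{"unmount copied shards on source", func() error { return nil }},
		{"delete copied shards on source", func() error { return nil }},
		{"delete original volume replicas", func() error { return nil }},
	}
	for _, s := range steps {
		if err := s.run(); err != nil {
			fmt.Printf("%s: %v\n", s.name, err) // stop: later steps must not run
			return
		}
		fmt.Println("ok:", s.name)
	}
}
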
@@ -194,7 +195,7 @@ func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer
 	copyFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
 		defer wg.Done()
 		copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
-			allocatedEcShardIds, volumeId, collection, existingLocation.Url)
+			allocatedEcShardIds, volumeId, collection, existingLocation.ServerAddress())
 		if copyErr != nil {
 			err = copyErr
 		} else {
@@ -202,6 +203,14 @@ func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer
 			server.addEcVolumeShards(volumeId, collection, copiedShardIds)
 		}
 	}
+	cleanupFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
+		if err := unmountEcShards(grpcDialOption, volumeId, pb.NewServerAddressFromDataNode(server.info), allocatedEcShardIds); err != nil {
+			fmt.Printf("unmount aborted shards %d.%v on %s: %v\n", volumeId, allocatedEcShardIds, server.info.Id, err)
+		}
+		if err := sourceServerDeleteEcShards(grpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(server.info), allocatedEcShardIds); err != nil {
+			fmt.Printf("remove aborted shards %d.%v on %s: %v\n", volumeId, allocatedEcShardIds, server.info.Id, err)
+		}
+	}
 
 	// maybe parallelize
 	for i, server := range targetServers {
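
cleanupFunc in the hunk above is deliberately best-effort: each step logs its error and falls through rather than returning, so a failed unmount cannot block the delete that follows it. A runnable toy model of the same pattern (the unmount and remove bodies are stubs; the first is forced to fail to show the fall-through):

package main

import (
	"errors"
	"fmt"
)

// cleanup attempts every step regardless of earlier failures, logging
// errors instead of returning them, mirroring cleanupFunc in the diff.
func cleanup(volumeId uint32, shardIds []uint32) {
	unmount := func() error { return errors.New("connection refused") }
	remove := func() error { return nil }

	if err := unmount(); err != nil {
		fmt.Printf("unmount aborted shards %d.%v: %v\n", volumeId, shardIds, err)
	}
	if err := remove(); err != nil {
		fmt.Printf("remove aborted shards %d.%v: %v\n", volumeId, shardIds, err)
	}
}

func main() {
	cleanup(7, []uint32{0, 1, 2})
}
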
@@ -220,6 +229,12 @@ func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer
 	close(shardIdChan)
 
 	if err != nil {
+		for i, server := range targetServers {
+			if len(allocatedEcIds[i]) <= 0 {
+				continue
+			}
+			cleanupFunc(server, allocatedEcIds[i])
+		}
 		return nil, err
 	}
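
The final hunk wires cleanupFunc into the error path: if any copy failed, every target server with a non-empty allocation gets its already-copied shards unmounted and deleted before the error is returned, so no half-spread shards are left mounted. A self-contained toy model of that rollback loop:

package main

import (
	"errors"
	"fmt"
)

// Shard IDs are pre-allocated per target server; on a copy error every
// non-empty allocation is cleaned up before the error propagates.
func main() {
	allocatedEcIds := [][]uint32{{0, 1, 2}, {}, {3, 4}}
	copyErr := errors.New("copy to server 0 failed")

	if copyErr != nil {
		for i, ids := range allocatedEcIds {
			if len(ids) == 0 {
				continue // nothing was assigned to this server
			}
			fmt.Printf("cleanup server %d: unmount and delete shards %v\n", i, ids)
		}
	}
}
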