Diffstat (limited to 'weed/shell/command_ec_encode.go'):
 weed/shell/command_ec_encode.go | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 355869767..2d62aff3f 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -37,8 +37,8 @@ func (c *commandEcEncode) Name() string {
func (c *commandEcEncode) Help() string {
return `apply erasure coding to a volume
- ec.encode [-collection=""] [-fullPercent=95 -quietFor=1h] [-verbose]
- ec.encode [-collection=""] [-volumeId=<volume_id>] [-verbose]
+ ec.encode [-collection=""] [-fullPercent=95 -quietFor=1h] [-verbose] [-sourceDiskType=<disk_type>] [-diskType=<disk_type>]
+ ec.encode [-collection=""] [-volumeId=<volume_id>] [-verbose] [-diskType=<disk_type>]
This command will:
1. freeze one volume
@@ -61,6 +61,18 @@ func (c *commandEcEncode) Help() string {
Options:
-verbose: show detailed reasons why volumes are not selected for encoding
+ -sourceDiskType: filter source volumes by disk type (hdd, ssd, or empty for all)
+ -diskType: target disk type for EC shards (hdd, ssd, or empty for default hdd)
+
+ Examples:
+ # Encode SSD volumes to SSD EC shards (same tier)
+ ec.encode -collection=mybucket -sourceDiskType=ssd -diskType=ssd
+
+ # Encode SSD volumes to HDD EC shards (tier migration to cheaper storage)
+ ec.encode -collection=mybucket -sourceDiskType=ssd -diskType=hdd
+
+ # Encode all volumes to SSD EC shards
+ ec.encode -collection=mybucket -diskType=ssd
Re-balancing algorithm:
` + ecBalanceAlgorithmDescription
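Note on the new flags: -sourceDiskType narrows which volumes are selected for encoding, while -diskType chooses where the resulting EC shards land. The -volumeId form of the command omits -sourceDiskType, since explicitly named volumes need no filter. An end-to-end example combining the new flags with the existing -rebalance flag (illustrative; defaults assumed for -fullPercent and -quietFor):

    # Migrate a bucket's full, quiet SSD volumes to EC shards on hdd,
    # then re-balance the new shards in one pass:
    ec.encode -collection=mybucket -sourceDiskType=ssd -diskType=hdd -rebalance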
@@ -80,6 +92,8 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
maxParallelization := encodeCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
forceChanges := encodeCommand.Bool("force", false, "force the encoding even if the cluster has less than recommended 4 nodes")
shardReplicaPlacement := encodeCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty")
+ sourceDiskTypeStr := encodeCommand.String("sourceDiskType", "", "filter source volumes by disk type (hdd, ssd, or empty for all)")
+ diskTypeStr := encodeCommand.String("diskType", "", "target disk type for EC shards (hdd, ssd, or empty for default hdd)")
applyBalancing := encodeCommand.Bool("rebalance", false, "re-balance EC shards after creation")
verbose := encodeCommand.Bool("verbose", false, "show detailed reasons why volumes are not selected for encoding")
@@ -94,6 +108,16 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return err
}
+ // Parse source disk type filter (optional)
+ var sourceDiskType *types.DiskType
+ if *sourceDiskTypeStr != "" {
+ sdt := types.ToDiskType(*sourceDiskTypeStr)
+ sourceDiskType = &sdt
+ }
+
+ // Parse target disk type for EC shards
+ diskType := types.ToDiskType(*diskTypeStr)
+
// collect topology information
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
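The asymmetry in the parsing above is deliberate: the source filter is a *types.DiskType pointer whose nil value means "match any disk", while the target is a plain value because the empty string already maps to the hdd default. A minimal, self-contained sketch of that pattern (simplified stand-ins, not the actual SeaweedFS types package):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // DiskType follows the convention used in this patch: the empty
    // string stands for the hdd default.
    type DiskType string

    // toDiskType mirrors, in simplified form, what types.ToDiskType is
    // used for above: normalize a flag string into a DiskType.
    func toDiskType(s string) DiskType {
    	switch strings.ToLower(s) {
    	case "", "hdd":
    		return DiskType("") // hdd default
    	default:
    		return DiskType(strings.ToLower(s))
    	}
    }

    // matchesSource reports whether a volume on the given disk passes
    // the optional source filter; a nil filter matches every disk.
    func matchesSource(filter *DiskType, disk DiskType) bool {
    	return filter == nil || *filter == disk
    }

    func main() {
    	ssd := toDiskType("ssd")
    	fmt.Println(matchesSource(nil, ssd))                // true: no filter set
    	fmt.Println(matchesSource(&ssd, toDiskType("hdd"))) // false: hdd volume, ssd filter
    }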
@@ -119,7 +143,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
balanceCollections = collectCollectionsForVolumeIds(topologyInfo, volumeIds)
} else {
// apply to all volumes for the given collection pattern (regex)
- volumeIds, balanceCollections, err = collectVolumeIdsForEcEncode(commandEnv, *collection, nil, *fullPercentage, *quietPeriod, *verbose)
+ volumeIds, balanceCollections, err = collectVolumeIdsForEcEncode(commandEnv, *collection, sourceDiskType, *fullPercentage, *quietPeriod, *verbose)
if err != nil {
return err
}
@@ -142,7 +166,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return fmt.Errorf("ec encode for volumes %v: %w", volumeIds, err)
}
// ...re-balance ec shards...
- if err := EcBalance(commandEnv, balanceCollections, "", rp, *maxParallelization, *applyBalancing); err != nil {
+ if err := EcBalance(commandEnv, balanceCollections, "", rp, diskType, *maxParallelization, *applyBalancing); err != nil {
return fmt.Errorf("re-balance ec shards for collection(s) %v: %w", balanceCollections, err)
}
// ...then delete original volumes using pre-collected locations.
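Threading diskType through both the encode step and EcBalance means the whole pipeline honors the requested tier: volumes are selected (optionally filtered by -sourceDiskType), encoded into shards on -diskType, re-balanced on that same disk type, and only then are the original volumes deleted.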