path: root/weed/command
author    bingoohuang <bingoo.huang@gmail.com>  2021-02-18 13:57:34 +0800
committer GitHub <noreply@github.com>           2021-02-18 13:57:34 +0800
commit    c8f56f5712c1efffc46de95a8057ed09c21da2db (patch)
tree      bc3330e274901d782395b7396cb54d7cc42608b1 /weed/command
parent    12a78335860c4b1e220748e4adc4097050af5272 (diff)
parent    3575d41009e4367658e75e6ae780c6260b80daf9 (diff)
Merge pull request #2 from chrislusf/master
Diffstat (limited to 'weed/command')
-rw-r--r--  weed/command/backup.go      |  2
-rw-r--r--  weed/command/benchmark.go   |  9
-rw-r--r--  weed/command/download.go    | 12
-rw-r--r--  weed/command/filer_copy.go  |  8
-rw-r--r--  weed/command/filer_sync.go  | 18
-rw-r--r--  weed/command/mount.go       |  2
-rw-r--r--  weed/command/mount_std.go   |  4
-rw-r--r--  weed/command/scaffold.go    | 14
-rw-r--r--  weed/command/server.go      |  1
-rw-r--r--  weed/command/upload.go      |  6
-rw-r--r--  weed/command/volume.go      | 20
11 files changed, 68 insertions, 28 deletions
diff --git a/weed/command/backup.go b/weed/command/backup.go
index 4c37c2763..207df770b 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -72,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
- lookup, err := operation.Lookup(*s.master, vid.String())
+ lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true
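
Editor's note: this hunk replaces a fixed master address with a callback that resolves the current master on every lookup, so a leader change between calls is picked up automatically. A minimal, self-contained sketch of that pattern, with illustrative names rather than the actual SeaweedFS operation package:

package main

import "fmt"

// GetMasterFn mirrors the callback style introduced above: the caller supplies
// a function that yields the current master address instead of a fixed string.
type GetMasterFn func() string

// lookupVolume is a stand-in for operation.Lookup; it resolves the master lazily.
func lookupVolume(masterFn GetMasterFn, volumeId string) string {
	return fmt.Sprintf("http://%s/dir/lookup?volumeId=%s", masterFn(), volumeId)
}

func main() {
	master := "localhost:9333"
	fmt.Println(lookupVolume(func() string { return master }, "7"))
}
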
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 080312aa8..e1b6d8d6c 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -35,6 +35,7 @@ type BenchmarkOptions struct {
sequentialRead *bool
collection *string
replication *string
+ diskType *string
cpuprofile *string
maxCpu *int
grpcDialOption grpc.DialOption
@@ -62,6 +63,7 @@ func init() {
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
+ b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
@@ -234,13 +236,14 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
Count: 1,
Collection: *b.collection,
Replication: *b.replication,
+ DiskType: *b.diskType,
}
- if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
+ if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
- if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
+ if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -290,7 +293,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
}
var bytes []byte
for _, url := range urls {
- bytes, _, err = util.Get(url)
+ bytes, _, err = util.FastGet(url)
if err == nil {
break
}
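
Editor's note: besides the new -disk flag and the DiskType field on the assign request, the call sites above now pass b.masterClient.GetMaster without parentheses, i.e. as a method value of type func() string rather than its result. A hedged sketch of that detail, with stand-in types rather than the real wdclient API:

package main

import "fmt"

// masterClient stands in for wdclient.MasterClient; GetMaster returns whatever
// the client currently believes is the leader master.
type masterClient struct{ current string }

func (m *masterClient) GetMaster() string { return m.current }

// assign is a stand-in for operation.Assign: it accepts a func() string so the
// master is re-resolved at call time.
func assign(masterFn func() string, diskType string) {
	fmt.Printf("assign via %s, diskType=%q\n", masterFn(), diskType)
}

func main() {
	mc := &masterClient{current: "localhost:9333"}
	// mc.GetMaster (no parentheses) is a method value of type func() string,
	// which is why the call sites above drop the trailing ().
	assign(mc.GetMaster, "ssd")
}
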
diff --git a/weed/command/download.go b/weed/command/download.go
index f7588fbf0..7bbff9448 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -44,15 +44,15 @@ var cmdDownload = &Command{
func runDownload(cmd *Command, args []string) bool {
for _, fid := range args {
- if e := downloadToFile(*d.server, fid, util.ResolvePath(*d.dir)); e != nil {
+ if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}
return true
}
-func downloadToFile(server, fileId, saveDir string) error {
- fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
+ fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return lookupError
}
@@ -83,7 +83,7 @@ func downloadToFile(server, fileId, saveDir string) error {
fids := strings.Split(string(content), "\n")
for _, partId := range fids {
var n int
- _, part, err := fetchContent(*d.server, partId)
+ _, part, err := fetchContent(masterFn, partId)
if err == nil {
n, err = f.Write(part)
}
@@ -103,8 +103,8 @@ func downloadToFile(server, fileId, saveDir string) error {
return nil
}
-func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
- fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
+ fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return "", nil, lookupError
}
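
Editor's note: both helpers now receive an operation.GetMasterFn parameter, and the inner call uses that parameter instead of re-reading the package-level *d.server flag. A small sketch of passing the resolver down rather than reading a global, assuming illustrative names:

package main

import (
	"flag"
	"fmt"
)

// server mimics the package-level -server flag the old code dereferenced directly.
var server = flag.String("server", "localhost:9333", "SeaweedFS master")

type getMasterFn func() string

// fetchPart is a stand-in for fetchContent: it takes the resolver as a parameter,
// so other callers (or tests) can inject a different one.
func fetchPart(masterFn getMasterFn, fileId string) string {
	return fmt.Sprintf("fetched %s via %s", fileId, masterFn())
}

// downloadToFile mirrors the refactor above: it receives masterFn once and
// hands the same resolver to every helper it calls.
func downloadToFile(masterFn getMasterFn, fileId string) {
	fmt.Println(fetchPart(masterFn, fileId))
}

func main() {
	flag.Parse()
	downloadToFile(func() string { return *server }, "3,01637037d6")
}
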
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index b95df696c..bf64e72b3 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -37,6 +37,7 @@ type CopyOptions struct {
replication *string
collection *string
ttl *string
+ diskType *string
maxMB *int
masterClient *wdclient.MasterClient
concurrenctFiles *int
@@ -54,6 +55,7 @@ func init() {
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
+ copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
@@ -311,6 +313,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
+ DiskType: *worker.options.diskType,
Path: task.destinationUrlPath,
}
@@ -405,6 +408,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
+ DiskType: *worker.options.diskType,
Path: task.destinationUrlPath + fileName,
}
@@ -459,7 +463,9 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
- operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
+ operation.DeleteFiles(func() string {
+ return copy.masters[0]
+ }, false, worker.options.grpcDialOption, fileIds)
return uploadError
}
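
Editor's note: when a chunked upload fails, the code above collects the file ids of chunks that were already written and deletes them through the master, now supplied via a closure. A minimal sketch of that compensation step with stand-in names:

package main

import (
	"errors"
	"fmt"
)

type chunkInfo struct{ FileId string }

// deleteFiles stands in for operation.DeleteFiles and only logs here.
func deleteFiles(masterFn func() string, fileIds []string) {
	fmt.Printf("deleting %d orphaned chunks via %s\n", len(fileIds), masterFn())
}

// cleanupOnError mirrors the hunk above: if the upload failed, every chunk
// already written is deleted so no orphaned data is left behind.
func cleanupOnError(masters []string, chunks []chunkInfo, uploadError error) error {
	if uploadError == nil {
		return nil
	}
	var fileIds []string
	for _, chunk := range chunks {
		fileIds = append(fileIds, chunk.FileId)
	}
	deleteFiles(func() string { return masters[0] }, fileIds)
	return uploadError
}

func main() {
	err := cleanupOnError([]string{"localhost:9333"}, []chunkInfo{{FileId: "3,01637037d6"}}, errors.New("upload failed"))
	fmt.Println(err)
}
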
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
index 9a23fd731..725f7d485 100644
--- a/weed/command/filer_sync.go
+++ b/weed/command/filer_sync.go
@@ -31,6 +31,8 @@ type SyncOptions struct {
bCollection *string
aTtlSec *int
bTtlSec *int
+ aDiskType *string
+ bDiskType *string
aDebug *bool
bDebug *bool
aProxyByFiler *bool
@@ -56,6 +58,8 @@ func init() {
syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B")
syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A")
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
+ syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd] hard drive or solid state drive on filer A")
+ syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd] hard drive or solid state drive on filer B")
syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
@@ -90,9 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
go func() {
for {
- err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler,
- *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler,
- *syncOptions.bDebug)
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+ *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
@@ -103,9 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
if !*syncOptions.isActivePassive {
go func() {
for {
- err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler,
- *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler,
- *syncOptions.aDebug)
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+ *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
@@ -120,7 +122,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
}
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
- replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler, debug bool) error {
+ replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
// read source filer signature
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
@@ -146,7 +148,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
filerSource := &source.FilerSource{}
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
filerSink := &filersink.FilerSink{}
- filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, grpcDialOption, sinkWriteChunkByFiler)
+ filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
filerSink.SetSourceFiler(filerSource)
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
diff --git a/weed/command/mount.go b/weed/command/mount.go
index fce3eba22..aa6d91740 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -12,6 +12,7 @@ type MountOptions struct {
dirAutoCreate *bool
collection *string
replication *string
+ diskType *string
ttlSec *int
chunkSizeLimitMB *int
concurrentWriters *int
@@ -41,6 +42,7 @@ func init() {
mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
+ mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 83ae870e6..a6d562d40 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -5,6 +5,7 @@ package command
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"os"
"os/user"
"path"
@@ -168,6 +169,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
+ diskType := types.ToDiskType(*option.diskType)
+
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
MountDirectory: dir,
FilerAddress: filer,
@@ -177,6 +180,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
+ DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ConcurrentWriters: *option.concurrentWriters,
CacheDir: *option.cacheDir,
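
Editor's note: the raw -disk string is normalized once through types.ToDiskType and then carried as a typed value in the filesystem options. A minimal sketch of such a normalization; the mapping below is an assumption for illustration, the real one lives in weed/storage/types and may differ:

package main

import (
	"fmt"
	"strings"
)

// DiskType is a stand-in for the type returned by types.ToDiskType.
type DiskType string

const (
	HardDriveType DiskType = ""    // assumed default when -disk is empty or "hdd"
	SsdType       DiskType = "ssd"
)

// toDiskType normalizes the raw -disk flag value (stand-in for types.ToDiskType).
func toDiskType(s string) DiskType {
	if strings.EqualFold(strings.TrimSpace(s), "ssd") {
		return SsdType
	}
	return HardDriveType
}

func main() {
	fmt.Println(toDiskType(" SSD ") == SsdType, toDiskType("") == HardDriveType) // true true
}
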
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 58143a0ad..993391a42 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -124,11 +124,11 @@ interpolateParams = false
[mysql2] # or memsql, tidb
enabled = false
createTable = """
- CREATE TABLE IF NOT EXISTS %s (
- dirhash BIGINT,
- name VARCHAR(1000),
- directory TEXT,
- meta LONGBLOB,
+ CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
+ dirhash BIGINT,
+ name VARCHAR(1000),
+ directory TEXT,
+ meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
@@ -160,11 +160,12 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
+connection_max_lifetime_seconds = 0
[postgres2]
enabled = false
createTable = """
- CREATE TABLE IF NOT EXISTS %s (
+ CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
@@ -181,6 +182,7 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
+connection_max_lifetime_seconds = 0
[cassandra]
# CREATE TABLE filemeta (
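
Editor's note: the scaffold change quotes the %s placeholder, backticks for MySQL and double quotes for PostgreSQL, so a generated table name that collides with a reserved word or contains unusual characters still produces valid DDL; it also adds a connection_max_lifetime_seconds knob to both blocks. A small illustration of the quoting with trimmed DDL, not the full scaffold template:

package main

import "fmt"

func main() {
	// Backtick-quoting the identifier keeps the statement valid even if the
	// table name is a reserved word such as "order".
	mysqlTemplate := "CREATE TABLE IF NOT EXISTS `%s` (dirhash BIGINT, name VARCHAR(1000), PRIMARY KEY (dirhash, name)) DEFAULT CHARSET=utf8;"
	fmt.Println(fmt.Sprintf(mysqlTemplate, "order"))

	// PostgreSQL uses double quotes for the same purpose.
	postgresTemplate := `CREATE TABLE IF NOT EXISTS "%s" (dirhash BIGINT, name VARCHAR(65535), PRIMARY KEY (dirhash, name));`
	fmt.Println(fmt.Sprintf(postgresTemplate, "order"))
}
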
diff --git a/weed/command/server.go b/weed/command/server.go
index 206de34d1..61cac2cc7 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -102,6 +102,7 @@ func init() {
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+ serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd] hard drive or solid state drive")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
diff --git a/weed/command/upload.go b/weed/command/upload.go
index 7115da587..149d71241 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -27,6 +27,7 @@ type UploadOptions struct {
collection *string
dataCenter *string
ttl *string
+ diskType *string
maxMB *int
usePublicUrl *bool
}
@@ -40,6 +41,7 @@ func init() {
upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
+ upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
@@ -94,7 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
- results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
+ results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -111,7 +113,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
- results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
+ results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 9597e843a..ff951afdc 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -2,6 +2,7 @@ package command
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"net/http"
httppprof "net/http/pprof"
"os"
@@ -49,6 +50,7 @@ type VolumeServerOptions struct {
rack *string
whiteList []string
indexType *string
+ diskType *string
fixJpgOrientation *bool
readRedirect *bool
cpuProfile *string
@@ -76,6 +78,7 @@ func init() {
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+ v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
@@ -167,6 +170,21 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
}
+ // set disk types
+ var diskTypes []types.DiskType
+ diskTypeStrings := strings.Split(*v.diskType, ",")
+ for _, diskTypeString := range diskTypeStrings {
+ diskTypes = append(diskTypes, types.ToDiskType(diskTypeString))
+ }
+ if len(diskTypes) == 1 && len(v.folders) > 1 {
+ for i := 0; i < len(v.folders)-1; i++ {
+ diskTypes = append(diskTypes, diskTypes[0])
+ }
+ }
+ if len(v.folders) != len(diskTypes) {
+ glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
+ }
+
// security related white list configuration
if volumeWhiteListOption != "" {
v.whiteList = strings.Split(volumeWhiteListOption, ",")
@@ -212,7 +230,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
- v.folders, v.folderMaxLimits, v.minFreeSpacePercents,
+ v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
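
Editor's note: the new block above broadcasts a single -disk value across every -dir folder and otherwise requires the two counts to match. The same logic, extracted into a self-contained sketch with plain strings standing in for types.DiskType:

package main

import (
	"fmt"
	"strings"
)

// expandDiskTypes mirrors the logic added above: one -disk value applies to all
// folders; otherwise the number of disk types must equal the number of folders.
func expandDiskTypes(diskFlag string, folders []string) ([]string, error) {
	diskTypes := strings.Split(diskFlag, ",")
	if len(diskTypes) == 1 && len(folders) > 1 {
		for i := 0; i < len(folders)-1; i++ {
			diskTypes = append(diskTypes, diskTypes[0])
		}
	}
	if len(folders) != len(diskTypes) {
		return nil, fmt.Errorf("%d directories by -dir, but %d disk types set by -disk", len(folders), len(diskTypes))
	}
	return diskTypes, nil
}

func main() {
	diskTypes, err := expandDiskTypes("ssd", []string{"/data1", "/data2", "/data3"})
	fmt.Println(diskTypes, err) // [ssd ssd ssd] <nil>

	_, err = expandDiskTypes("ssd,hdd", []string{"/data1", "/data2", "/data3"})
	fmt.Println(err) // count mismatch
}
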