Diffstat (limited to 'weed/command/scaffold.go')
-rw-r--r--  weed/command/scaffold.go  261
1 file changed, 222 insertions(+), 39 deletions(-)
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 95ddbd57c..cb20adc72 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -10,16 +10,24 @@ func init() {
}
var cmdScaffold = &Command{
- UsageLine: "scaffold [filer]",
+ UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize.
+ The options can also be overridden by environment variables.
+ For example, the filer.toml mysql password can be overridden by the environment variable
+ export WEED_MYSQL_PASSWORD=some_password
+ Environment variable rules:
+ * Prefix the variable name with "WEED_"
+ * Uppercase the rest of the variable name.
+ * Replace '.' with '_'
+
`,
}
var (
outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
- config = cmdScaffold.Flag.String("config", "filer", "[filer|replication] the configuration file to generate")
+ config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)
func runScaffold(cmd *Command, args []string) bool {
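The environment-variable rules described in the help text above map directly onto spf13/viper. A minimal sketch of how such overrides can be wired, assuming viper is the configuration loader (the loader itself is not part of this diff):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigName("filer") // looks for filer.toml on the search paths
	viper.AddConfigPath(".")
	viper.SetEnvPrefix("weed")                             // rule: prefix with "WEED_"
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // rule: '.' becomes '_'
	viper.AutomaticEnv()                                   // environment variables take precedence
	_ = viper.ReadInConfig()

	// With WEED_MYSQL_PASSWORD=some_password exported, this prints the override.
	fmt.Println(viper.GetString("mysql.password"))
}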
@@ -28,8 +36,14 @@ func runScaffold(cmd *Command, args []string) bool {
switch *config {
case "filer":
content = FILER_TOML_EXAMPLE
+ case "notification":
+ content = NOTIFICATION_TOML_EXAMPLE
case "replication":
content = REPLICATION_TOML_EXAMPLE
+ case "security":
+ content = SECURITY_TOML_EXAMPLE
+ case "master":
+ content = MASTER_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -37,7 +51,7 @@ func runScaffold(cmd *Command, args []string) bool {
}
if *outputPath != "" {
- ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0x755)
+ ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
} else {
println(content)
}
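The permission change above fixes a subtle literal bug: 0x755 is hexadecimal (decimal 1877), so the old code created files with mode 03525 instead of the intended octal permissions. A quick check:

package main

import "fmt"

func main() {
	fmt.Printf("%o\n", 0x755) // 3525, the mode the hex literal actually produced
	fmt.Printf("%o\n", 0644)  // 644, the intended rw-r--r--
}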
@@ -53,27 +67,37 @@ const (
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
-[memory]
-# local in memory, mostly for testing purpose
-enabled = false
+####################################################
+# Customizable filer server options
+####################################################
+[filer.options]
+# with http DELETE, by default the filer would check whether a folder is empty.
+# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
+recursive_delete = false
+# directories under this folder will automatically become separate buckets
+buckets_folder = "/buckets"
+# directories under this folder will store message queue data
+queues_folder = "/queues"
-[leveldb]
+####################################################
+# The following are filer store options
+####################################################
+
+[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
+# faster than the previous leveldb, recommended.
enabled = true
dir = "." # directory to store level db files
-####################################################
-# multiple filers on shared storage, fairly scalable
-####################################################
-
-[mysql]
+[mysql] # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
-# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
-# name VARCHAR(1000) COMMENT 'directory or file name',
-# directory VARCHAR(4096) COMMENT 'full path to parent directory',
-# meta BLOB,
+# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
+# name VARCHAR(1000) COMMENT 'directory or file name',
+# directory TEXT COMMENT 'full path to parent directory',
+# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
+
enabled = false
hostname = "localhost"
port = 3306
@@ -82,12 +106,13 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
+interpolateParams = false
-[postgres]
+[postgres] # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
-# name VARCHAR(1000),
-# directory VARCHAR(4096),
+# name VARCHAR(65535),
+# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
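To make the schema above concrete, here is a hypothetical lookup against the filemeta table using database/sql; the connection string, the lib/pq driver choice, and the dirhash value are illustrative only:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed driver; any database/sql Postgres driver works
)

func main() {
	db, err := sql.Open("postgres", "user=postgres dbname=seaweedfs sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Fetch one entry by the (dirhash, name) primary key defined above.
	var meta []byte
	err = db.QueryRow(
		"SELECT meta FROM filemeta WHERE dirhash = $1 AND name = $2",
		int64(12345), "example.txt",
	).Scan(&meta)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes of metadata\n", len(meta))
}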
@@ -118,7 +143,7 @@ hosts=[
enabled = false
address = "localhost:6379"
password = ""
-db = 0
+database = 0
[redis_cluster]
enabled = false
@@ -130,23 +155,75 @@ addresses = [
"localhost:30005",
"localhost:30006",
]
+password = ""
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = true
+# automatically use the closest Redis server for reads
+routeByLatency = true
+
+[etcd]
+enabled = false
+servers = "localhost:2379"
+timeout = "3s"
+`
+
+ NOTIFICATION_TOML_EXAMPLE = `
+# A sample TOML config file for SeaweedFS notifications
+# Used by "weed filer" or "weed server -filer", and by "weed filer.replicate"
+# Put this file in one of these locations, in descending priority
+# ./notification.toml
+# $HOME/.seaweedfs/notification.toml
+# /etc/seaweedfs/notification.toml
####################################################
# notification
-# sends filer updates for each file to an external message queue
+# send and receive filer updates for each file via an external message queue
####################################################
[notification.log]
+# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false
+
[notification.kafka]
enabled = false
hosts = [
"localhost:9092"
]
topic = "seaweedfs_filer"
+offsetFile = "./last.offset"
+offsetSaveIntervalSeconds = 10
+
+
+[notification.aws_sqs]
+# experimental, let me know if it works
+enabled = false
+aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+region = "us-east-2"
+sqs_queue_name = "my_filer_queue" # an existing queue name
+
+[notification.google_pub_sub]
+# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
+enabled = false
+google_application_credentials = "/path/to/x.json" # path to json credential file
+project_id = "" # an existing project id
+topic = "seaweedfs_filer_topic" # a topic, auto-created if it does not exist
+
+[notification.gocdk_pub_sub]
+# The Go Cloud Development Kit (https://gocloud.dev).
+# PubSub API (https://godoc.org/gocloud.dev/pubsub).
+# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
+enabled = false
+# This URL will Dial the RabbitMQ server at the URL in the environment
+# variable RABBIT_SERVER_URL and open the exchange "myexchange".
+# The exchange must have already been created by some other means, like
+# the RabbitMQ management plugin.
+topic_url = "rabbit://myexchange"
+sub_url = "rabbit://myqueue"
`
+
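For the gocdk_pub_sub section above, publishing through the Go CDK looks roughly like the sketch below; the rabbit:// URL mirrors topic_url, and RABBIT_SERVER_URL must be exported as the Go CDK documents:

package main

import (
	"context"
	"log"

	"gocloud.dev/pubsub"
	_ "gocloud.dev/pubsub/rabbitpubsub" // registers the rabbit:// URL scheme
)

func main() {
	ctx := context.Background()

	// Opens the exchange named in topic_url; the exchange must already exist.
	topic, err := pubsub.OpenTopic(ctx, "rabbit://myexchange")
	if err != nil {
		log.Fatal(err)
	}
	defer topic.Shutdown(ctx)

	if err := topic.Send(ctx, &pubsub.Message{Body: []byte("file updated")}); err != nil {
		log.Fatal(err)
	}
}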
REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
@@ -158,34 +235,31 @@ topic = "seaweedfs_filer"
[source.filer]
enabled = true
grpcAddress = "localhost:18888"
-directory = "/buckets" # all files under this directory tree are replicated
-
-[notification.kafka]
-enabled = false
-hosts = [
- "localhost:9092"
-]
-topic = "seaweedfs_filer1_to_filer2"
-offsetFile = "./last.offset"
-offsetSaveIntervalSeconds = 10
+# all files under this directory tree are replicated.
+# this is not a directory on your hard drive, but on your filer.
+# i.e., all files with this "prefix" are sent to the notification message queue.
+directory = "/buckets"
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
-directory = "/backup" # all replicated files are under this directory tree
+# all replicated files are under this directory tree
+# this is not a directory on your hard drive, but on your filer.
+# i.e., all received files will be stored under this directory "prefix".
+directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
-# default loads credentials from the shared credentials file (~/.aws/credentials).
+# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
-directory = "" # destination directory (do not prefix or suffix with "/")
+directory = "/" # destination directory
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@@ -200,15 +274,124 @@ enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
-directory = "" # destination directory (do not prefix or suffix with "/")
+directory = "/" # destination directory
[sink.backblaze]
-# experimental, let me know if it works
enabled = false
-account_id = ""
-account_key = ""
+b2_account_id = ""
+b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
-directory = "" # destination directory (do not prefix or suffix with "/")
+directory = "/" # destination directory
+
+`
+
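The S3 sink above relies on the AWS SDK's normal credential chain when the key fields are left empty. A minimal sketch of the upload path such a sink implies, using aws-sdk-go (bucket, key, and body are hypothetical):

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// With aws_access_key_id left empty, the SDK falls back to ~/.aws/credentials.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")}))
	uploader := s3manager.NewUploader(sess)

	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("your_bucket_name"),   // the existing bucket from the config
		Key:    aws.String("backup/example.txt"), // destination directory prefix + file name
		Body:   strings.NewReader("replicated file contents"),
	})
	if err != nil {
		log.Fatal(err)
	}
}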
+ SECURITY_TOML_EXAMPLE = `
+# Put this file in one of these locations, in descending priority
+# ./security.toml
+# $HOME/.seaweedfs/security.toml
+# /etc/seaweedfs/security.toml
+# this file is read by master, volume server, and filer
+
+# the jwt signing key is read by master and volume server.
+# a jwt expires after 10 seconds by default.
+[jwt.signing]
+key = ""
+expires_after_seconds = 10 # seconds
+
+# jwt for read is only supported with master+volume setup. Filer does not support this mode.
+[jwt.signing.read]
+key = ""
+expires_after_seconds = 10 # seconds
+
+# all grpc tls authentications are mutual
+# the values for the following ca, cert, and key are paths to the PEM files.
+# the host name is not checked, so the PEM files can be shared.
+[grpc]
+ca = ""
+
+[grpc.volume]
+cert = ""
+key = ""
+
+[grpc.master]
+cert = ""
+key = ""
+
+[grpc.filer]
+cert = ""
+key = ""
+
+[grpc.msg_broker]
+cert = ""
+key = ""
+
+# use this for any place that needs a grpc client
+# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
+[grpc.client]
+cert = ""
+key = ""
+
+
+# volume server https options
+# Note: work in progress!
+# this does not yet work with other clients, e.g., "weed filer|mount".
+[https.client]
+enabled = true
+[https.volume]
+cert = ""
+key = ""
+
+
+`
+
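The [jwt.signing] sections above pair a shared key with a short expiry. A generic sketch of issuing such a token with github.com/golang-jwt/jwt, an assumed library choice rather than necessarily the one this code uses:

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("the-key-from-security.toml") // placeholder signing key

	// Mirror expires_after_seconds = 10 from [jwt.signing] above.
	claims := jwt.MapClaims{"exp": time.Now().Add(10 * time.Second).Unix()}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)

	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}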
+ MASTER_TOML_EXAMPLE = `
+# Put this file in one of these locations, in descending priority
+# ./master.toml
+# $HOME/.seaweedfs/master.toml
+# /etc/seaweedfs/master.toml
+# this file is read by master
+
+[master.maintenance]
+# periodically run these scripts, the same as running them from 'weed shell'
+scripts = """
+ ec.encode -fullPercent=95 -quietFor=1h
+ ec.rebuild -force
+ ec.balance -force
+ volume.balance -force
+"""
+sleep_minutes = 17 # sleep minutes between each script execution
+
+[master.filer]
+default_filer_url = "http://localhost:8888/"
+
+[master.sequencer]
+type = "memory" # Choose [memory|etcd] type for storing the file id sequence
+# when sequencer.type = etcd, set the listen-client-urls of the etcd cluster that stores the file id sequence
+# example : http://127.0.0.1:2379,http://127.0.0.1:2389
+sequencer_etcd_urls = "http://127.0.0.1:2379"
+
+
+# configurations for tiered cloud storage
+# old volumes are transparently moved to the cloud for cost efficiency
+[storage.backend]
+ [storage.backend.s3.default]
+ enabled = false
+ aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+ aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+ region = "us-east-2"
+ bucket = "your_bucket_name" # an existing bucket
+
+# create this number of logical volumes if there are no more writable volumes
+# count_x means how many copies of the data.
+# e.g.:
+# 000 has only one copy, count_1
+# 010 and 001 have two copies, count_2
+# 011 has three copies, count_3
+[master.volume_growth]
+count_1 = 7 # create 1 x 7 = 7 actual volumes
+count_2 = 6 # create 2 x 6 = 12 actual volumes
+count_3 = 3 # create 3 x 3 = 9 actual volumes
+count_other = 1 # create n x 1 = n actual volumes
`
)