author    Mateusz Kondej <mk@pop-os.localdomain>  2022-04-17 04:26:17 +0200
committer Mateusz Kondej <mateuszkj@gmail.com>    2022-04-18 20:31:22 +0200
commit    8c70bb5b9f6e1de2b447ba40e62ab240f00e383d (patch)
tree      f8d6a191db3136efb044c4b1927621cc89d94fb3 /deploy
parent    c2ec139c1c34f7d6653b7094fdb97c8117e1c1be (diff)
HashiCorp Nomad deploy example
Diffstat (limited to 'deploy')
-rw-r--r--  deploy/nomad/README.md                     38
-rw-r--r--  deploy/nomad/example-seaweedfs-app.hcl     32
-rw-r--r--  deploy/nomad/example-seaweedfs-volume.hcl  26
-rw-r--r--  deploy/nomad/seaweedfs-csi.hcl             52
-rw-r--r--  deploy/nomad/seaweedfs.hcl                317
5 files changed, 465 insertions, 0 deletions
diff --git a/deploy/nomad/README.md b/deploy/nomad/README.md
new file mode 100644
index 0000000..65030c3
--- /dev/null
+++ b/deploy/nomad/README.md
@@ -0,0 +1,38 @@
+# Example of using SeaweedFS with HashiCorp Nomad
+
+
+## Running a SeaweedFS cluster
+
+You can skip this part if you already have a running SeaweedFS cluster.
+
+Assumptions:
+ - A running Nomad cluster
+ - At least 3 nodes with static IP addresses
+ - Memory oversubscription enabled (see the sketch below and https://learn.hashicorp.com/tutorials/nomad/memory-oversubscription?in=nomad%2Fadvanced-scheduling)
+ - A running PostgreSQL instance for the filer
+
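+A minimal sketch for enabling memory oversubscription (assuming Nomad 1.2 or newer; older versions have to use the scheduler configuration API instead):
+
+```shell
+nomad operator scheduler set-config -memory-oversubscription=true
+```
+
+Then deploy the cluster job:
+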
+```shell
+export NOMAD_ADDR=http://nomad.service.consul:4646
+
+nomad run seaweedfs.hcl
+```
+
+The SeaweedFS master will be available at http://seaweedfs-master.service.consul:9333/
+
+The SeaweedFS filer will be available at http://seaweedfs-filer.service.consul:8888/
+
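+A quick verification sketch (assuming the master is up; /cluster/status is a SeaweedFS master HTTP endpoint, adjust if your version differs):
+
+```shell
+nomad job status seaweedfs
+curl http://seaweedfs-master.service.consul:9333/cluster/status
+```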
+
+## Running CSI
+
+```shell
+export NOMAD_ADDR=http://nomad.service.consul:4646
+
+# Start CSI plugin
+nomad run seaweedfs-csi.hcl
+
+# Create volume
+nomad volume create example-seaweedfs-volume.hcl
+
+# Start sample app
+nomad run example-seaweedfs-app.hcl
+```
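+
+Once the plugin allocations are healthy, the plugin and the volume can be inspected with the standard Nomad CLI (a quick sketch):
+
+```shell
+nomad plugin status seaweedfs
+nomad volume status example-seaweedfs-volume
+```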
diff --git a/deploy/nomad/example-seaweedfs-app.hcl b/deploy/nomad/example-seaweedfs-app.hcl
new file mode 100644
index 0000000..398e557
--- /dev/null
+++ b/deploy/nomad/example-seaweedfs-app.hcl
@@ -0,0 +1,32 @@
+job "example-seaweedfs-app" {
+ datacenters = ["dc1"]
+
+ group "apps" {
+ volume "example-seaweedfs-volume" {
+ type = "csi"
+ source = "example-seaweedfs-volume"
+ access_mode = "multi-node-multi-writer"
+ attachment_mode = "file-system"
+ }
+
+ task "sample" {
+ driver = "docker"
+
+ config {
+ image = "kadalu/sample-pv-check-app:latest"
+ force_pull = false
+
+ entrypoint = [
+ "tail",
+ "-f",
+ "/dev/null",
+ ]
+ }
+
+ volume_mount {
+ volume = "example-seaweedfs-volume"
+ destination = "/mnt/pv"
+ }
+ }
+ }
+}
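+
+# A sketch for checking the mount from inside the running task (the allocation
+# ID comes from 'nomad job status example-seaweedfs-app'):
+#
+#   nomad alloc exec <alloc-id> df -h /mnt/pv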
diff --git a/deploy/nomad/example-seaweedfs-volume.hcl b/deploy/nomad/example-seaweedfs-volume.hcl
new file mode 100644
index 0000000..3a5fb66
--- /dev/null
+++ b/deploy/nomad/example-seaweedfs-volume.hcl
@@ -0,0 +1,26 @@
+id = "example-seaweedfs-volume"
+name = "example-seaweedfs-volume"
+type = "csi"
+plugin_id = "seaweedfs"
+
+capacity_min = "256GiB"
+capacity_max = "512GiB"
+
+capability {
+ access_mode = "multi-node-multi-writer"
+ attachment_mode = "file-system"
+}
+
+# Optional: for 'nomad volume create', specify mount options to validate for
+# 'attachment_mode = "file-system"'. Registering an existing volume will record
+# but ignore these fields.
+mount_options {
+ mount_flags = ["rw"]
+}
+
+parameters {
+ # Available options: https://github.com/seaweedfs/seaweedfs-csi-driver/blob/master/pkg/driver/mounter_seaweedfs.go
+ collection = "example"
+ replication = "000"
+ path = "/buckets/example"
+}
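+
+# A sketch of tearing the volume down when it is no longer needed (standard
+# Nomad CLI; 'deregister' only removes the volume from Nomad state, 'delete'
+# also asks the plugin to remove the underlying data):
+#
+#   nomad volume deregister example-seaweedfs-volume
+#   nomad volume delete example-seaweedfs-volume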
diff --git a/deploy/nomad/seaweedfs-csi.hcl b/deploy/nomad/seaweedfs-csi.hcl
new file mode 100644
index 0000000..3ad09f7
--- /dev/null
+++ b/deploy/nomad/seaweedfs-csi.hcl
@@ -0,0 +1,52 @@
+job "seaweedfs-csi" {
+ datacenters = ["dc1"]
+
+ type = "system"
+
+ update {
+ max_parallel = 1
+ stagger = "60s"
+ }
+
+ group "nodes" {
+
+ ephemeral_disk {
+ migrate = false
+ size = 10240
+ sticky = false
+ }
+
+ task "plugin" {
+ driver = "docker"
+
+ config {
+ image = "chrislusf/seaweedfs-csi-driver:latest"
+ force_pull = "true"
+ network_mode = "host"
+
+ args = [
+ "--endpoint=unix://csi/csi.sock",
+ "--filer=seaweedfs-filer.service.consul:8888",
+ "--nodeid=${node.unique.name}",
+ "--cacheCapacityMB=256",
+ "--cacheDir=${NOMAD_TASK_DIR}/cache_dir",
+ ]
+
+ privileged = true
+ }
+
+ csi_plugin {
+ id = "seaweedfs"
+ type = "monolith"
+ mount_dir = "/csi"
+ }
+
+ resources {
+ cpu = 512
+ memory = 1024
+        memory_max = 3072 # We need memory oversubscription enabled for this
+ }
+ }
+ }
+}
+
diff --git a/deploy/nomad/seaweedfs.hcl b/deploy/nomad/seaweedfs.hcl
new file mode 100644
index 0000000..999cfd8
--- /dev/null
+++ b/deploy/nomad/seaweedfs.hcl
@@ -0,0 +1,317 @@
+# WWW master: http://seaweedfs-master.service.consul:9333/
+# WWW filer: http://seaweedfs-filer.service.consul:8888/
+
+job "seaweedfs" {
+ datacenters = ["dc1"]
+ type = "service"
+
+ group "seaweedfs-master" {
+ count = 3
+
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "regexp"
+ # We need static IPs for master servers
+ # dc1-n1 - 172.21.100.51
+ # dc1-n2 - 172.21.100.52
+ # dc1-n3 - 172.21.100.53
+      value = "^(dc1-n1|dc1-n2|dc1-n3)$"
+ }
+
+ constraint {
+ operator = "distinct_hosts"
+ value = "true"
+ }
+
+ restart {
+ attempts = 10
+ interval = "5m"
+ delay = "25s"
+ mode = "delay"
+ }
+
+ update {
+ max_parallel = 1
+ stagger = "5m"
+ canary = 0
+ }
+
+ migrate {
+ min_healthy_time = "2m"
+ }
+
+ network {
+ port "http" {
+ static = 9333
+ }
+ port "grpc" {
+ static = 19333
+ }
+ }
+
+ task "seaweedfs-master" {
+ driver = "docker"
+ env {
+ WEED_MASTER_VOLUME_GROWTH_COPY_1 = "1"
+ WEED_MASTER_VOLUME_GROWTH_COPY_2 = "2"
+ WEED_MASTER_VOLUME_GROWTH_COPY_OTHER = "1"
+ }
+ config {
+ image = "chrislusf/seaweedfs:latest"
+ force_pull = "true"
+ network_mode = "host"
+ args = [
+ "-v=1", "master",
+ "-volumeSizeLimitMB=100",
+ "-resumeState=false",
+ "-ip=${NOMAD_IP_http}",
+ "-port=${NOMAD_PORT_http}",
+ "-peers=172.21.100.51:${NOMAD_PORT_http},172.21.100.52:${NOMAD_PORT_http},172.21.100.53:${NOMAD_PORT_http}",
+ "-mdir=${NOMAD_TASK_DIR}/master"
+ ]
+ }
+
+ resources {
+ cpu = 128
+ memory = 128
+ }
+
+ service {
+ tags = ["${node.unique.name}"]
+ name = "seaweedfs-master"
+ port = "http"
+ check {
+ type = "tcp"
+ port = "http"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+ }
+ }
+
+
+
+
+ group "seaweedfs-volume" {
+ count = 3
+
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "regexp"
+      # We want to store data on specific, predetermined servers
+      value = "^(dc1-n1|dc1-n2|dc1-n3)$"
+ }
+
+ constraint {
+ operator = "distinct_hosts"
+ value = "true"
+ }
+
+ restart {
+ attempts = 10
+ interval = "5m"
+ delay = "25s"
+ mode = "delay"
+ }
+
+ update {
+ max_parallel = 1
+ stagger = "2m"
+ }
+
+ migrate {
+ min_healthy_time = "2m"
+ }
+
+ network {
+ port "http" {
+ static = 8082
+ }
+ port "grpc" {
+ static = 18082
+ }
+ }
+
+ task "seaweedfs-volume" {
+ driver = "docker"
+ user = "1000:1000"
+
+ config {
+ image = "chrislusf/seaweedfs:latest"
+ force_pull = "true"
+ network_mode = "host"
+ args = [
+ "volume",
+ "-dataCenter=${NOMAD_DC}",
+# "-rack=${meta.rack}",
+ "-rack=${node.unique.name}",
+ "-mserver=seaweedfs-master.service.consul:9333",
+ "-port=${NOMAD_PORT_http}",
+ "-ip=${NOMAD_IP_http}",
+ "-publicUrl=${NOMAD_ADDR_http}",
+ "-preStopSeconds=1",
+ "-dir=/data"
+ ]
+
+ mounts = [
+ {
+ type = "bind"
+            source = "/data/seaweedfs-volume-data" # this directory must exist on the host VM
+ target = "/data"
+ readonly = false
+ bind_options = {
+ propagation = "rprivate"
+ }
+ }
+ ]
+ }
+
+ resources {
+ cpu = 512
+ memory = 2048
+        memory_max = 4096 # We need memory oversubscription enabled for this
+ }
+
+ service {
+ tags = ["${node.unique.name}"]
+ name = "seaweedfs-volume"
+ port = "http"
+ check {
+ type = "tcp"
+ port = "http"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+ }
+ }
+
+
+ group "seaweedfs-filer" {
+ count = 1
+
+ constraint {
+ operator = "distinct_hosts"
+ value = "true"
+ }
+
+ restart {
+ attempts = 10
+ interval = "5m"
+ delay = "25s"
+ mode = "delay"
+ }
+
+ migrate {
+ min_healthy_time = "2m"
+ }
+
+ network {
+ port "http" {
+ static = 8888
+ }
+ port "grpc" {
+ static = 18888
+ }
+ port "s3" {
+ static = 8333
+ }
+ }
+
+ task "seaweedfs-filer" {
+ driver = "docker"
+ user = "1000:1000"
+
+ config {
+ image = "chrislusf/seaweedfs:latest"
+ force_pull = "true"
+ network_mode = "host"
+ args = [
+ "filer",
+ "-dataCenter=${NOMAD_DC}",
+# "-rack=${meta.rack}",
+ "-rack=${node.unique.name}",
+ "-defaultReplicaPlacement=000",
+ "-master=seaweedfs-master.service.consul:9333",
+ "-s3",
+ "-ip=${NOMAD_IP_http}",
+ "-port=${NOMAD_PORT_http}",
+ "-s3.port=${NOMAD_PORT_s3}"
+ ]
+ mounts = [
+ {
+ type = "bind"
+ source = "local/filer.toml"
+ target = "/etc/seaweedfs/filer.toml"
+ }
+ ]
+
+ }
+
+ template {
+ destination = "local/filer.toml"
+ change_mode = "restart"
+ data = <<EOH
+[postgres2]
+enabled = true
+createTable = """
+ CREATE TABLE IF NOT EXISTS "%s" (
+ dirhash BIGINT,
+ name VARCHAR(65535),
+ directory VARCHAR(65535),
+ meta bytea,
+ PRIMARY KEY (dirhash, name)
+ );
+"""
+hostname = "172.21.100.54"
+port = 5432
+username = "seaweedfs"
+password = "pass1234567"
+database = "seaweedfs"
+schema = ""
+sslmode = "disable"
+connection_max_idle = 100
+connection_max_open = 100
+connection_max_lifetime_seconds = 0
+enableUpsert = true
+upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
+
+# ssh ubuntu@172.21.100.54
+# sudo -u postgres psql -c "CREATE ROLE seaweedfs WITH LOGIN PASSWORD 'pass1234567';"
+# sudo -u postgres psql -c "CREATE DATABASE seaweedfs OWNER seaweedfs;"
+EOH
+ }
+
+ resources {
+ cpu = 512
+ memory = 256
+ }
+
+ service {
+ tags = ["${node.unique.name}"]
+ name = "seaweedfs-filer"
+ port = "http"
+ check {
+ type = "tcp"
+ port = "http"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+
+ service {
+ tags = ["${node.unique.name}"]
+ name = "seaweedfs-s3"
+ port = "s3"
+ check {
+ type = "tcp"
+ port = "s3"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+ }
+ }
+
+}
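+
+# A sketch for checking that the volume servers registered with the master
+# (assuming /dir/status, the master's topology endpoint, is unchanged in your
+# SeaweedFS version):
+#
+#   curl http://seaweedfs-master.service.consul:9333/dir/status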