author     Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>  2025-11-05 13:44:27 +0500
committer  GitHub <noreply@github.com>  2025-11-05 13:44:27 +0500
commit     7c4179507810633f20d987fcf159a1c0412bc43d (patch)
tree       65c3707fc8d97932405b681fe99eea2eedbd3e97
parent     bbd7546cea2c553064e4ed7fa9d1470e0598cdd1 (diff)
parent     cc444b186849cc4e476d539dd2643058a8160534 (diff)
download   seaweedfs-7c4179507810633f20d987fcf159a1c0412bc43d.tar.xz
           seaweedfs-7c4179507810633f20d987fcf159a1c0412bc43d.zip
Merge branch 'master' into allow_delete_objects_by_TTL
-rw-r--r--  k8s/charts/seaweedfs/values.yaml              |  72
-rw-r--r--  weed/admin/static/css/admin.css               | 120
-rw-r--r--  weed/command/volume.go                        |   2
-rw-r--r--  weed/server/constants/volume.go               |   4
-rw-r--r--  weed/server/volume_grpc_client_to_master.go   |   4
-rw-r--r--  weed/server/volume_server.go                  |   6
-rw-r--r--  weed/shell/command_volume_check_disk.go       |   7
-rw-r--r--  weed/shell/command_volume_check_disk_test.go  |   7
-rw-r--r--  weed/topology/disk.go                         |  13
-rw-r--r--  weed/topology/topology_test.go                | 114
-rw-r--r--  weed/topology/volume_growth.go                |   4
11 files changed, 289 insertions, 64 deletions
diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml
index 7961d9be4..1bfe5c72c 100644
--- a/k8s/charts/seaweedfs/values.yaml
+++ b/k8s/charts/seaweedfs/values.yaml
@@ -235,27 +235,27 @@ master:
ingress:
enabled: false
- className: "nginx"
+ className: ""
# host: false for "*" hostname
host: "master.seaweedfs.local"
path: "/sw-master/?(.*)"
pathType: ImplementationSpecific
- annotations:
- nginx.ingress.kubernetes.io/auth-type: "basic"
- nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
- nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
- nginx.ingress.kubernetes.io/service-upstream: "true"
- nginx.ingress.kubernetes.io/rewrite-target: /$1
- nginx.ingress.kubernetes.io/use-regex: "true"
- nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
- nginx.ingress.kubernetes.io/ssl-redirect: "false"
- nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
- nginx.ingress.kubernetes.io/configuration-snippet: |
- sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
- sub_filter '="/' '="./'; #make absolute paths to relative
- sub_filter '=/' '=./';
- sub_filter '/seaweedfsstatic' './seaweedfsstatic';
- sub_filter_once off;
+ annotations: {}
+ # nginx.ingress.kubernetes.io/auth-type: "basic"
+ # nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+ # nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
+ # nginx.ingress.kubernetes.io/service-upstream: "true"
+ # nginx.ingress.kubernetes.io/rewrite-target: /$1
+ # nginx.ingress.kubernetes.io/use-regex: "true"
+ # nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+ # nginx.ingress.kubernetes.io/ssl-redirect: "false"
+ # nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+ # nginx.ingress.kubernetes.io/configuration-snippet: |
+ # sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
+ # sub_filter '="/' '="./'; #make absolute paths to relative
+ # sub_filter '=/' '=./';
+ # sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+ # sub_filter_once off;
tls: []
extraEnvironmentVars:
@@ -769,28 +769,28 @@ filer:
ingress:
enabled: false
- className: "nginx"
+ className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/sw-filer/?(.*)"
pathType: ImplementationSpecific
- annotations:
- nginx.ingress.kubernetes.io/backend-protocol: GRPC
- nginx.ingress.kubernetes.io/auth-type: "basic"
- nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
- nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
- nginx.ingress.kubernetes.io/service-upstream: "true"
- nginx.ingress.kubernetes.io/rewrite-target: /$1
- nginx.ingress.kubernetes.io/use-regex: "true"
- nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
- nginx.ingress.kubernetes.io/ssl-redirect: "false"
- nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
- nginx.ingress.kubernetes.io/configuration-snippet: |
- sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
- sub_filter '="/' '="./'; #make absolute paths to relative
- sub_filter '=/' '=./';
- sub_filter '/seaweedfsstatic' './seaweedfsstatic';
- sub_filter_once off;
+ annotations: {}
+ # nginx.ingress.kubernetes.io/backend-protocol: GRPC
+ # nginx.ingress.kubernetes.io/auth-type: "basic"
+ # nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+ # nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
+ # nginx.ingress.kubernetes.io/service-upstream: "true"
+ # nginx.ingress.kubernetes.io/rewrite-target: /$1
+ # nginx.ingress.kubernetes.io/use-regex: "true"
+ # nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+ # nginx.ingress.kubernetes.io/ssl-redirect: "false"
+ # nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+ # nginx.ingress.kubernetes.io/configuration-snippet: |
+ # sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
+ # sub_filter '="/' '="./'; #make absolute paths to relative
+ # sub_filter '=/' '=./';
+ # sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+ # sub_filter_once off;
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
@@ -1009,7 +1009,7 @@ s3:
ingress:
enabled: false
- className: "nginx"
+ className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/"
diff --git a/weed/admin/static/css/admin.css b/weed/admin/static/css/admin.css
index a945d320e..8f387b1df 100644
--- a/weed/admin/static/css/admin.css
+++ b/weed/admin/static/css/admin.css
@@ -1,5 +1,14 @@
/* SeaweedFS Dashboard Custom Styles */
+/* Link colors - muted */
+a {
+ color: #5b7c99;
+}
+
+a:hover {
+ color: #4a6a88;
+}
+
/* Sidebar Styles */
.sidebar {
position: fixed;
@@ -23,11 +32,11 @@
}
.sidebar .nav-link:hover {
- color: #007bff;
+ color: #5b7c99;
}
.sidebar .nav-link.active {
- color: #007bff;
+ color: #5b7c99;
}
.sidebar .nav-link:hover .feather,
@@ -51,23 +60,23 @@ main {
/* Custom card styles */
.border-left-primary {
- border-left: 0.25rem solid #4e73df !important;
+ border-left: 0.25rem solid #6b8caf !important;
}
.border-left-success {
- border-left: 0.25rem solid #1cc88a !important;
+ border-left: 0.25rem solid #5a8a72 !important;
}
.border-left-info {
- border-left: 0.25rem solid #36b9cc !important;
+ border-left: 0.25rem solid #6a9aaa !important;
}
.border-left-warning {
- border-left: 0.25rem solid #f6c23e !important;
+ border-left: 0.25rem solid #b8995e !important;
}
.border-left-danger {
- border-left: 0.25rem solid #e74a3b !important;
+ border-left: 0.25rem solid #a5615c !important;
}
/* Status badges */
@@ -75,6 +84,89 @@ main {
font-size: 0.875em;
}
+/* Muted badge colors - override Bootstrap defaults */
+.badge.bg-primary,
+.bg-primary {
+ background-color: #6b8caf !important;
+}
+
+.badge.bg-success,
+.bg-success {
+ background-color: #5a8a72 !important;
+}
+
+.badge.bg-info,
+.bg-info {
+ background-color: #6a9aaa !important;
+}
+
+.badge.bg-warning,
+.bg-warning {
+ background-color: #b8995e !important;
+}
+
+.badge.bg-danger,
+.bg-danger {
+ background-color: #a5615c !important;
+}
+
+.badge.bg-secondary,
+.bg-secondary {
+ background-color: #7a7d85 !important;
+}
+
+/* Muted card background colors for text-bg-* utility classes */
+.text-bg-primary,
+.card.text-bg-primary {
+ background-color: #6b8caf !important;
+ color: #fff !important;
+}
+
+.text-bg-success,
+.card.text-bg-success {
+ background-color: #5a8a72 !important;
+ color: #fff !important;
+}
+
+.text-bg-info,
+.card.text-bg-info {
+ background-color: #6a9aaa !important;
+ color: #fff !important;
+}
+
+.text-bg-warning,
+.card.text-bg-warning {
+ background-color: #b8995e !important;
+ color: #fff !important;
+}
+
+.text-bg-danger,
+.card.text-bg-danger {
+ background-color: #a5615c !important;
+ color: #fff !important;
+}
+
+/* Muted text color utilities */
+.text-primary {
+ color: #6b8caf !important;
+}
+
+.text-success {
+ color: #5a8a72 !important;
+}
+
+.text-info {
+ color: #6a9aaa !important;
+}
+
+.text-warning {
+ color: #b8995e !important;
+}
+
+.text-danger {
+ color: #a5615c !important;
+}
+
/* Progress bars */
.progress {
background-color: #f8f9fc;
@@ -123,13 +215,13 @@ main {
/* Buttons */
.btn-primary {
- background-color: #4e73df;
- border-color: #4e73df;
+ background-color: #6b8caf;
+ border-color: #6b8caf;
}
.btn-primary:hover {
- background-color: #2e59d9;
- border-color: #2653d4;
+ background-color: #5b7c99;
+ border-color: #5b7c99;
}
/* Text utilities */
@@ -163,7 +255,7 @@ main {
/* Custom utilities */
.bg-gradient-primary {
- background: linear-gradient(180deg, #4e73df 10%, #224abe 100%);
+ background: linear-gradient(180deg, #6b8caf 10%, #5b7c99 100%);
}
.shadow {
@@ -184,11 +276,11 @@ main {
}
.nav-link[data-bs-toggle="collapse"]:not(.collapsed) {
- color: #007bff;
+ color: #5b7c99;
}
.nav-link[data-bs-toggle="collapse"]:not(.collapsed) .fa-chevron-down {
- color: #007bff;
+ color: #5b7c99;
}
/* Submenu styles */
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 58dee0e52..cbd5bc676 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -258,7 +258,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
- v.masters, constants.VolumePulseSeconds, *v.dataCenter, *v.rack,
+ v.masters, constants.VolumePulsePeriod, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readMode,
*v.compactionMBPerSecond,
diff --git a/weed/server/constants/volume.go b/weed/server/constants/volume.go
index 77c7b7b47..a1287d118 100644
--- a/weed/server/constants/volume.go
+++ b/weed/server/constants/volume.go
@@ -1,5 +1,7 @@
package constants
+import "time"
+
const (
- VolumePulseSeconds = 5
+ VolumePulsePeriod = 5 * time.Second
)
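
The constant changes from a bare int of seconds to a typed time.Duration, so call sites no longer repeat the time.Duration(n)*time.Second conversion. A minimal standalone sketch of the before/after usage (the main function is illustrative, not from the repo):

    package main

    import (
    	"fmt"
    	"time"
    )

    const (
    	// Before: an untyped integer meaning "seconds" only by convention.
    	VolumePulseSeconds = 5
    	// After: a typed duration; the unit travels with the value.
    	VolumePulsePeriod = 5 * time.Second
    )

    func main() {
    	// Old call sites had to convert manually:
    	time.Sleep(time.Duration(VolumePulseSeconds) * time.Second)
    	// New call sites pass the duration straight through:
    	time.Sleep(VolumePulsePeriod)
    	fmt.Println(VolumePulsePeriod) // 5s
    }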
diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go
index 2abde1bd9..9c2f8b213 100644
--- a/weed/server/volume_grpc_client_to_master.go
+++ b/weed/server/volume_grpc_client_to_master.go
@@ -68,7 +68,7 @@ func (vs *VolumeServer) heartbeat() {
master = newLeader
}
vs.store.MasterAddress = master
- newLeader, err = vs.doHeartbeatWithRetry(master, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second, duplicateRetryCount)
+ newLeader, err = vs.doHeartbeatWithRetry(master, grpcDialOption, vs.pulsePeriod, duplicateRetryCount)
if err != nil {
glog.V(0).Infof("heartbeat to %s error: %v", master, err)
@@ -81,7 +81,7 @@ func (vs *VolumeServer) heartbeat() {
} else {
// Regular error, reset duplicate retry count
duplicateRetryCount = 0
- time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
+ time.Sleep(vs.pulsePeriod)
}
newLeader = ""
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 66c62b98c..4f8a7fb0d 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -35,7 +35,7 @@ type VolumeServer struct {
SeedMasterNodes []pb.ServerAddress
whiteList []string
currentMaster pb.ServerAddress
- pulseSeconds int
+ pulsePeriod time.Duration
dataCenter string
rack string
store *storage.Store
@@ -59,7 +59,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
folders []string, maxCounts []int32, minFreeSpaces []util.MinFreeSpace, diskTypes []types.DiskType,
idxFolder string,
needleMapKind storage.NeedleMapKind,
- masterNodes []pb.ServerAddress, pulseSeconds int,
+ masterNodes []pb.ServerAddress, pulsePeriod time.Duration,
dataCenter string, rack string,
whiteList []string,
fixJpgOrientation bool,
@@ -86,7 +86,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds")
vs := &VolumeServer{
- pulseSeconds: pulseSeconds,
+ pulsePeriod: pulsePeriod,
dataCenter: dataCenter,
rack: rack,
needleMapKind: needleMapKind,
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
index a8cc72d4d..741df0dd4 100644
--- a/weed/shell/command_volume_check_disk.go
+++ b/weed/shell/command_volume_check_disk.go
@@ -88,7 +88,8 @@ func (c *commandVolumeCheckDisk) eqVolumeFileCount(a, b *VolumeReplica) (bool, b
return fileCountA == fileCountB, fileDeletedCountA == fileDeletedCountB
}
-func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTimeAtSecond int64, syncDeletions, verbose bool) bool {
+func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTime time.Time, syncDeletions, verbose bool) bool {
+ pulseTimeAtSecond := pulseTime.Unix()
doSyncDeletedCount := false
if syncDeletions && a.info.DeleteCount != b.info.DeleteCount {
doSyncDeletedCount = true
@@ -135,7 +136,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
c.writer = writer
// collect topology information
- pulseTimeAtSecond := time.Now().Unix() - constants.VolumePulseSeconds*2
+ pulseTime := time.Now().Add(-constants.VolumePulsePeriod * 2)
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
return err
@@ -162,7 +163,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
})
for len(writableReplicas) >= 2 {
a, b := writableReplicas[0], writableReplicas[1]
- if !*slowMode && c.shouldSkipVolume(a, b, pulseTimeAtSecond, *syncDeletions, *verbose) {
+ if !*slowMode && c.shouldSkipVolume(a, b, pulseTime, *syncDeletions, *verbose) {
// always choose the larger volume to be the source
writableReplicas = append(replicas[:1], writableReplicas[2:]...)
continue
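
The caller now computes the cutoff once as a time.Time, and shouldSkipVolume converts it back to Unix seconds internally, since the volume info protobuf carries second-granularity timestamps. A minimal sketch of the cutoff logic, assuming an illustrative modifiedAtSecond stand-in for the replica's last-modified field:

    package main

    import (
    	"fmt"
    	"time"
    )

    const volumePulsePeriod = 5 * time.Second

    func main() {
    	// Cutoff: two pulse periods ago, kept as a time.Time at the call site.
    	pulseTime := time.Now().Add(-2 * volumePulsePeriod)

    	// Convert once to Unix seconds where the comparison happens.
    	pulseTimeAtSecond := pulseTime.Unix()

    	// Illustrative replica timestamp: last written 30 seconds ago.
    	modifiedAtSecond := time.Now().Add(-30 * time.Second).Unix()

    	// A replica untouched since before the cutoff is considered settled
    	// enough to compare against its peers.
    	fmt.Println("settled:", modifiedAtSecond < pulseTimeAtSecond)
    }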
diff --git a/weed/shell/command_volume_check_disk_test.go b/weed/shell/command_volume_check_disk_test.go
index ab9832bd4..d86b40f1f 100644
--- a/weed/shell/command_volume_check_disk_test.go
+++ b/weed/shell/command_volume_check_disk_test.go
@@ -1,9 +1,11 @@
package shell
import (
- "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"os"
"testing"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
type testCommandVolumeCheckDisk struct {
@@ -65,7 +67,8 @@ func TestShouldSkipVolume(t *testing.T) {
},
}
for num, tt := range tests {
- if isShould := cmdVolumeCheckDisk.shouldSkipVolume(&tt.a, &tt.b, tt.pulseTimeAtSecond, true, true); isShould != tt.shouldSkipVolume {
+ pulseTime := time.Unix(tt.pulseTimeAtSecond, 0)
+ if isShould := cmdVolumeCheckDisk.shouldSkipVolume(&tt.a, &tt.b, pulseTime, true, true); isShould != tt.shouldSkipVolume {
t.Fatalf("result of should skip volume is unexpected for %d test", num)
}
}
diff --git a/weed/topology/disk.go b/weed/topology/disk.go
index 8ca25c244..f27589916 100644
--- a/weed/topology/disk.go
+++ b/weed/topology/disk.go
@@ -176,6 +176,19 @@ func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool)
d.UpAdjustDiskUsageDelta(types.ToDiskType(v.DiskType), deltaDiskUsage)
}
isChanged = d.volumes[v.Id].ReadOnly != v.ReadOnly
+ if isChanged {
+ // Adjust active volume count when ReadOnly status changes
+ // Use a separate delta object to avoid affecting other metric adjustments
+ readOnlyDelta := &DiskUsageCounts{}
+ if v.ReadOnly {
+ // Changed from writable to read-only
+ readOnlyDelta.activeVolumeCount = -1
+ } else {
+ // Changed from read-only to writable
+ readOnlyDelta.activeVolumeCount = 1
+ }
+ d.UpAdjustDiskUsageDelta(types.ToDiskType(v.DiskType), readOnlyDelta)
+ }
d.volumes[v.Id] = v
}
return
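
The new branch accounts for ReadOnly flips with a dedicated delta object, so the active-volume adjustment cannot bleed into the size and file-count deltas computed earlier in the same update. A minimal sketch of the counting rule, assuming a usage-delta struct shaped like DiskUsageCounts:

    package main

    import "fmt"

    // usageDelta mirrors the one field of the disk usage delta that the
    // read-only branch touches; the real DiskUsageCounts has more fields.
    type usageDelta struct {
    	activeVolumeCount int64
    }

    // readOnlyTransitionDelta returns the active-volume adjustment when a
    // volume's ReadOnly flag flips from oldRO to newRO.
    func readOnlyTransitionDelta(oldRO, newRO bool) usageDelta {
    	var d usageDelta
    	switch {
    	case !oldRO && newRO: // writable -> read-only
    		d.activeVolumeCount = -1
    	case oldRO && !newRO: // read-only -> writable
    		d.activeVolumeCount = 1
    	}
    	return d
    }

    func main() {
    	fmt.Println(readOnlyTransitionDelta(false, true)) // {-1}
    	fmt.Println(readOnlyTransitionDelta(true, false)) // {1}
    	fmt.Println(readOnlyTransitionDelta(true, true))  // {0}: no change, no adjustment
    }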
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 667e941df..8515d2f81 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -211,6 +211,120 @@ func TestAddRemoveVolume(t *testing.T) {
}
}
+func TestVolumeReadOnlyStatusChange(t *testing.T) {
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
+
+ dc := topo.GetOrCreateDataCenter("dc1")
+ rack := dc.GetOrCreateRack("rack1")
+ maxVolumeCounts := make(map[string]uint32)
+ maxVolumeCounts[""] = 25
+ dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
+
+ // Create a writable volume
+ v := storage.VolumeInfo{
+ Id: needle.VolumeId(1),
+ Size: 100,
+ Collection: "",
+ DiskType: "",
+ FileCount: 10,
+ DeleteCount: 0,
+ DeletedByteCount: 0,
+ ReadOnly: false, // Initially writable
+ Version: needle.GetCurrentVersion(),
+ ReplicaPlacement: &super_block.ReplicaPlacement{},
+ Ttl: needle.EMPTY_TTL,
+ }
+
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+ topo.RegisterVolumeLayout(v, dn)
+
+ // Check initial active count (should be 1 since volume is writable)
+ usageCounts := topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "initial activeVolumeCount", int(usageCounts.activeVolumeCount), 1)
+ assert(t, "initial remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
+
+ // Change volume to read-only
+ v.ReadOnly = true
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+
+ // Check active count after marking read-only (should be 0)
+ usageCounts = topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "activeVolumeCount after read-only", int(usageCounts.activeVolumeCount), 0)
+
+ // Change volume back to writable
+ v.ReadOnly = false
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+
+ // Check active count after marking writable again (should be 1)
+ usageCounts = topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "activeVolumeCount after writable again", int(usageCounts.activeVolumeCount), 1)
+}
+
+func TestVolumeReadOnlyAndRemoteStatusChange(t *testing.T) {
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
+
+ dc := topo.GetOrCreateDataCenter("dc1")
+ rack := dc.GetOrCreateRack("rack1")
+ maxVolumeCounts := make(map[string]uint32)
+ maxVolumeCounts[""] = 25
+ dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
+
+ // Create a writable, local volume
+ v := storage.VolumeInfo{
+ Id: needle.VolumeId(1),
+ Size: 100,
+ Collection: "",
+ DiskType: "",
+ FileCount: 10,
+ DeleteCount: 0,
+ DeletedByteCount: 0,
+ ReadOnly: false, // Initially writable
+ RemoteStorageName: "", // Initially local
+ Version: needle.GetCurrentVersion(),
+ ReplicaPlacement: &super_block.ReplicaPlacement{},
+ Ttl: needle.EMPTY_TTL,
+ }
+
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+ topo.RegisterVolumeLayout(v, dn)
+
+ // Check initial counts
+ usageCounts := topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "initial activeVolumeCount", int(usageCounts.activeVolumeCount), 1)
+ assert(t, "initial remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
+
+ // Simultaneously change to read-only AND remote
+ v.ReadOnly = true
+ v.RemoteStorageName = "s3"
+ v.RemoteStorageKey = "key1"
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+
+ // Check counts after both changes
+ usageCounts = topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "activeVolumeCount after read-only+remote", int(usageCounts.activeVolumeCount), 0)
+ assert(t, "remoteVolumeCount after read-only+remote", int(usageCounts.remoteVolumeCount), 1)
+
+ // Change back to writable but keep remote
+ v.ReadOnly = false
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+
+ // Check counts - should be writable (active=1) and still remote
+ usageCounts = topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "activeVolumeCount after writable+remote", int(usageCounts.activeVolumeCount), 1)
+ assert(t, "remoteVolumeCount after writable+remote", int(usageCounts.remoteVolumeCount), 1)
+
+ // Change back to local AND read-only simultaneously
+ v.ReadOnly = true
+ v.RemoteStorageName = ""
+ v.RemoteStorageKey = ""
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+
+ // Check final counts
+ usageCounts = topo.diskUsages.usages[types.HardDriveType]
+ assert(t, "final activeVolumeCount", int(usageCounts.activeVolumeCount), 0)
+ assert(t, "final remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
+}
+
func TestListCollections(t *testing.T) {
rp, _ := super_block.NewReplicaPlacementFromString("002")
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 2a71c6e23..5442ccdce 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -152,9 +152,9 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo
}
}()
- for !topo.LastLeaderChangeTime.Add(constants.VolumePulseSeconds * 2).Before(time.Now()) {
+ for !topo.LastLeaderChangeTime.Add(constants.VolumePulsePeriod * 2).Before(time.Now()) {
glog.V(0).Infof("wait for volume servers to join back")
- time.Sleep(constants.VolumePulseSeconds / 2)
+ time.Sleep(constants.VolumePulsePeriod / 2)
}
vid, raftErr := topo.NextVolumeId()
if raftErr != nil {
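
For reference, the loop above blocks volume growth until master leadership has been stable for two pulse periods, polling at half-pulse intervals; the duration constant now multiplies and divides cleanly. A standalone sketch of the same condition, with lastLeaderChange set to an illustrative value:

    package main

    import (
    	"fmt"
    	"time"
    )

    const volumePulsePeriod = 5 * time.Second

    func main() {
    	// Illustrative: the leader last changed three pulse periods ago.
    	lastLeaderChange := time.Now().Add(-3 * volumePulsePeriod)

    	// Same shape as the loop in findAndGrow: keep waiting while the
    	// last leadership change falls within the last two pulse periods.
    	for !lastLeaderChange.Add(2 * volumePulsePeriod).Before(time.Now()) {
    		fmt.Println("wait for volume servers to join back")
    		time.Sleep(volumePulsePeriod / 2)
    	}
    	fmt.Println("leader stable; safe to ask the topology for a new volume id")
    }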