-rw-r--r--  .gitignore  3
-rw-r--r--  DESIGN.md  413
-rw-r--r--  docker/Makefile  2
-rw-r--r--  docker/admin_integration/Dockerfile.local  18
-rw-r--r--  docker/admin_integration/EC-TESTING-README.md  438
-rw-r--r--  docker/admin_integration/Makefile  346
-rwxr-xr-x  docker/admin_integration/check_volumes.sh  32
-rw-r--r--  docker/admin_integration/create_vacuum_test_data.go  280
-rwxr-xr-x  docker/admin_integration/demo_vacuum_testing.sh  105
-rw-r--r--  docker/admin_integration/docker-compose-ec-test.yml  240
-rwxr-xr-x  docker/admin_integration/test-integration.sh  73
-rw-r--r--  weed/admin/config/schema.go  360
-rw-r--r--  weed/admin/config/schema_test.go  226
-rw-r--r--  weed/admin/dash/admin_server.go  321
-rw-r--r--  weed/admin/dash/collection_management.go  268
-rw-r--r--  weed/admin/dash/config_persistence.go  632
-rw-r--r--  weed/admin/dash/ec_shard_management.go  734
-rw-r--r--  weed/admin/dash/middleware.go  23
-rw-r--r--  weed/admin/dash/types.go  163
-rw-r--r--  weed/admin/dash/worker_grpc_server.go  34
-rw-r--r--  weed/admin/handlers/admin_handlers.go  9
-rw-r--r--  weed/admin/handlers/cluster_handlers.go  124
-rw-r--r--  weed/admin/handlers/maintenance_handlers.go  502
-rw-r--r--  weed/admin/handlers/maintenance_handlers_test.go  389
-rw-r--r--  weed/admin/handlers/task_config_interface.go  25
-rw-r--r--  weed/admin/maintenance/config_schema.go  190
-rw-r--r--  weed/admin/maintenance/config_verification.go  124
-rw-r--r--  weed/admin/maintenance/maintenance_config_proto.go  287
-rw-r--r--  weed/admin/maintenance/maintenance_integration.go  577
-rw-r--r--  weed/admin/maintenance/maintenance_manager.go  225
-rw-r--r--  weed/admin/maintenance/maintenance_queue.go  352
-rw-r--r--  weed/admin/maintenance/maintenance_queue_test.go  353
-rw-r--r--  weed/admin/maintenance/maintenance_scanner.go  98
-rw-r--r--  weed/admin/maintenance/maintenance_types.go  299
-rw-r--r--  weed/admin/maintenance/maintenance_worker.go  29
-rw-r--r--  weed/admin/maintenance/pending_operations.go  311
-rw-r--r--  weed/admin/maintenance/pending_operations_test.go  250
-rw-r--r--  weed/admin/static/css/admin.css  1
-rw-r--r--  weed/admin/topology/active_topology.go  741
-rw-r--r--  weed/admin/topology/active_topology_test.go  654
-rw-r--r--  weed/admin/view/app/cluster_collections.templ  75
-rw-r--r--  weed/admin/view/app/cluster_collections_templ.go  256
-rw-r--r--  weed/admin/view/app/cluster_ec_shards.templ  455
-rw-r--r--  weed/admin/view/app/cluster_ec_shards_templ.go  840
-rw-r--r--  weed/admin/view/app/cluster_ec_volumes.templ  775
-rw-r--r--  weed/admin/view/app/cluster_ec_volumes_templ.go  1313
-rw-r--r--  weed/admin/view/app/cluster_volumes.templ  2
-rw-r--r--  weed/admin/view/app/cluster_volumes_templ.go  2
-rw-r--r--  weed/admin/view/app/collection_details.templ  371
-rw-r--r--  weed/admin/view/app/collection_details_templ.go  567
-rw-r--r--  weed/admin/view/app/ec_volume_details.templ  313
-rw-r--r--  weed/admin/view/app/ec_volume_details_templ.go  560
-rw-r--r--  weed/admin/view/app/maintenance_config.templ  117
-rw-r--r--  weed/admin/view/app/maintenance_config_schema.templ  381
-rw-r--r--  weed/admin/view/app/maintenance_config_schema_templ.go  622
-rw-r--r--  weed/admin/view/app/maintenance_config_templ.go  44
-rw-r--r--  weed/admin/view/app/maintenance_queue.templ  269
-rw-r--r--  weed/admin/view/app/maintenance_queue_templ.go  741
-rw-r--r--  weed/admin/view/app/task_config_schema.templ  486
-rw-r--r--  weed/admin/view/app/task_config_schema_templ.go  921
-rw-r--r--  weed/admin/view/app/task_config_schema_test.go  232
-rw-r--r--  weed/admin/view/components/form_fields.templ  118
-rw-r--r--  weed/admin/view/components/form_fields_templ.go  259
-rw-r--r--  weed/admin/view/layout/layout.templ  5
-rw-r--r--  weed/admin/view/layout/layout_templ.go  24
-rw-r--r--  weed/command/admin.go  17
-rw-r--r--  weed/command/worker.go  66
-rw-r--r--  weed/pb/master.proto  4
-rw-r--r--  weed/pb/master_pb/master.pb.go  48
-rw-r--r--  weed/pb/volume_server.proto  39
-rw-r--r--  weed/pb/volume_server_pb/volume_server.pb.go  1148
-rw-r--r--  weed/pb/volume_server_pb/volume_server_grpc.pb.go  81
-rw-r--r--  weed/pb/worker.proto  192
-rw-r--r--  weed/pb/worker_pb/worker.pb.go  1949
-rw-r--r--  weed/server/volume_grpc_copy.go  117
-rw-r--r--  weed/server/volume_grpc_erasure_coding.go  64
-rw-r--r--  weed/server/volume_server_handlers_admin.go  5
-rw-r--r--  weed/server/volume_server_ui/volume.html  14
-rw-r--r--  weed/shell/command_volume_list.go  15
-rw-r--r--  weed/storage/disk_location.go  25
-rw-r--r--  weed/storage/erasure_coding/ec_volume.go  18
-rw-r--r--  weed/storage/erasure_coding/ec_volume_info.go  7
-rw-r--r--  weed/storage/store.go  65
-rw-r--r--  weed/storage/store_ec.go  32
-rw-r--r--  weed/storage/volume.go  2
-rw-r--r--  weed/storage/volume_info.go  3
-rw-r--r--  weed/topology/disk.go  16
-rw-r--r--  weed/topology/topology_ec.go  11
-rw-r--r--  weed/worker/client.go  307
-rw-r--r--  weed/worker/client_test.go  111
-rw-r--r--  weed/worker/client_tls_test.go  146
-rw-r--r--  weed/worker/tasks/balance/balance.go  65
-rw-r--r--  weed/worker/tasks/balance/balance_detector.go  171
-rw-r--r--  weed/worker/tasks/balance/balance_register.go  109
-rw-r--r--  weed/worker/tasks/balance/balance_scheduler.go  197
-rw-r--r--  weed/worker/tasks/balance/balance_typed.go  156
-rw-r--r--  weed/worker/tasks/balance/config.go  170
-rw-r--r--  weed/worker/tasks/balance/detection.go  134
-rw-r--r--  weed/worker/tasks/balance/ui.go  361
-rw-r--r--  weed/worker/tasks/base/generic_components.go  129
-rw-r--r--  weed/worker/tasks/base/registration.go  155
-rw-r--r--  weed/worker/tasks/base/task_definition.go  272
-rw-r--r--  weed/worker/tasks/base/task_definition_test.go  338
-rw-r--r--  weed/worker/tasks/base/typed_task.go  218
-rw-r--r--  weed/worker/tasks/config_update_registry.go  67
-rw-r--r--  weed/worker/tasks/erasure_coding/config.go  207
-rw-r--r--  weed/worker/tasks/erasure_coding/detection.go  140
-rw-r--r--  weed/worker/tasks/erasure_coding/ec.go  792
-rw-r--r--  weed/worker/tasks/erasure_coding/ec_detector.go  139
-rw-r--r--  weed/worker/tasks/erasure_coding/ec_register.go  109
-rw-r--r--  weed/worker/tasks/erasure_coding/ec_scheduler.go  114
-rw-r--r--  weed/worker/tasks/erasure_coding/ui.go  309
-rw-r--r--  weed/worker/tasks/schema_provider.go  51
-rw-r--r--  weed/worker/tasks/task.go  198
-rw-r--r--  weed/worker/tasks/task_log_handler.go  230
-rw-r--r--  weed/worker/tasks/task_logger.go  432
-rw-r--r--  weed/worker/tasks/ui_base.go  184
-rw-r--r--  weed/worker/tasks/vacuum/config.go  190
-rw-r--r--  weed/worker/tasks/vacuum/detection.go  112
-rw-r--r--  weed/worker/tasks/vacuum/ui.go  314
-rw-r--r--  weed/worker/tasks/vacuum/vacuum.go  195
-rw-r--r--  weed/worker/tasks/vacuum/vacuum_detector.go  132
-rw-r--r--  weed/worker/tasks/vacuum/vacuum_register.go  109
-rw-r--r--  weed/worker/tasks/vacuum/vacuum_scheduler.go  111
-rw-r--r--  weed/worker/types/config_types.go  4
-rw-r--r--  weed/worker/types/data_types.go  2
-rw-r--r--  weed/worker/types/task_types.go  74
-rw-r--r--  weed/worker/types/task_ui.go  264
-rw-r--r--  weed/worker/types/typed_task_interface.go  121
-rw-r--r--  weed/worker/worker.go  466
130 files changed, 27675 insertions, 4367 deletions
diff --git a/.gitignore b/.gitignore
index 027a56e59..b330bbd96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -112,3 +112,6 @@ test/s3/retention/weed-server.pid
test/s3/retention/weed-test.log
/test/s3/versioning/test-volume-data
test/s3/versioning/weed-test.log
+/docker/admin_integration/data
+docker/agent_pub_record
+docker/admin_integration/weed-local
diff --git a/DESIGN.md b/DESIGN.md
new file mode 100644
index 000000000..d164467c3
--- /dev/null
+++ b/DESIGN.md
@@ -0,0 +1,413 @@
+# SeaweedFS Task Distribution System Design
+
+## Overview
+
+This document describes the design of a distributed task management system for SeaweedFS that handles Erasure Coding (EC) and vacuum operations through a scalable admin server and worker process architecture.
+
+## System Architecture
+
+### High-Level Components
+
+```
+┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
+│ Master │◄──►│ Admin Server │◄──►│ Workers │
+│ │ │ │ │ │
+│ - Volume Info │ │ - Task Discovery │ │ - Task Exec │
+│ - Shard Status │ │ - Task Assign │ │ - Progress │
+│ - Heartbeats │ │ - Progress Track │ │ - Error Report │
+└─────────────────┘ └──────────────────┘ └─────────────────┘
+ │ │ │
+ │ │ │
+ ▼ ▼ ▼
+┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
+│ Volume Servers │ │ Volume Monitor │ │ Task Execution │
+│ │ │ │ │ │
+│ - Store Volumes │ │ - Health Check │ │ - EC Convert │
+│ - EC Shards │ │ - Usage Stats │ │ - Vacuum Clean │
+│ - Report Status │ │ - State Sync │ │ - Status Report │
+└─────────────────┘ └──────────────────┘ └─────────────────┘
+```
+
+## 1. Admin Server Design
+
+### 1.1 Core Responsibilities
+
+- **Task Discovery**: Scan volumes to identify EC and vacuum candidates
+- **Worker Management**: Track available workers and their capabilities
+- **Task Assignment**: Match tasks to optimal workers
+- **Progress Tracking**: Monitor in-progress tasks for capacity planning
+- **State Reconciliation**: Sync with master server for volume state updates
+
+### 1.2 Task Discovery Engine
+
+```go
+type TaskDiscoveryEngine struct {
+ masterClient MasterClient
+ volumeScanner VolumeScanner
+ taskDetectors map[TaskType]TaskDetector
+ scanInterval time.Duration
+}
+
+type VolumeCandidate struct {
+ VolumeID uint32
+ Server string
+ Collection string
+ TaskType TaskType
+ Priority TaskPriority
+ Reason string
+ DetectedAt time.Time
+ Parameters map[string]interface{}
+}
+```
+
+**EC Detection Logic**:
+- Find volumes >= 95% full and idle for > 1 hour
+- Exclude volumes already in EC format
+- Exclude volumes with ongoing operations
+- Prioritize by collection and age
+
+**Vacuum Detection Logic**:
+- Find volumes with garbage ratio > 30%
+- Exclude read-only volumes
+- Exclude volumes with recent vacuum operations
+- Prioritize by garbage percentage (both detection checks are sketched in code below)
+
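+A minimal sketch of the two candidate checks above, assuming a hypothetical `VolumeSummary` view of volume state (the field names and the one-hour vacuum cooldown are illustrative, not the actual SeaweedFS types):
+
+```go
+// VolumeSummary is a hypothetical snapshot used only for this sketch.
+type VolumeSummary struct {
+    Size, SizeLimit     uint64
+    GarbageRatio        float64
+    IsECVolume          bool
+    HasOngoingOperation bool
+    ReadOnly            bool
+    LastModified        time.Time
+    LastVacuumed        time.Time
+}
+
+const (
+    ecFullnessThreshold = 0.95      // ">= 95% full"
+    ecIdleDuration      = time.Hour // "idle for > 1 hour"
+    vacuumGarbageRatio  = 0.30      // "garbage ratio > 30%"
+    vacuumCooldown      = time.Hour // assumed "recent vacuum" window
+)
+
+func isECCandidate(v VolumeSummary, now time.Time) bool {
+    if v.IsECVolume || v.HasOngoingOperation || v.SizeLimit == 0 {
+        return false
+    }
+    fullness := float64(v.Size) / float64(v.SizeLimit)
+    return fullness >= ecFullnessThreshold && now.Sub(v.LastModified) > ecIdleDuration
+}
+
+func isVacuumCandidate(v VolumeSummary, now time.Time) bool {
+    if v.ReadOnly || now.Sub(v.LastVacuumed) < vacuumCooldown {
+        return false
+    }
+    return v.GarbageRatio > vacuumGarbageRatio
+}
+```
+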
+### 1.3 Worker Registry & Management
+
+```go
+type WorkerRegistry struct {
+ workers map[string]*Worker
+ capabilities map[TaskType][]*Worker
+ lastHeartbeat map[string]time.Time
+ taskAssignment map[string]*Task
+ mutex sync.RWMutex
+}
+
+type Worker struct {
+ ID string
+ Address string
+ Capabilities []TaskType
+ MaxConcurrent int
+ CurrentLoad int
+ Status WorkerStatus
+ LastSeen time.Time
+ Performance WorkerMetrics
+}
+```
+
+### 1.4 Task Assignment Algorithm
+
+```go
+type TaskScheduler struct {
+ registry *WorkerRegistry
+ taskQueue *PriorityQueue
+ inProgressTasks map[string]*InProgressTask
+ volumeReservations map[uint32]*VolumeReservation
+}
+
+// Worker Selection Criteria:
+// 1. Has required capability (EC or Vacuum)
+// 2. Available capacity (CurrentLoad < MaxConcurrent)
+// 3. Best performance history for task type
+// 4. Lowest current load
+// 5. Geographically close to volume server (optional)
+```
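+
+A minimal sketch of ranking eligible workers under criteria 1-4 (the `SuccessRate` field on `WorkerMetrics` is an assumption standing in for "performance history"; geographic affinity is omitted):
+
+```go
+// pickWorker returns the best available worker for the task, or nil if none qualifies.
+// The caller is assumed to hold the registry lock.
+func (s *TaskScheduler) pickWorker(task *Task) *Worker {
+    var best *Worker
+    var bestScore float64
+    for _, w := range s.registry.capabilities[task.Type] { // 1. has required capability
+        if w.CurrentLoad >= w.MaxConcurrent { // 2. available capacity
+            continue
+        }
+        // 3. favor good performance history; 4. penalize current load
+        score := w.Performance.SuccessRate - float64(w.CurrentLoad)/float64(w.MaxConcurrent)
+        if best == nil || score > bestScore {
+            best, bestScore = w, score
+        }
+    }
+    return best
+}
+```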
+
+## 2. Worker Process Design
+
+### 2.1 Worker Architecture
+
+```go
+type MaintenanceWorker struct {
+ id string
+ config *WorkerConfig
+ adminClient AdminClient
+ taskExecutors map[TaskType]TaskExecutor
+ currentTasks map[string]*RunningTask
+ registry *TaskRegistry
+ heartbeatTicker *time.Ticker
+ requestTicker *time.Ticker
+}
+```
+
+### 2.2 Task Execution Framework
+
+```go
+type TaskExecutor interface {
+ Execute(ctx context.Context, task *Task) error
+ EstimateTime(task *Task) time.Duration
+ ValidateResources(task *Task) error
+ GetProgress() float64
+ Cancel() error
+}
+
+type ErasureCodingExecutor struct {
+ volumeClient VolumeServerClient
+ progress float64
+ cancelled bool
+}
+
+type VacuumExecutor struct {
+ volumeClient VolumeServerClient
+ progress float64
+ cancelled bool
+}
+```
+
+### 2.3 Worker Capabilities & Registration
+
+```go
+type WorkerCapabilities struct {
+ SupportedTasks []TaskType
+ MaxConcurrent int
+ ResourceLimits ResourceLimits
+ PreferredServers []string // Affinity for specific volume servers
+}
+
+type ResourceLimits struct {
+ MaxMemoryMB int64
+ MaxDiskSpaceMB int64
+ MaxNetworkMbps int64
+ MaxCPUPercent float64
+}
+```
+
+## 3. Task Lifecycle Management
+
+### 3.1 Task States
+
+```go
+type TaskState string
+
+const (
+ TaskStatePending TaskState = "pending"
+ TaskStateAssigned TaskState = "assigned"
+ TaskStateInProgress TaskState = "in_progress"
+ TaskStateCompleted TaskState = "completed"
+ TaskStateFailed TaskState = "failed"
+ TaskStateCancelled TaskState = "cancelled"
+ TaskStateStuck TaskState = "stuck" // Taking too long
+ TaskStateDuplicate TaskState = "duplicate" // Detected duplicate
+)
+```
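+
+One plausible encoding of the legal moves between these states; the exact set of allowed transitions is a design choice and is shown here only as an illustration:
+
+```go
+var validTransitions = map[TaskState][]TaskState{
+    TaskStatePending:    {TaskStateAssigned, TaskStateCancelled, TaskStateDuplicate},
+    TaskStateAssigned:   {TaskStateInProgress, TaskStateCancelled},
+    TaskStateInProgress: {TaskStateCompleted, TaskStateFailed, TaskStateCancelled, TaskStateStuck},
+    TaskStateStuck:      {TaskStateFailed, TaskStateCancelled, TaskStatePending}, // back to pending = reschedule
+}
+
+func canTransition(from, to TaskState) bool {
+    for _, next := range validTransitions[from] {
+        if next == to {
+            return true
+        }
+    }
+    return false
+}
+```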
+
+### 3.2 Progress Tracking & Monitoring
+
+```go
+type InProgressTask struct {
+ Task *Task
+ WorkerID string
+ StartedAt time.Time
+ LastUpdate time.Time
+ Progress float64
+ EstimatedEnd time.Time
+ VolumeReserved bool // Reserved for capacity planning
+}
+
+type TaskMonitor struct {
+ inProgressTasks map[string]*InProgressTask
+ timeoutChecker *time.Ticker
+ stuckDetector *time.Ticker
+ duplicateChecker *time.Ticker
+}
+```
+
+## 4. Volume Capacity Reconciliation
+
+### 4.1 Volume State Tracking
+
+```go
+type VolumeStateManager struct {
+ masterClient MasterClient
+ inProgressTasks map[uint32]*InProgressTask // VolumeID -> Task
+ committedChanges map[uint32]*VolumeChange // Changes not yet in master
+ reconcileInterval time.Duration
+}
+
+type VolumeChange struct {
+ VolumeID uint32
+ ChangeType ChangeType // "ec_encoding", "vacuum_completed"
+ OldCapacity int64
+ NewCapacity int64
+ TaskID string
+ CompletedAt time.Time
+ ReportedToMaster bool
+}
+```
+
+### 4.2 Shard Assignment Integration
+
+When the master needs to assign shards, it must consider:
+1. **Current volume state** from its own records
+2. **In-progress capacity changes** from admin server
+3. **Committed but unreported changes** from admin server
+
+```go
+type CapacityOracle struct {
+ adminServer AdminServerClient
+ masterState *MasterVolumeState
+ updateFreq time.Duration
+}
+
+func (o *CapacityOracle) GetAdjustedCapacity(volumeID uint32) int64 {
+ baseCapacity := o.masterState.GetCapacity(volumeID)
+
+ // Adjust for in-progress tasks
+ if task := o.adminServer.GetInProgressTask(volumeID); task != nil {
+ switch task.Type {
+ case TaskTypeErasureCoding:
+ // EC reduces effective capacity
+ return baseCapacity / 2 // Simplified
+ case TaskTypeVacuum:
+ // Vacuum may increase available space
+ return baseCapacity + int64(float64(baseCapacity) * 0.3)
+ }
+ }
+
+ // Adjust for completed but unreported changes
+ if change := o.adminServer.GetPendingChange(volumeID); change != nil {
+ return change.NewCapacity
+ }
+
+ return baseCapacity
+}
+```
+
+## 5. Error Handling & Recovery
+
+### 5.1 Worker Failure Scenarios
+
+```go
+type FailureHandler struct {
+ taskRescheduler *TaskRescheduler
+ workerMonitor *WorkerMonitor
+ alertManager *AlertManager
+}
+
+// Failure Scenarios:
+// 1. Worker becomes unresponsive (heartbeat timeout)
+// 2. Task execution fails (reported by worker)
+// 3. Task gets stuck (progress timeout)
+// 4. Duplicate task detection
+// 5. Resource exhaustion
+```
+
+### 5.2 Recovery Strategies
+
+**Worker Timeout Recovery**:
+- Mark worker as inactive after 3 missed heartbeats
+- Reschedule all assigned tasks to other workers
+- Cleanup any partial state
+
+**Task Stuck Recovery**:
+- Detect tasks with no progress for > 2x estimated time
+- Cancel stuck task and mark volume for cleanup
+- Reschedule if retry count < max_retries (see the sketch below)
+
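+A sketch of both recovery checks; the heartbeat interval, the `EstimatedDuration`, `RetryCount`, and `MaxRetries` fields on `Task`, and the rescheduler/monitor helper methods are assumptions made for illustration only:
+
+```go
+const heartbeatInterval = 10 * time.Second // assumed heartbeat period
+
+func (h *FailureHandler) checkWorkers(reg *WorkerRegistry, now time.Time) {
+    for workerID, last := range reg.lastHeartbeat {
+        if now.Sub(last) > 3*heartbeatInterval { // 3 missed heartbeats
+            h.workerMonitor.MarkInactive(workerID)
+            h.taskRescheduler.RescheduleAllFrom(workerID) // move its assigned tasks elsewhere
+        }
+    }
+}
+
+func (h *FailureHandler) checkStuckTasks(tasks map[string]*InProgressTask, now time.Time) {
+    for _, t := range tasks {
+        if now.Sub(t.LastUpdate) > 2*t.Task.EstimatedDuration { // no progress for > 2x estimate
+            h.taskRescheduler.Cancel(t.Task) // cancel and mark the volume for cleanup
+            if t.Task.RetryCount < t.Task.MaxRetries {
+                h.taskRescheduler.Requeue(t.Task)
+            }
+        }
+    }
+}
+```
+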
+**Duplicate Task Prevention**:
+```go
+type DuplicateDetector struct {
+ activeFingerprints map[string]bool // VolumeID+TaskType
+ recentCompleted *LRUCache // Recently completed tasks
+}
+
+func (d *DuplicateDetector) IsTaskDuplicate(task *Task) bool {
+ fingerprint := fmt.Sprintf("%d-%s", task.VolumeID, task.Type)
+ return d.activeFingerprints[fingerprint] ||
+ d.recentCompleted.Contains(fingerprint)
+}
+```
+
+## 6. Simulation & Testing Framework
+
+### 6.1 Failure Simulation
+
+```go
+type TaskSimulator struct {
+ scenarios map[string]SimulationScenario
+}
+
+type SimulationScenario struct {
+ Name string
+ WorkerCount int
+ VolumeCount int
+ FailurePatterns []FailurePattern
+ Duration time.Duration
+}
+
+type FailurePattern struct {
+ Type FailureType // "worker_timeout", "task_stuck", "duplicate"
+ Probability float64 // 0.0 to 1.0
+ Timing TimingSpec // When during task execution
+ Duration time.Duration
+}
+```
+
+### 6.2 Test Scenarios
+
+**Scenario 1: Worker Timeout During EC** (expressed in code after Scenario 4)
+- Start EC task on 30GB volume
+- Kill worker at 50% progress
+- Verify task reassignment
+- Verify no duplicate EC operations
+
+**Scenario 2: Stuck Vacuum Task**
+- Start vacuum on high-garbage volume
+- Simulate worker hanging at 75% progress
+- Verify timeout detection and cleanup
+- Verify volume state consistency
+
+**Scenario 3: Duplicate Task Prevention**
+- Submit same EC task from multiple sources
+- Verify only one task executes
+- Verify proper conflict resolution
+
+**Scenario 4: Master-Admin State Divergence**
+- Create in-progress EC task
+- Simulate master restart
+- Verify state reconciliation
+- Verify shard assignment accounts for in-progress work
+
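+As an illustration, Scenario 1 can be written directly against the simulation types from section 6.1 (the `AtProgress` field of `TimingSpec` is an assumption, since `TimingSpec` is not spelled out above):
+
+```go
+var workerTimeoutDuringEC = SimulationScenario{
+    Name:        "worker_timeout_during_ec",
+    WorkerCount: 3,
+    VolumeCount: 1, // a single 30GB volume being EC-encoded
+    Duration:    30 * time.Minute,
+    FailurePatterns: []FailurePattern{
+        {
+            Type:        "worker_timeout",
+            Probability: 1.0,                         // always inject the failure
+            Timing:      TimingSpec{AtProgress: 0.5}, // kill the worker at 50% progress
+            Duration:    5 * time.Minute,
+        },
+    },
+}
+```
+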
+## 7. Performance & Scalability
+
+### 7.1 Metrics & Monitoring
+
+```go
+type SystemMetrics struct {
+ TasksPerSecond float64
+ WorkerUtilization float64
+ AverageTaskTime time.Duration
+ FailureRate float64
+ QueueDepth int
+ VolumeStatesSync bool
+}
+```
+
+### 7.2 Scalability Considerations
+
+- **Horizontal Worker Scaling**: Add workers without admin server changes
+- **Admin Server HA**: Master-slave admin servers for fault tolerance
+- **Task Partitioning**: Partition tasks by collection or datacenter
+- **Batch Operations**: Group similar tasks for efficiency
+
+## 8. Implementation Plan
+
+### Phase 1: Core Infrastructure
+1. Admin server basic framework
+2. Worker registration and heartbeat
+3. Simple task assignment
+4. Basic progress tracking
+
+### Phase 2: Advanced Features
+1. Volume state reconciliation
+2. Sophisticated worker selection
+3. Failure detection and recovery
+4. Duplicate prevention
+
+### Phase 3: Optimization & Monitoring
+1. Performance metrics
+2. Load balancing algorithms
+3. Capacity planning integration
+4. Comprehensive monitoring
+
+This design provides a robust, scalable foundation for distributed task management in SeaweedFS while maintaining consistency with the existing architecture patterns. \ No newline at end of file
diff --git a/docker/Makefile b/docker/Makefile
index 777357758..c6f6a50ae 100644
--- a/docker/Makefile
+++ b/docker/Makefile
@@ -8,7 +8,7 @@ cgo ?= 0
binary:
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
- cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
+ cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed
cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/
diff --git a/docker/admin_integration/Dockerfile.local b/docker/admin_integration/Dockerfile.local
new file mode 100644
index 000000000..9795b6ea3
--- /dev/null
+++ b/docker/admin_integration/Dockerfile.local
@@ -0,0 +1,18 @@
+FROM alpine:latest
+
+# Install required packages
+RUN apk add --no-cache \
+ ca-certificates \
+ fuse \
+ curl \
+ jq
+
+# Copy our locally built binary
+COPY weed-local /usr/bin/weed
+RUN chmod +x /usr/bin/weed
+
+# Create working directory
+WORKDIR /data
+
+# Default command
+ENTRYPOINT ["/usr/bin/weed"] \ No newline at end of file
diff --git a/docker/admin_integration/EC-TESTING-README.md b/docker/admin_integration/EC-TESTING-README.md
new file mode 100644
index 000000000..57e0a5985
--- /dev/null
+++ b/docker/admin_integration/EC-TESTING-README.md
@@ -0,0 +1,438 @@
+# SeaweedFS EC Worker Testing Environment
+
+This Docker Compose setup provides a comprehensive testing environment for SeaweedFS Erasure Coding (EC) workers using **official SeaweedFS commands**.
+
+## 📂 Directory Structure
+
+The testing environment is located in `docker/admin_integration/` and includes:
+
+```
+docker/admin_integration/
+├── Makefile # Main management interface
+├── docker-compose-ec-test.yml # Docker compose configuration
+├── EC-TESTING-README.md # This documentation
+├── Dockerfile.local # Image wrapping the locally built weed binary
+├── check_volumes.sh # Quick volume/garbage status check
+├── create_vacuum_test_data.go # Vacuum test data generator
+├── demo_vacuum_testing.sh # Guided vacuum testing demo
+└── test-integration.sh # Integration test validation script
+```
+
+## 🏗️ Architecture
+
+The testing environment uses **official SeaweedFS commands** and includes:
+
+- **1 Master Server** (port 9333) - Coordinates the cluster with 50MB volume size limit
+- **6 Volume Servers** (ports 8080-8085) - Distributed across 2 data centers and 3 racks for diversity
+- **1 Filer** (port 8888) - Provides file system interface
+- **1 Admin Server** (port 23646) - Detects volumes needing EC and manages workers using official `admin` command
+- **3 EC Workers** - Execute erasure coding tasks using official `worker` command with task-specific working directories
+- **1 Load Generator** - Continuously writes and deletes files using SeaweedFS shell commands
+- **1 Monitor** - Tracks cluster health and EC progress using shell scripts
+
+## ✨ New Features
+
+### **Task-Specific Working Directories**
+Each worker now creates dedicated subdirectories for different task types:
+- `/work/erasure_coding/` - For EC encoding tasks
+- `/work/vacuum/` - For vacuum cleanup tasks
+- `/work/balance/` - For volume balancing tasks
+
+This provides:
+- **Organization**: Each task type gets isolated working space
+- **Debugging**: Easy to find files/logs related to specific task types
+- **Cleanup**: Can clean up task-specific artifacts easily
+- **Concurrent Safety**: Different task types won't interfere with each other's files
+
+## 🚀 Quick Start
+
+### Prerequisites
+
+- Docker and Docker Compose installed
+- GNU Make installed
+- At least 4GB RAM available for containers
+- Ports 8080-8085, 8888, 9333, 9999, 23646 available
+
+### Start the Environment
+
+```bash
+# Navigate to the admin integration directory
+cd docker/admin_integration/
+
+# Show available commands
+make help
+
+# Start the complete testing environment
+make start
+```
+
+The `make start` command will:
+1. Start all services using official SeaweedFS images
+2. Configure workers with task-specific working directories
+3. Wait for services to be ready
+4. Display monitoring URLs and run health checks
+
+### Alternative Commands
+
+```bash
+# Quick start aliases
+make up # Same as 'make start'
+
+# Development mode (higher load for faster testing)
+make dev-start
+
+# Build images without starting
+make build
+```
+
+## 📋 Available Make Targets
+
+Run `make help` to see all available targets:
+
+### **🚀 Main Operations**
+- `make start` - Start the complete EC testing environment
+- `make stop` - Stop all services
+- `make restart` - Restart all services
+- `make clean` - Complete cleanup (containers, volumes, images)
+
+### **📊 Monitoring & Status**
+- `make health` - Check health of all services
+- `make status` - Show status of all containers
+- `make urls` - Display all monitoring URLs
+- `make monitor` - Open monitor dashboard in browser
+- `make monitor-status` - Show monitor status via API
+- `make volume-status` - Show volume status from master
+- `make admin-status` - Show admin server status
+- `make cluster-status` - Show complete cluster status
+
+### **📋 Logs Management**
+- `make logs` - Show logs from all services
+- `make logs-admin` - Show admin server logs
+- `make logs-workers` - Show all worker logs
+- `make logs-worker1/2/3` - Show specific worker logs
+- `make logs-load` - Show load generator logs
+- `make logs-monitor` - Show monitor logs
+- `make backup-logs` - Backup all logs to files
+
+### **⚖️ Scaling & Testing**
+- `make scale-workers WORKERS=5` - Scale workers to 5 instances
+- `make scale-load RATE=25` - Increase load generation rate
+- `make test-ec` - Run focused EC test scenario
+
+### **🔧 Development & Debug**
+- `make shell-admin` - Open shell in admin container
+- `make shell-worker1` - Open shell in worker container
+- `make debug` - Show debug information
+- `make troubleshoot` - Run troubleshooting checks
+
+## 📊 Monitoring URLs
+
+| Service | URL | Description |
+|---------|-----|-------------|
+| Master UI | http://localhost:9333 | Cluster status and topology |
+| Filer | http://localhost:8888 | File operations |
+| Admin Server | http://localhost:23646/ | Task management |
+| Monitor | http://localhost:9999/status | Complete cluster monitoring |
+| Volume Servers | http://localhost:8080-8085/status | Individual volume server stats |
+
+Quick access: `make urls` or `make monitor`
+
+## 🔄 How EC Testing Works
+
+### 1. Continuous Load Generation
+- **Write Rate**: 10 files/second (1-5MB each)
+- **Delete Rate**: 2 files/second
+- **Target**: Fill volumes to 50MB limit quickly
+
+### 2. Volume Detection
+- Admin server scans master every 30 seconds
+- Identifies volumes >40MB (80% of 50MB limit)
+- Queues EC tasks for eligible volumes
+
+### 3. EC Worker Assignment
+- **Worker 1**: EC specialist (max 2 concurrent tasks)
+- **Worker 2**: EC + Vacuum hybrid (max 2 concurrent tasks)
+- **Worker 3**: EC + Vacuum hybrid (max 1 concurrent task)
+
+### 4. Comprehensive EC Process
+Each EC task follows 6 phases:
+1. **Copy Volume Data** (5-15%) - Stream .dat/.idx files locally
+2. **Mark Read-Only** (20-25%) - Ensure data consistency
+3. **Local Encoding** (30-60%) - Create 14 shards (10+4 Reed-Solomon)
+4. **Calculate Placement** (65-70%) - Smart rack-aware distribution
+5. **Distribute Shards** (75-90%) - Upload to optimal servers
+6. **Verify & Cleanup** (95-100%) - Validate and clean temporary files
+
+### 5. Real-Time Monitoring
+- Volume analysis and EC candidate detection
+- Worker health and task progress
+- No data loss verification
+- Performance metrics
+
+## 📋 Key Features Tested
+
+### ✅ EC Implementation Features
+- [x] Local volume data copying with progress tracking
+- [x] Local Reed-Solomon encoding (10+4 shards)
+- [x] Intelligent shard placement with rack awareness
+- [x] Load balancing across available servers
+- [x] Backup server selection for redundancy
+- [x] Detailed step-by-step progress tracking
+- [x] Comprehensive error handling and recovery
+
+### ✅ Infrastructure Features
+- [x] Multi-datacenter topology (dc1, dc2)
+- [x] Rack diversity (rack1, rack2, rack3)
+- [x] Volume size limits (50MB)
+- [x] Worker capability matching
+- [x] Health monitoring and alerting
+- [x] Continuous workload simulation
+
+## 🛠️ Common Usage Patterns
+
+### Basic Testing Workflow
+```bash
+# Start environment
+make start
+
+# Watch progress
+make monitor-status
+
+# Check for EC candidates
+make volume-status
+
+# View worker activity
+make logs-workers
+
+# Stop when done
+make stop
+```
+
+### High-Load Testing
+```bash
+# Start with higher load
+make dev-start
+
+# Scale up workers and load
+make scale-workers WORKERS=5
+make scale-load RATE=50
+
+# Monitor intensive EC activity
+make logs-admin
+```
+
+### Debugging Issues
+```bash
+# Check port conflicts and system state
+make troubleshoot
+
+# View specific service logs
+make logs-admin
+make logs-worker1
+
+# Get shell access for debugging
+make shell-admin
+make shell-worker1
+
+# Check detailed status
+make debug
+```
+
+### Development Iteration
+```bash
+# Quick restart after code changes
+make restart
+
+# Rebuild and restart
+make clean
+make start
+
+# Monitor specific components
+make logs-monitor
+```
+
+## 📈 Expected Results
+
+### Successful EC Testing Shows:
+1. **Volume Growth**: Steady increase in volume sizes toward 50MB limit
+2. **EC Detection**: Admin server identifies volumes >40MB for EC
+3. **Task Assignment**: Workers receive and execute EC tasks
+4. **Shard Distribution**: 14 shards distributed across 6 volume servers
+5. **No Data Loss**: All files remain accessible during and after EC
+6. **Performance**: EC tasks complete within estimated timeframes
+
+### Sample Monitor Output:
+```bash
+# Check current status
+make monitor-status
+
+# Output example:
+{
+ "monitor": {
+ "uptime": "15m30s",
+ "master_addr": "master:9333",
+ "admin_addr": "admin:9900"
+ },
+ "stats": {
+ "VolumeCount": 12,
+ "ECTasksDetected": 3,
+ "WorkersActive": 3
+ }
+}
+```
+
+## 🔧 Configuration
+
+### Environment Variables
+
+You can customize the environment by setting variables:
+
+```bash
+# High load testing
+WRITE_RATE=25 DELETE_RATE=5 make start
+
+# Extended test duration
+TEST_DURATION=7200 make start # 2 hours
+```
+
+### Scaling Examples
+
+```bash
+# Scale workers
+make scale-workers WORKERS=6
+
+# Increase load generation
+make scale-load RATE=30
+
+# Combined scaling
+make scale-workers WORKERS=4
+make scale-load RATE=40
+```
+
+## 🧹 Cleanup Options
+
+```bash
+# Stop services only
+make stop
+
+# Remove containers but keep volumes
+make down
+
+# Remove data volumes only
+make clean-volumes
+
+# Remove built images only
+make clean-images
+
+# Complete cleanup (everything)
+make clean
+```
+
+## 🐛 Troubleshooting
+
+### Quick Diagnostics
+```bash
+# Run complete troubleshooting
+make troubleshoot
+
+# Check specific components
+make health
+make debug
+make status
+```
+
+### Common Issues
+
+**Services not starting:**
+```bash
+# Check port availability
+make troubleshoot
+
+# View startup logs
+make logs-master
+make logs-admin
+```
+
+**No EC tasks being created:**
+```bash
+# Check volume status
+make volume-status
+
+# Increase load to fill volumes faster
+make scale-load RATE=30
+
+# Check admin detection
+make logs-admin
+```
+
+**Workers not responding:**
+```bash
+# Check worker registration
+make admin-status
+
+# View worker logs
+make logs-workers
+
+# Restart workers
+make restart
+```
+
+### Performance Tuning
+
+**For faster testing:**
+```bash
+make dev-start # Higher default load
+make scale-load RATE=50 # Very high load
+```
+
+**For stress testing:**
+```bash
+make scale-workers WORKERS=8
+make scale-load RATE=100
+```
+
+## 📚 Technical Details
+
+### Network Architecture
+- Custom bridge network (172.20.0.0/16)
+- Service discovery via container names
+- Health checks for all services
+
+### Storage Layout
+- Each volume server: max 100 volumes
+- Data centers: dc1, dc2
+- Racks: rack1, rack2, rack3
+- Volume limit: 50MB per volume
+
+### EC Algorithm
+- Reed-Solomon RS(10,4)
+- 10 data shards + 4 parity shards
+- Rack-aware distribution
+- Backup server redundancy
+
+### Make Integration
+- Color-coded output for better readability
+- Comprehensive help system (`make help`)
+- Parallel execution support
+- Error handling and cleanup
+- Cross-platform compatibility
+
+## 🎯 Quick Reference
+
+```bash
+# Essential commands
+make help # Show all available targets
+make start # Start complete environment
+make health # Check all services
+make monitor # Open dashboard
+make logs-admin # View admin activity
+make clean # Complete cleanup
+
+# Monitoring
+make volume-status # Check for EC candidates
+make admin-status # Check task queue
+make monitor-status # Full cluster status
+
+# Scaling & Testing
+make test-ec # Run focused EC test
+make scale-load RATE=X # Increase load
+make troubleshoot # Diagnose issues
+```
+
+This environment provides a realistic testing scenario for SeaweedFS EC workers with actual data operations, comprehensive monitoring, and easy management through Make targets. \ No newline at end of file
diff --git a/docker/admin_integration/Makefile b/docker/admin_integration/Makefile
new file mode 100644
index 000000000..68fb0cec6
--- /dev/null
+++ b/docker/admin_integration/Makefile
@@ -0,0 +1,346 @@
+# SeaweedFS Admin Integration Test Makefile
+# Tests the admin server and worker functionality using official weed commands
+
+.PHONY: help build build-and-restart restart-workers start stop restart logs clean status test admin-ui worker-logs master-logs admin-logs vacuum-test vacuum-demo vacuum-status vacuum-data vacuum-data-high vacuum-data-low vacuum-continuous vacuum-clean vacuum-help
+.DEFAULT_GOAL := help
+
+COMPOSE_FILE := docker-compose-ec-test.yml
+PROJECT_NAME := admin_integration
+
+build: ## Build SeaweedFS with latest changes and create Docker image
+ @echo "🔨 Building SeaweedFS with latest changes..."
+ @echo "1️⃣ Generating admin templates..."
+ @cd ../../ && make admin-generate
+ @echo "2️⃣ Building Docker image with latest changes..."
+ @cd ../ && make build
+ @echo "3️⃣ Copying binary for local docker-compose..."
+ @cp ../weed ./weed-local
+ @echo "✅ Build complete! Updated image: chrislusf/seaweedfs:local"
+ @echo "💡 Run 'make restart' to apply changes to running services"
+
+build-and-restart: build ## Build with latest changes and restart services
+ @echo "🔄 Recreating services with new image..."
+ @echo "1️⃣ Recreating admin server with new image..."
+ @docker-compose -f $(COMPOSE_FILE) up -d admin
+ @sleep 5
+ @echo "2️⃣ Recreating workers to reconnect..."
+ @docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3
+ @echo "✅ All services recreated with latest changes!"
+ @echo "🌐 Admin UI: http://localhost:23646/"
+ @echo "💡 Workers will reconnect to the new admin server"
+
+restart-workers: ## Restart all workers to reconnect to admin server
+ @echo "🔄 Restarting workers to reconnect to admin server..."
+ @docker-compose -f $(COMPOSE_FILE) restart worker1 worker2 worker3
+ @echo "✅ Workers restarted and will reconnect to admin server"
+
+help: ## Show this help message
+ @echo "SeaweedFS Admin Integration Test"
+ @echo "================================"
+ @echo "Tests admin server task distribution to workers using official weed commands"
+ @echo ""
+ @echo "🏗️ Cluster Management:"
+ @grep -E '^(start|stop|restart|clean|status|build):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}'
+ @echo ""
+ @echo "🧪 Testing:"
+ @grep -E '^(test|demo|validate|quick-test):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}'
+ @echo ""
+ @echo "🗑️ Vacuum Testing:"
+ @grep -E '^vacuum-.*:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}'
+ @echo ""
+ @echo "📜 Monitoring:"
+ @grep -E '^(logs|admin-logs|worker-logs|master-logs|admin-ui):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}'
+ @echo ""
+ @echo "🚀 Quick Start:"
+ @echo " make start # Start cluster"
+ @echo " make vacuum-test # Test vacuum tasks"
+ @echo " make vacuum-help # Vacuum testing guide"
+ @echo ""
+ @echo "💡 For detailed vacuum testing: make vacuum-help"
+
+start: ## Start the complete SeaweedFS cluster with admin and workers
+ @echo "🚀 Starting SeaweedFS cluster with admin and workers..."
+ @docker-compose -f $(COMPOSE_FILE) up -d
+ @echo "✅ Cluster started!"
+ @echo ""
+ @echo "📊 Access points:"
+ @echo " • Admin UI: http://localhost:23646/"
+ @echo " • Master UI: http://localhost:9333/"
+ @echo " • Filer: http://localhost:8888/"
+ @echo ""
+ @echo "📈 Services starting up..."
+ @echo " • Master server: ✓"
+ @echo " • Volume servers: Starting (6 servers)..."
+ @echo " • Filer: Starting..."
+ @echo " • Admin server: Starting..."
+ @echo " • Workers: Starting (3 workers)..."
+ @echo ""
+ @echo "⏳ Use 'make status' to check startup progress"
+ @echo "💡 Use 'make logs' to watch the startup process"
+
+start-staged: ## Start services in proper order with delays
+ @echo "🚀 Starting SeaweedFS cluster in stages..."
+ @echo ""
+ @echo "Stage 1: Starting Master server..."
+ @docker-compose -f $(COMPOSE_FILE) up -d master
+ @sleep 10
+ @echo ""
+ @echo "Stage 2: Starting Volume servers..."
+ @docker-compose -f $(COMPOSE_FILE) up -d volume1 volume2 volume3 volume4 volume5 volume6
+ @sleep 15
+ @echo ""
+ @echo "Stage 3: Starting Filer..."
+ @docker-compose -f $(COMPOSE_FILE) up -d filer
+ @sleep 10
+ @echo ""
+ @echo "Stage 4: Starting Admin server..."
+ @docker-compose -f $(COMPOSE_FILE) up -d admin
+ @sleep 15
+ @echo ""
+ @echo "Stage 5: Starting Workers..."
+ @docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3
+ @sleep 10
+ @echo ""
+ @echo "Stage 6: Starting Load generator and Monitor..."
+ @docker-compose -f $(COMPOSE_FILE) up -d load_generator monitor
+ @echo ""
+ @echo "✅ All services started!"
+ @echo ""
+ @echo "📊 Access points:"
+ @echo " • Admin UI: http://localhost:23646/"
+ @echo " • Master UI: http://localhost:9333/"
+ @echo " • Filer: http://localhost:8888/"
+ @echo ""
+ @echo "⏳ Services are initializing... Use 'make status' to check progress"
+
+stop: ## Stop all services
+ @echo "🛑 Stopping SeaweedFS cluster..."
+ @docker-compose -f $(COMPOSE_FILE) down
+ @echo "✅ Cluster stopped"
+
+restart: stop start ## Restart the entire cluster
+
+clean: ## Stop and remove all containers, networks, and volumes
+ @echo "🧹 Cleaning up SeaweedFS test environment..."
+ @docker-compose -f $(COMPOSE_FILE) down -v --remove-orphans
+ @docker system prune -f
+ @rm -rf data/
+ @echo "✅ Environment cleaned"
+
+status: ## Check the status of all services
+ @echo "📊 SeaweedFS Cluster Status"
+ @echo "=========================="
+ @docker-compose -f $(COMPOSE_FILE) ps
+ @echo ""
+ @echo "📋 Service Health:"
+ @echo "Master:"
+ @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' 2>/dev/null || echo " ❌ Master not ready"
+ @echo "Admin:"
+ @curl -s http://localhost:23646/ | grep -q "Admin" && echo " ✅ Admin ready" || echo " ❌ Admin not ready"
+
+logs: ## Show logs from all services
+ @echo "📜 Following logs from all services..."
+ @echo "💡 Press Ctrl+C to stop following logs"
+ @docker-compose -f $(COMPOSE_FILE) logs -f
+
+admin-logs: ## Show logs from admin server only
+ @echo "📜 Admin server logs:"
+ @docker-compose -f $(COMPOSE_FILE) logs -f admin
+
+worker-logs: ## Show logs from all workers
+ @echo "📜 Worker logs:"
+ @docker-compose -f $(COMPOSE_FILE) logs -f worker1 worker2 worker3
+
+master-logs: ## Show logs from master server
+ @echo "📜 Master server logs:"
+ @docker-compose -f $(COMPOSE_FILE) logs -f master
+
+admin-ui: ## Open admin UI in browser (macOS)
+ @echo "🌐 Opening admin UI in browser..."
+ @open http://localhost:23646/ || echo "💡 Manually open: http://localhost:23646/"
+
+test: ## Run integration test to verify task assignment and completion
+ @echo "🧪 Running Admin-Worker Integration Test"
+ @echo "========================================"
+ @echo ""
+ @echo "1️⃣ Checking cluster health..."
+ @sleep 5
+ @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "✅ Master healthy" || echo "❌ Master not ready"
+ @curl -s http://localhost:23646/ | grep -q "Admin" && echo "✅ Admin healthy" || echo "❌ Admin not ready"
+ @echo ""
+ @echo "2️⃣ Checking worker registration..."
+ @sleep 10
+ @echo "💡 Check admin UI for connected workers: http://localhost:23646/"
+ @echo ""
+ @echo "3️⃣ Generating load to trigger EC tasks..."
+ @echo "📝 Creating test files to fill volumes..."
+ @echo "Creating large files with random data to trigger EC (targeting ~60MB total to exceed 50MB limit)..."
+	@for i in $$(seq 1 12); do \
+ echo "Creating 5MB random file $$i..."; \
+ docker run --rm --network admin_integration_seaweed_net -v /tmp:/tmp --entrypoint sh chrislusf/seaweedfs:local -c "dd if=/dev/urandom of=/tmp/largefile$$i.dat bs=1M count=5 2>/dev/null && weed upload -master=master:9333 /tmp/largefile$$i.dat && rm /tmp/largefile$$i.dat"; \
+ sleep 3; \
+ done
+ @echo ""
+ @echo "4️⃣ Waiting for volumes to process large files and reach 50MB limit..."
+ @echo "This may take a few minutes as we're uploading 60MB of data..."
+ @sleep 60
+ @echo ""
+ @echo "5️⃣ Checking for EC task creation and assignment..."
+ @echo "💡 Monitor the admin UI to see:"
+ @echo " • Tasks being created for volumes needing EC"
+ @echo " • Workers picking up tasks"
+ @echo " • Task progress (pending → running → completed)"
+ @echo " • EC shards being distributed"
+ @echo ""
+ @echo "✅ Integration test setup complete!"
+ @echo "📊 Monitor progress at: http://localhost:23646/"
+
+quick-test: ## Quick verification that core services are running
+ @echo "⚡ Quick Health Check"
+ @echo "===================="
+ @echo "Master: $$(curl -s http://localhost:9333/cluster/status | jq -r '.IsLeader // "not ready"')"
+ @echo "Admin: $$(curl -s http://localhost:23646/ | grep -q "Admin" && echo "ready" || echo "not ready")"
+ @echo "Workers: $$(docker-compose -f $(COMPOSE_FILE) ps worker1 worker2 worker3 | grep -c Up) running"
+
+validate: ## Validate integration test configuration
+ @echo "🔍 Validating Integration Test Configuration"
+ @echo "==========================================="
+ @chmod +x test-integration.sh
+ @./test-integration.sh
+
+demo: start ## Start cluster and run demonstration
+ @echo "🎭 SeaweedFS Admin-Worker Demo"
+ @echo "============================="
+ @echo ""
+ @echo "⏳ Waiting for services to start..."
+ @sleep 45
+ @echo ""
+ @echo "🎯 Demo Overview:"
+ @echo " • 1 Master server (coordinates cluster)"
+ @echo " • 6 Volume servers (50MB volume limit)"
+ @echo " • 1 Admin server (task management)"
+ @echo " • 3 Workers (execute EC tasks)"
+ @echo " • Load generator (creates files continuously)"
+ @echo ""
+ @echo "📊 Watch the process:"
+ @echo " 1. Visit: http://localhost:23646/"
+ @echo " 2. Observe workers connecting"
+ @echo " 3. Watch tasks being created and assigned"
+ @echo " 4. See tasks progress from pending → completed"
+ @echo ""
+ @echo "🔄 The demo will:"
+ @echo " • Fill volumes to 50MB limit"
+ @echo " • Admin detects volumes needing EC"
+ @echo " • Workers receive and execute EC tasks"
+ @echo " • Tasks complete with shard distribution"
+ @echo ""
+ @echo "💡 Use 'make worker-logs' to see worker activity"
+ @echo "💡 Use 'make admin-logs' to see admin task management"
+
+# Vacuum Testing Targets
+vacuum-test: ## Create test data with garbage and verify vacuum detection
+ @echo "🧪 SeaweedFS Vacuum Task Testing"
+ @echo "================================"
+ @echo ""
+ @echo "1️⃣ Checking cluster health..."
+ @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "✅ Master ready" || (echo "❌ Master not ready. Run 'make start' first." && exit 1)
+ @curl -s http://localhost:23646/ | grep -q "Admin" && echo "✅ Admin ready" || (echo "❌ Admin not ready. Run 'make start' first." && exit 1)
+ @echo ""
+ @echo "2️⃣ Creating test data with garbage..."
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=25 -delete=0.5 -size=200
+ @echo ""
+ @echo "3️⃣ Configuration Instructions:"
+ @echo " Visit: http://localhost:23646/maintenance/config/vacuum"
+ @echo " Set for testing:"
+ @echo " • Enable Vacuum Tasks: ✅ Checked"
+ @echo " • Garbage Threshold: 0.20 (20%)"
+ @echo " • Scan Interval: [30] [Seconds]"
+ @echo " • Min Volume Age: [0] [Minutes]"
+ @echo " • Max Concurrent: 2"
+ @echo ""
+ @echo "4️⃣ Monitor vacuum tasks at: http://localhost:23646/maintenance"
+ @echo ""
+ @echo "💡 Use 'make vacuum-status' to check volume garbage ratios"
+
+vacuum-demo: ## Run automated vacuum testing demonstration
+ @echo "🎭 Vacuum Task Demo"
+ @echo "=================="
+ @echo ""
+ @echo "⚠️ This demo requires user interaction for configuration"
+ @echo "💡 Make sure cluster is running with 'make start'"
+ @echo ""
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x demo_vacuum_testing.sh && ./demo_vacuum_testing.sh"
+
+vacuum-status: ## Check current volume status and garbage ratios
+ @echo "📊 Current Volume Status"
+ @echo "======================="
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x check_volumes.sh && ./check_volumes.sh"
+
+vacuum-data: ## Create test data with configurable parameters
+ @echo "📁 Creating vacuum test data..."
+ @echo "Usage: make vacuum-data [FILES=20] [DELETE=0.4] [SIZE=100]"
+ @echo ""
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go \
+ -files=$${FILES:-20} \
+ -delete=$${DELETE:-0.4} \
+ -size=$${SIZE:-100}
+
+vacuum-data-high: ## Create high garbage ratio test data (should trigger vacuum)
+ @echo "📁 Creating high garbage test data (70% garbage)..."
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.7 -size=150
+
+vacuum-data-low: ## Create low garbage ratio test data (should NOT trigger vacuum)
+ @echo "📁 Creating low garbage test data (15% garbage)..."
+ @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.15 -size=150
+
+vacuum-continuous: ## Generate garbage continuously for testing
+ @echo "🔄 Generating continuous garbage for vacuum testing..."
+ @echo "Creating 5 rounds of test data with 30-second intervals..."
+	@for i in $$(seq 1 5); do \
+ echo "Round $$i: Creating garbage..."; \
+ docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=10 -delete=0.6 -size=100; \
+ echo "Waiting 30 seconds..."; \
+ sleep 30; \
+ done
+ @echo "✅ Continuous test complete. Check vacuum task activity!"
+
+vacuum-clean: ## Clean up vacuum test data (removes all volumes!)
+ @echo "🧹 Cleaning up vacuum test data..."
+ @echo "⚠️ WARNING: This will delete ALL volumes!"
+ @read -p "Are you sure? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1
+ @echo "Stopping cluster..."
+ @docker-compose -f $(COMPOSE_FILE) down
+ @echo "Removing volume data..."
+ @rm -rf data/volume*/
+ @echo "Restarting cluster..."
+ @docker-compose -f $(COMPOSE_FILE) up -d
+ @echo "✅ Clean up complete. Fresh volumes ready for testing."
+
+vacuum-help: ## Show vacuum testing help and examples
+ @echo "🧪 Vacuum Testing Commands (Docker-based)"
+ @echo "=========================================="
+ @echo ""
+ @echo "Quick Start:"
+ @echo " make start # Start SeaweedFS cluster with vacuum-tester"
+ @echo " make vacuum-test # Create test data and instructions"
+ @echo " make vacuum-status # Check volume status"
+ @echo ""
+ @echo "Data Generation:"
+ @echo " make vacuum-data-high # High garbage (should trigger)"
+ @echo " make vacuum-data-low # Low garbage (should NOT trigger)"
+ @echo " make vacuum-continuous # Continuous garbage generation"
+ @echo ""
+ @echo "Monitoring:"
+ @echo " make vacuum-status # Quick volume status check"
+ @echo " make vacuum-demo # Full guided demonstration"
+ @echo ""
+ @echo "Configuration:"
+ @echo " Visit: http://localhost:23646/maintenance/config/vacuum"
+ @echo " Monitor: http://localhost:23646/maintenance"
+ @echo ""
+ @echo "Custom Parameters:"
+ @echo " make vacuum-data FILES=50 DELETE=0.8 SIZE=200"
+ @echo ""
+ @echo "💡 All commands now run inside Docker containers"
+ @echo "Documentation:"
+ @echo " See: VACUUM_TEST_README.md for complete guide" \ No newline at end of file
diff --git a/docker/admin_integration/check_volumes.sh b/docker/admin_integration/check_volumes.sh
new file mode 100755
index 000000000..8cc6c14c5
--- /dev/null
+++ b/docker/admin_integration/check_volumes.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+echo "📊 Quick Volume Status Check"
+echo "============================"
+echo ""
+
+# Check if master is running
+MASTER_URL="${MASTER_HOST:-master:9333}"
+if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then
+ echo "❌ Master server not available at $MASTER_URL"
+ exit 1
+fi
+
+echo "🔍 Fetching volume status from master..."
+curl -s "http://$MASTER_URL/vol/status" | jq -r '
+if .Volumes and .Volumes.DataCenters then
+ .Volumes.DataCenters | to_entries[] | .value | to_entries[] | .value | to_entries[] | .value | if . then .[] else empty end |
+ "Volume \(.Id):
+ Size: \(.Size | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end)
+ Files: \(.FileCount) active, \(.DeleteCount) deleted
+ Garbage: \(.DeletedByteCount | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end) (\(if .Size > 0 then (.DeletedByteCount / .Size * 100 | floor) else 0 end)%)
+ Status: \(if (if .Size > 0 then (.DeletedByteCount / .Size * 100) else 0 end) > 30 then "🎯 NEEDS VACUUM" else "✅ OK" end)
+"
+else
+ "No volumes found"
+end'
+
+echo ""
+echo "💡 Legend:"
+echo " 🎯 NEEDS VACUUM: >30% garbage ratio"
+echo " ✅ OK: <30% garbage ratio"
+echo "" \ No newline at end of file
diff --git a/docker/admin_integration/create_vacuum_test_data.go b/docker/admin_integration/create_vacuum_test_data.go
new file mode 100644
index 000000000..46acdd4cd
--- /dev/null
+++ b/docker/admin_integration/create_vacuum_test_data.go
@@ -0,0 +1,280 @@
+package main
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "time"
+)
+
+var (
+ master = flag.String("master", "master:9333", "SeaweedFS master server address")
+ fileCount = flag.Int("files", 20, "Number of files to create")
+ deleteRatio = flag.Float64("delete", 0.4, "Ratio of files to delete (0.0-1.0)")
+ fileSizeKB = flag.Int("size", 100, "Size of each file in KB")
+)
+
+type AssignResult struct {
+ Fid string `json:"fid"`
+ Url string `json:"url"`
+ PublicUrl string `json:"publicUrl"`
+ Count int `json:"count"`
+ Error string `json:"error"`
+}
+
+func main() {
+ flag.Parse()
+
+ fmt.Println("🧪 Creating fake data for vacuum task testing...")
+ fmt.Printf("Master: %s\n", *master)
+ fmt.Printf("Files to create: %d\n", *fileCount)
+ fmt.Printf("Delete ratio: %.1f%%\n", *deleteRatio*100)
+ fmt.Printf("File size: %d KB\n", *fileSizeKB)
+ fmt.Println()
+
+ if *fileCount == 0 {
+ // Just check volume status
+ fmt.Println("📊 Checking volume status...")
+ checkVolumeStatus()
+ return
+ }
+
+ // Step 1: Create test files
+ fmt.Println("📁 Step 1: Creating test files...")
+ fids := createTestFiles()
+
+ // Step 2: Delete some files to create garbage
+ fmt.Println("🗑️ Step 2: Deleting files to create garbage...")
+ deleteFiles(fids)
+
+ // Step 3: Check volume status
+ fmt.Println("📊 Step 3: Checking volume status...")
+ checkVolumeStatus()
+
+ // Step 4: Configure vacuum for testing
+ fmt.Println("⚙️ Step 4: Instructions for testing...")
+ printTestingInstructions()
+}
+
+func createTestFiles() []string {
+ var fids []string
+
+ for i := 0; i < *fileCount; i++ {
+ // Generate random file content
+ fileData := make([]byte, *fileSizeKB*1024)
+ rand.Read(fileData)
+
+ // Get file ID assignment
+ assign, err := assignFileId()
+ if err != nil {
+ log.Printf("Failed to assign file ID for file %d: %v", i, err)
+ continue
+ }
+
+ // Upload file
+ err = uploadFile(assign, fileData, fmt.Sprintf("test_file_%d.dat", i))
+ if err != nil {
+ log.Printf("Failed to upload file %d: %v", i, err)
+ continue
+ }
+
+ fids = append(fids, assign.Fid)
+
+ if (i+1)%5 == 0 {
+ fmt.Printf(" Created %d/%d files...\n", i+1, *fileCount)
+ }
+ }
+
+ fmt.Printf("✅ Created %d files successfully\n\n", len(fids))
+ return fids
+}
+
+func deleteFiles(fids []string) {
+ deleteCount := int(float64(len(fids)) * *deleteRatio)
+
+ for i := 0; i < deleteCount; i++ {
+ err := deleteFile(fids[i])
+ if err != nil {
+ log.Printf("Failed to delete file %s: %v", fids[i], err)
+ continue
+ }
+
+ if (i+1)%5 == 0 {
+ fmt.Printf(" Deleted %d/%d files...\n", i+1, deleteCount)
+ }
+ }
+
+ fmt.Printf("✅ Deleted %d files (%.1f%% of total)\n\n", deleteCount, *deleteRatio*100)
+}
+
+func assignFileId() (*AssignResult, error) {
+ resp, err := http.Get(fmt.Sprintf("http://%s/dir/assign", *master))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result AssignResult
+ err = json.NewDecoder(resp.Body).Decode(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ if result.Error != "" {
+ return nil, fmt.Errorf("assignment error: %s", result.Error)
+ }
+
+ return &result, nil
+}
+
+func uploadFile(assign *AssignResult, data []byte, filename string) error {
+ url := fmt.Sprintf("http://%s/%s", assign.Url, assign.Fid)
+
+ body := &bytes.Buffer{}
+ body.Write(data)
+
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/octet-stream")
+ if filename != "" {
+ req.Header.Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
+ }
+
+ client := &http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body))
+ }
+
+ return nil
+}
+
+func deleteFile(fid string) error {
+ url := fmt.Sprintf("http://%s/%s", *master, fid)
+
+ req, err := http.NewRequest("DELETE", url, nil)
+ if err != nil {
+ return err
+ }
+
+ client := &http.Client{Timeout: 10 * time.Second}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func checkVolumeStatus() {
+ // Get volume list from master
+ resp, err := http.Get(fmt.Sprintf("http://%s/vol/status", *master))
+ if err != nil {
+ log.Printf("Failed to get volume status: %v", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ var volumes map[string]interface{}
+ err = json.NewDecoder(resp.Body).Decode(&volumes)
+ if err != nil {
+ log.Printf("Failed to decode volume status: %v", err)
+ return
+ }
+
+ fmt.Println("📊 Volume Status Summary:")
+
+ if vols, ok := volumes["Volumes"].([]interface{}); ok {
+ for _, vol := range vols {
+ if v, ok := vol.(map[string]interface{}); ok {
+ id := int(v["Id"].(float64))
+ size := uint64(v["Size"].(float64))
+ fileCount := int(v["FileCount"].(float64))
+ deleteCount := int(v["DeleteCount"].(float64))
+ deletedBytes := uint64(v["DeletedByteCount"].(float64))
+
+ garbageRatio := 0.0
+ if size > 0 {
+ garbageRatio = float64(deletedBytes) / float64(size) * 100
+ }
+
+ fmt.Printf(" Volume %d:\n", id)
+ fmt.Printf(" Size: %s\n", formatBytes(size))
+ fmt.Printf(" Files: %d (active), %d (deleted)\n", fileCount, deleteCount)
+ fmt.Printf(" Garbage: %s (%.1f%%)\n", formatBytes(deletedBytes), garbageRatio)
+
+ if garbageRatio > 30 {
+ fmt.Printf(" 🎯 This volume should trigger vacuum (>30%% garbage)\n")
+ }
+ fmt.Println()
+ }
+ }
+ }
+}
+
+func formatBytes(bytes uint64) string {
+ if bytes < 1024 {
+ return fmt.Sprintf("%d B", bytes)
+ } else if bytes < 1024*1024 {
+ return fmt.Sprintf("%.1f KB", float64(bytes)/1024)
+ } else if bytes < 1024*1024*1024 {
+ return fmt.Sprintf("%.1f MB", float64(bytes)/(1024*1024))
+ } else {
+ return fmt.Sprintf("%.1f GB", float64(bytes)/(1024*1024*1024))
+ }
+}
+
+func printTestingInstructions() {
+ fmt.Println("🧪 Testing Instructions:")
+ fmt.Println()
+ fmt.Println("1. Configure Vacuum for Testing:")
+ fmt.Println(" Visit: http://localhost:23646/maintenance/config/vacuum")
+ fmt.Println(" Set:")
+ fmt.Printf(" - Garbage Percentage Threshold: 20 (20%% - lower than default 30)\n")
+ fmt.Printf(" - Scan Interval: [30] [Seconds] (faster than default)\n")
+ fmt.Printf(" - Min Volume Age: [0] [Minutes] (no age requirement)\n")
+ fmt.Printf(" - Max Concurrent: 2\n")
+ fmt.Printf(" - Min Interval: 1m (faster repeat)\n")
+ fmt.Println()
+
+ fmt.Println("2. Monitor Vacuum Tasks:")
+ fmt.Println(" Visit: http://localhost:23646/maintenance")
+ fmt.Println(" Watch for vacuum tasks to appear in the queue")
+ fmt.Println()
+
+ fmt.Println("3. Manual Vacuum (Optional):")
+ fmt.Println(" curl -X POST 'http://localhost:9333/vol/vacuum?garbageThreshold=0.20'")
+ fmt.Println(" (Note: Master API still uses 0.0-1.0 decimal format)")
+ fmt.Println()
+
+ fmt.Println("4. Check Logs:")
+ fmt.Println(" Look for messages like:")
+ fmt.Println(" - 'Vacuum detector found X volumes needing vacuum'")
+ fmt.Println(" - 'Applied vacuum configuration'")
+ fmt.Println(" - 'Worker executing task: vacuum'")
+ fmt.Println()
+
+ fmt.Println("5. Verify Results:")
+ fmt.Println(" Re-run this script with -files=0 to check volume status")
+ fmt.Println(" Garbage ratios should decrease after vacuum operations")
+ fmt.Println()
+
+ fmt.Printf("🚀 Quick test command:\n")
+ fmt.Printf(" go run create_vacuum_test_data.go -files=0\n")
+ fmt.Println()
+}
diff --git a/docker/admin_integration/demo_vacuum_testing.sh b/docker/admin_integration/demo_vacuum_testing.sh
new file mode 100755
index 000000000..6835e14cc
--- /dev/null
+++ b/docker/admin_integration/demo_vacuum_testing.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+
+echo "🧪 SeaweedFS Vacuum Task Testing Demo"
+echo "======================================"
+echo ""
+
+# Check if SeaweedFS is running
+echo "📋 Checking SeaweedFS status..."
+MASTER_URL="${MASTER_HOST:-master:9333}"
+ADMIN_URL="${ADMIN_HOST:-admin:23646}"
+
+if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then
+ echo "❌ SeaweedFS master not running at $MASTER_URL"
+ echo " Please ensure Docker cluster is running: make start"
+ exit 1
+fi
+
+if ! curl -s http://volume1:8080/status > /dev/null; then
+ echo "❌ SeaweedFS volume servers not running"
+ echo " Please ensure Docker cluster is running: make start"
+ exit 1
+fi
+
+if ! curl -s http://$ADMIN_URL/ > /dev/null; then
+ echo "❌ SeaweedFS admin server not running at $ADMIN_URL"
+ echo " Please ensure Docker cluster is running: make start"
+ exit 1
+fi
+
+echo "✅ All SeaweedFS components are running"
+echo ""
+
+# Phase 1: Create test data
+echo "📁 Phase 1: Creating test data with garbage..."
+go run create_vacuum_test_data.go -master=$MASTER_URL -files=15 -delete=0.5 -size=150
+echo ""
+
+# Phase 2: Check initial status
+echo "📊 Phase 2: Checking initial volume status..."
+go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
+echo ""
+
+# Phase 3: Configure vacuum
+echo "⚙️ Phase 3: Vacuum configuration instructions..."
+echo " 1. Visit: http://localhost:23646/maintenance/config/vacuum"
+echo " 2. Set these values for testing:"
+echo " - Enable Vacuum Tasks: ✅ Checked"
+echo " - Garbage Threshold: 0.30"
+echo " - Scan Interval: [30] [Seconds]"
+echo " - Min Volume Age: [0] [Minutes]"
+echo " - Max Concurrent: 2"
+echo " 3. Click 'Save Configuration'"
+echo ""
+
+read -p " Press ENTER after configuring vacuum settings..."
+echo ""
+
+# Phase 4: Monitor tasks
+echo "🎯 Phase 4: Monitoring vacuum tasks..."
+echo " Visit: http://localhost:23646/maintenance"
+echo " You should see vacuum tasks appear within 30 seconds"
+echo ""
+
+echo " Waiting 60 seconds for vacuum detection and execution..."
+for i in $(seq 60 -1 1); do
+ printf "\r Countdown: %02d seconds" $i
+ sleep 1
+done
+echo ""
+echo ""
+
+# Phase 5: Check results
+echo "📈 Phase 5: Checking results after vacuum..."
+go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
+echo ""
+
+# Phase 6: Create more garbage for continuous testing
+echo "🔄 Phase 6: Creating additional garbage for continuous testing..."
+echo " Running 3 rounds of garbage creation..."
+
+for round in $(seq 1 3); do
+ echo " Round $round: Creating garbage..."
+ go run create_vacuum_test_data.go -master=$MASTER_URL -files=8 -delete=0.6 -size=100
+ echo " Waiting 30 seconds before next round..."
+ sleep 30
+done
+
+echo ""
+echo "📊 Final volume status:"
+go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
+echo ""
+
+echo "🎉 Demo Complete!"
+echo ""
+echo "🔍 Things to check:"
+echo " 1. Maintenance Queue: http://localhost:23646/maintenance"
+echo " 2. Volume Status: http://localhost:9333/vol/status"
+echo " 3. Admin Dashboard: http://localhost:23646"
+echo ""
+echo "💡 Next Steps:"
+echo " - Try different garbage thresholds (0.10, 0.50, 0.80)"
+echo " - Adjust scan intervals (10s, 1m, 5m)"
+echo " - Monitor logs for vacuum operations"
+echo " - Test with multiple volumes"
+echo "" \ No newline at end of file
diff --git a/docker/admin_integration/docker-compose-ec-test.yml b/docker/admin_integration/docker-compose-ec-test.yml
new file mode 100644
index 000000000..197c9bda5
--- /dev/null
+++ b/docker/admin_integration/docker-compose-ec-test.yml
@@ -0,0 +1,240 @@
+name: admin_integration
+
+networks:
+ seaweed_net:
+ driver: bridge
+
+services:
+ master:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "9333:9333"
+ - "19333:19333"
+ command: "master -ip=master -mdir=/data -volumeSizeLimitMB=50"
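+ # Small 50MB volume size limit so volumes fill up quickly during testing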
+ environment:
+ - WEED_MASTER_VOLUME_GROWTH_COPY_1=1
+ - WEED_MASTER_VOLUME_GROWTH_COPY_2=2
+ - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER=1
+ volumes:
+ - ./data/master:/data
+ networks:
+ - seaweed_net
+
+ volume1:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8080:8080"
+ - "18080:18080"
+ command: "volume -mserver=master:9333 -ip=volume1 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume1:/data
+ networks:
+ - seaweed_net
+
+ volume2:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8081:8080"
+ - "18081:18080"
+ command: "volume -mserver=master:9333 -ip=volume2 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume2:/data
+ networks:
+ - seaweed_net
+
+ volume3:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8082:8080"
+ - "18082:18080"
+ command: "volume -mserver=master:9333 -ip=volume3 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume3:/data
+ networks:
+ - seaweed_net
+
+ volume4:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8083:8080"
+ - "18083:18080"
+ command: "volume -mserver=master:9333 -ip=volume4 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume4:/data
+ networks:
+ - seaweed_net
+
+ volume5:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8084:8080"
+ - "18084:18080"
+ command: "volume -mserver=master:9333 -ip=volume5 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume5:/data
+ networks:
+ - seaweed_net
+
+ volume6:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8085:8080"
+ - "18085:18080"
+ command: "volume -mserver=master:9333 -ip=volume6 -dir=/data -max=10"
+ depends_on:
+ - master
+ volumes:
+ - ./data/volume6:/data
+ networks:
+ - seaweed_net
+
+ filer:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "8888:8888"
+ - "18888:18888"
+ command: "filer -master=master:9333 -ip=filer"
+ depends_on:
+ - master
+ volumes:
+ - ./data/filer:/data
+ networks:
+ - seaweed_net
+
+ admin:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - "23646:23646" # HTTP admin interface (default port)
+ - "33646:33646" # gRPC worker communication (23646 + 10000)
+ command: "admin -port=23646 -masters=master:9333 -dataDir=/data"
+ depends_on:
+ - master
+ - filer
+ volumes:
+ - ./data/admin:/data
+ networks:
+ - seaweed_net
+
+ worker1:
+ image: chrislusf/seaweedfs:local
+ command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
+ depends_on:
+ - admin
+ volumes:
+ - ./data/worker1:/data
+ networks:
+ - seaweed_net
+ environment:
+ - WORKER_ID=worker-1
+
+ worker2:
+ image: chrislusf/seaweedfs:local
+ command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
+ depends_on:
+ - admin
+ volumes:
+ - ./data/worker2:/data
+ networks:
+ - seaweed_net
+ environment:
+ - WORKER_ID=worker-2
+
+ worker3:
+ image: chrislusf/seaweedfs:local
+ command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
+ depends_on:
+ - admin
+ volumes:
+ - ./data/worker3:/data
+ networks:
+ - seaweed_net
+ environment:
+ - WORKER_ID=worker-3
+
+ load_generator:
+ image: chrislusf/seaweedfs:local
+ entrypoint: ["/bin/sh"]
+ command: >
+ -c "
+ echo 'Starting load generator...';
+ sleep 30;
+ echo 'Generating continuous load with 50MB volume limit...';
+ while true; do
+ echo 'Writing test files...';
+ echo 'Test file content at $(date)' | /usr/bin/weed upload -server=master:9333;
+ sleep 5;
+ echo 'Deleting some files...';
+ echo 'fs.rm /test_file_*' | /usr/bin/weed shell -master=master:9333 || true;
+ sleep 10;
+ done
+ "
+ depends_on:
+ - master
+ - filer
+ - admin
+ networks:
+ - seaweed_net
+
+ monitor:
+ image: alpine:latest
+ entrypoint: ["/bin/sh"]
+ command: >
+ -c "
+ apk add --no-cache curl jq;
+ echo 'Starting cluster monitor...';
+ sleep 30;
+ while true; do
+ echo '=== Cluster Status $(date) ===';
+ echo 'Master status:';
+ curl -s http://master:9333/cluster/status | jq '.IsLeader, .Peers' || echo 'Master not ready';
+ echo;
+ echo 'Admin status:';
+ curl -s http://admin:23646/ | grep -o 'Admin.*Interface' || echo 'Admin not ready';
+ echo;
+ echo 'Volume count by server:';
+ curl -s http://master:9333/vol/status | jq '.Volumes | length' || echo 'Volumes not ready';
+ echo;
+ sleep 60;
+ done
+ "
+ depends_on:
+ - master
+ - admin
+ - filer
+ networks:
+ - seaweed_net
+
+ vacuum-tester:
+ image: chrislusf/seaweedfs:local
+ entrypoint: ["/bin/sh"]
+ command: >
+ -c "
+ echo 'Installing dependencies for vacuum testing...';
+ apk add --no-cache jq curl go bash;
+ echo 'Vacuum tester ready...';
+ echo 'Use: docker-compose exec vacuum-tester sh';
+ echo 'Available commands: go, weed, curl, jq, bash, sh';
+ sleep infinity
+ "
+ depends_on:
+ - master
+ - admin
+ - filer
+ volumes:
+ - .:/testing
+ working_dir: /testing
+ networks:
+ - seaweed_net
+ environment:
+ - MASTER_HOST=master:9333
+ - ADMIN_HOST=admin:23646 \ No newline at end of file
diff --git a/docker/admin_integration/test-integration.sh b/docker/admin_integration/test-integration.sh
new file mode 100755
index 000000000..b355b1dfd
--- /dev/null
+++ b/docker/admin_integration/test-integration.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+set -e
+
+echo "🧪 Testing SeaweedFS Admin-Worker Integration"
+echo "============================================="
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+cd "$(dirname "$0")"
+
+echo -e "${BLUE}1. Validating docker-compose configuration...${NC}"
+if docker-compose -f docker-compose-ec-test.yml config > /dev/null; then
+ echo -e "${GREEN}✅ Docker compose configuration is valid${NC}"
+else
+ echo -e "${RED}❌ Docker compose configuration is invalid${NC}"
+ exit 1
+fi
+
+echo -e "${BLUE}2. Checking if required ports are available...${NC}"
+for port in 9333 8080 8081 8082 8083 8084 8085 8888 23646; do
+ if lsof -i :$port > /dev/null 2>&1; then
+ echo -e "${YELLOW}⚠️ Port $port is in use${NC}"
+ else
+ echo -e "${GREEN}✅ Port $port is available${NC}"
+ fi
+done
+
+echo -e "${BLUE}3. Testing worker command syntax...${NC}"
+# Test that the worker command in docker-compose has correct syntax
+if docker-compose -f docker-compose-ec-test.yml config | grep -q "workingDir=/work"; then
+ echo -e "${GREEN}✅ Worker working directory option is properly configured${NC}"
+else
+ echo -e "${RED}❌ Worker working directory option is missing${NC}"
+ exit 1
+fi
+
+echo -e "${BLUE}4. Verifying admin server configuration...${NC}"
+if docker-compose -f docker-compose-ec-test.yml config | grep -q "admin:23646"; then
+ echo -e "${GREEN}✅ Admin server port configuration is correct${NC}"
+else
+ echo -e "${RED}❌ Admin server port configuration is incorrect${NC}"
+ exit 1
+fi
+
+echo -e "${BLUE}5. Checking service dependencies...${NC}"
+if docker-compose -f docker-compose-ec-test.yml config | grep -q "depends_on"; then
+ echo -e "${GREEN}✅ Service dependencies are configured${NC}"
+else
+ echo -e "${YELLOW}⚠️ Service dependencies may not be configured${NC}"
+fi
+
+echo ""
+echo -e "${GREEN}🎉 Integration test configuration is ready!${NC}"
+echo ""
+echo -e "${BLUE}To start the integration test:${NC}"
+echo " make start # Start all services"
+echo " make health # Check service health"
+echo " make logs # View logs"
+echo " make stop # Stop all services"
+echo ""
+echo -e "${BLUE}Key features verified:${NC}"
+echo " ✅ Official SeaweedFS images are used"
+echo " ✅ Worker working directories are configured"
+echo " ✅ Admin-worker communication on correct ports"
+echo " ✅ Task-specific directories will be created"
+echo " ✅ Load generator will trigger EC tasks"
+echo " ✅ Monitor will track progress" \ No newline at end of file
diff --git a/weed/admin/config/schema.go b/weed/admin/config/schema.go
new file mode 100644
index 000000000..54fb615f9
--- /dev/null
+++ b/weed/admin/config/schema.go
@@ -0,0 +1,360 @@
+package config
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+)
+
+// ConfigWithDefaults defines an interface for configurations that can apply their own defaults
+type ConfigWithDefaults interface {
+ // ApplySchemaDefaults applies default values using the provided schema
+ ApplySchemaDefaults(schema *Schema) error
+ // Validate validates the configuration
+ Validate() error
+}
+
+// FieldType defines the type of a configuration field
+type FieldType string
+
+const (
+ FieldTypeBool FieldType = "bool"
+ FieldTypeInt FieldType = "int"
+ FieldTypeDuration FieldType = "duration"
+ FieldTypeInterval FieldType = "interval"
+ FieldTypeString FieldType = "string"
+ FieldTypeFloat FieldType = "float"
+)
+
+// FieldUnit defines the unit for display purposes
+type FieldUnit string
+
+const (
+ UnitSeconds FieldUnit = "seconds"
+ UnitMinutes FieldUnit = "minutes"
+ UnitHours FieldUnit = "hours"
+ UnitDays FieldUnit = "days"
+ UnitCount FieldUnit = "count"
+ UnitNone FieldUnit = ""
+)
+
+// Field defines a configuration field with all its metadata
+type Field struct {
+ // Field identification
+ Name string `json:"name"`
+ JSONName string `json:"json_name"`
+ Type FieldType `json:"type"`
+
+ // Default value and validation
+ DefaultValue interface{} `json:"default_value"`
+ MinValue interface{} `json:"min_value,omitempty"`
+ MaxValue interface{} `json:"max_value,omitempty"`
+ Required bool `json:"required"`
+
+ // UI display
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ HelpText string `json:"help_text"`
+ Placeholder string `json:"placeholder"`
+ Unit FieldUnit `json:"unit"`
+
+ // Form rendering
+ InputType string `json:"input_type"` // "checkbox", "number", "text", "interval", etc.
+ CSSClasses string `json:"css_classes,omitempty"`
+}
+
+// GetDisplayValue returns the value formatted for display in the specified unit
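+// For example, a field with Unit set to UnitMinutes displays a stored value of
+// 300 seconds (or a 5-minute time.Duration) as 5; ParseDisplayValue is the inverse.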
+func (f *Field) GetDisplayValue(value interface{}) interface{} {
+ if (f.Type == FieldTypeDuration || f.Type == FieldTypeInterval) && f.Unit != UnitSeconds {
+ if duration, ok := value.(time.Duration); ok {
+ switch f.Unit {
+ case UnitMinutes:
+ return int(duration.Minutes())
+ case UnitHours:
+ return int(duration.Hours())
+ case UnitDays:
+ return int(duration.Hours() / 24)
+ }
+ }
+ if seconds, ok := value.(int); ok {
+ switch f.Unit {
+ case UnitMinutes:
+ return seconds / 60
+ case UnitHours:
+ return seconds / 3600
+ case UnitDays:
+ return seconds / (24 * 3600)
+ }
+ }
+ }
+ return value
+}
+
+// GetIntervalDisplayValue returns the value and unit for interval fields
+func (f *Field) GetIntervalDisplayValue(value interface{}) (int, string) {
+ if f.Type != FieldTypeInterval {
+ return 0, "minutes"
+ }
+
+ seconds := 0
+ if duration, ok := value.(time.Duration); ok {
+ seconds = int(duration.Seconds())
+ } else if s, ok := value.(int); ok {
+ seconds = s
+ }
+
+ return SecondsToIntervalValueUnit(seconds)
+}
+
+// SecondsToIntervalValueUnit converts seconds to the most appropriate interval unit
+func SecondsToIntervalValueUnit(totalSeconds int) (int, string) {
+ if totalSeconds == 0 {
+ return 0, "minutes"
+ }
+
+ // Check if it's evenly divisible by days
+ if totalSeconds%(24*3600) == 0 {
+ return totalSeconds / (24 * 3600), "days"
+ }
+
+ // Check if it's evenly divisible by hours
+ if totalSeconds%3600 == 0 {
+ return totalSeconds / 3600, "hours"
+ }
+
+ // Default to minutes
+ return totalSeconds / 60, "minutes"
+}
+
+// IntervalValueUnitToSeconds converts interval value and unit to seconds
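+// For example, IntervalValueUnitToSeconds(2, "hours") returns 7200, and
+// SecondsToIntervalValueUnit(7200) maps that back to (2, "hours").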
+func IntervalValueUnitToSeconds(value int, unit string) int {
+ switch unit {
+ case "days":
+ return value * 24 * 3600
+ case "hours":
+ return value * 3600
+ case "minutes":
+ return value * 60
+ default:
+ return value * 60 // Default to minutes
+ }
+}
+
+// ParseDisplayValue converts a display value back to the storage format
+func (f *Field) ParseDisplayValue(displayValue interface{}) interface{} {
+ if (f.Type == FieldTypeDuration || f.Type == FieldTypeInterval) && f.Unit != UnitSeconds {
+ if val, ok := displayValue.(int); ok {
+ switch f.Unit {
+ case UnitMinutes:
+ return val * 60
+ case UnitHours:
+ return val * 3600
+ case UnitDays:
+ return val * 24 * 3600
+ }
+ }
+ }
+ return displayValue
+}
+
+// ParseIntervalFormData parses form data for interval fields (value + unit)
+func (f *Field) ParseIntervalFormData(valueStr, unitStr string) (int, error) {
+ if f.Type != FieldTypeInterval {
+ return 0, fmt.Errorf("field %s is not an interval field", f.Name)
+ }
+
+ value := 0
+ if valueStr != "" {
+ // fmt.Sscanf writes the parsed number into value via the pointer argument and
+ // returns the count of scanned items, so only the error is checked here.
+ if _, err := fmt.Sscanf(valueStr, "%d", &value); err != nil {
+ return 0, fmt.Errorf("invalid interval value: %s", valueStr)
+ }
+ }
+
+ return IntervalValueUnitToSeconds(value, unitStr), nil
+}
+
+// ValidateValue validates a value against the field constraints
+func (f *Field) ValidateValue(value interface{}) error {
+ if f.Required && (value == nil || value == "" || value == 0) {
+ return fmt.Errorf("%s is required", f.DisplayName)
+ }
+
+ if f.MinValue != nil {
+ if !f.compareValues(value, f.MinValue, ">=") {
+ return fmt.Errorf("%s must be >= %v", f.DisplayName, f.MinValue)
+ }
+ }
+
+ if f.MaxValue != nil {
+ if !f.compareValues(value, f.MaxValue, "<=") {
+ return fmt.Errorf("%s must be <= %v", f.DisplayName, f.MaxValue)
+ }
+ }
+
+ return nil
+}
+
+// compareValues compares two values based on the operator
+func (f *Field) compareValues(a, b interface{}, op string) bool {
+ switch f.Type {
+ case FieldTypeInt:
+ aVal, aOk := a.(int)
+ bVal, bOk := b.(int)
+ if !aOk || !bOk {
+ return false
+ }
+ switch op {
+ case ">=":
+ return aVal >= bVal
+ case "<=":
+ return aVal <= bVal
+ }
+ case FieldTypeFloat:
+ aVal, aOk := a.(float64)
+ bVal, bOk := b.(float64)
+ if !aOk || !bOk {
+ return false
+ }
+ switch op {
+ case ">=":
+ return aVal >= bVal
+ case "<=":
+ return aVal <= bVal
+ }
+ }
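+ // Field types without a numeric ordering (bool, string, duration) are not range-checked and always pass.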
+ return true
+}
+
+// Schema provides common functionality for configuration schemas
+type Schema struct {
+ Fields []*Field `json:"fields"`
+}
+
+// GetFieldByName returns a field by its JSON name
+func (s *Schema) GetFieldByName(jsonName string) *Field {
+ for _, field := range s.Fields {
+ if field.JSONName == jsonName {
+ return field
+ }
+ }
+ return nil
+}
+
+// ApplyDefaultsToConfig applies defaults to a configuration that implements ConfigWithDefaults
+func (s *Schema) ApplyDefaultsToConfig(config ConfigWithDefaults) error {
+ return config.ApplySchemaDefaults(s)
+}
+
+// ApplyDefaultsToProtobuf applies defaults to protobuf types using reflection
+func (s *Schema) ApplyDefaultsToProtobuf(config interface{}) error {
+ return s.applyDefaultsReflection(config)
+}
+
+// applyDefaultsReflection applies default values using reflection (internal use only)
+// Used for protobuf types and embedded struct handling
+func (s *Schema) applyDefaultsReflection(config interface{}) error {
+ configValue := reflect.ValueOf(config)
+ if configValue.Kind() == reflect.Ptr {
+ configValue = configValue.Elem()
+ }
+
+ if configValue.Kind() != reflect.Struct {
+ return fmt.Errorf("config must be a struct or pointer to struct")
+ }
+
+ configType := configValue.Type()
+
+ for i := 0; i < configValue.NumField(); i++ {
+ field := configValue.Field(i)
+ fieldType := configType.Field(i)
+
+ // Handle embedded structs recursively (before JSON tag check)
+ if field.Kind() == reflect.Struct && fieldType.Anonymous {
+ if !field.CanAddr() {
+ return fmt.Errorf("embedded struct %s is not addressable - config must be a pointer", fieldType.Name)
+ }
+ err := s.applyDefaultsReflection(field.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("failed to apply defaults to embedded struct %s: %v", fieldType.Name, err)
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ // Find corresponding schema field
+ schemaField := s.GetFieldByName(jsonTag)
+ if schemaField == nil {
+ continue
+ }
+
+ // Apply default if field is zero value
+ if field.CanSet() && field.IsZero() {
+ defaultValue := reflect.ValueOf(schemaField.DefaultValue)
+ if defaultValue.Type().ConvertibleTo(field.Type()) {
+ field.Set(defaultValue.Convert(field.Type()))
+ }
+ }
+ }
+
+ return nil
+}
+
+// ValidateConfig validates a configuration against the schema
+func (s *Schema) ValidateConfig(config interface{}) []error {
+ var errors []error
+
+ configValue := reflect.ValueOf(config)
+ if configValue.Kind() == reflect.Ptr {
+ configValue = configValue.Elem()
+ }
+
+ if configValue.Kind() != reflect.Struct {
+ errors = append(errors, fmt.Errorf("config must be a struct or pointer to struct"))
+ return errors
+ }
+
+ configType := configValue.Type()
+
+ for i := 0; i < configValue.NumField(); i++ {
+ field := configValue.Field(i)
+ fieldType := configType.Field(i)
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ // Find corresponding schema field
+ schemaField := s.GetFieldByName(jsonTag)
+ if schemaField == nil {
+ continue
+ }
+
+ // Validate field value
+ fieldValue := field.Interface()
+ if err := schemaField.ValidateValue(fieldValue); err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ return errors
+}
diff --git a/weed/admin/config/schema_test.go b/weed/admin/config/schema_test.go
new file mode 100644
index 000000000..3d0d74a38
--- /dev/null
+++ b/weed/admin/config/schema_test.go
@@ -0,0 +1,226 @@
+package config
+
+import (
+ "testing"
+)
+
+// Test structs that mirror the actual configuration structure
+type TestBaseConfigForSchema struct {
+ Enabled bool `json:"enabled"`
+ ScanIntervalSeconds int `json:"scan_interval_seconds"`
+ MaxConcurrent int `json:"max_concurrent"`
+}
+
+// ApplySchemaDefaults implements ConfigWithDefaults for test struct
+func (c *TestBaseConfigForSchema) ApplySchemaDefaults(schema *Schema) error {
+ return schema.ApplyDefaultsToProtobuf(c)
+}
+
+// Validate implements ConfigWithDefaults for test struct
+func (c *TestBaseConfigForSchema) Validate() error {
+ return nil
+}
+
+type TestTaskConfigForSchema struct {
+ TestBaseConfigForSchema
+ TaskSpecificField float64 `json:"task_specific_field"`
+ AnotherSpecificField string `json:"another_specific_field"`
+}
+
+// ApplySchemaDefaults implements ConfigWithDefaults for test struct
+func (c *TestTaskConfigForSchema) ApplySchemaDefaults(schema *Schema) error {
+ return schema.ApplyDefaultsToProtobuf(c)
+}
+
+// Validate implements ConfigWithDefaults for test struct
+func (c *TestTaskConfigForSchema) Validate() error {
+ return nil
+}
+
+func createTestSchema() *Schema {
+ return &Schema{
+ Fields: []*Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: FieldTypeBool,
+ DefaultValue: true,
+ },
+ {
+ Name: "scan_interval_seconds",
+ JSONName: "scan_interval_seconds",
+ Type: FieldTypeInt,
+ DefaultValue: 1800,
+ },
+ {
+ Name: "max_concurrent",
+ JSONName: "max_concurrent",
+ Type: FieldTypeInt,
+ DefaultValue: 3,
+ },
+ {
+ Name: "task_specific_field",
+ JSONName: "task_specific_field",
+ Type: FieldTypeFloat,
+ DefaultValue: 0.25,
+ },
+ {
+ Name: "another_specific_field",
+ JSONName: "another_specific_field",
+ Type: FieldTypeString,
+ DefaultValue: "default_value",
+ },
+ },
+ }
+}
+
+func TestApplyDefaults_WithEmbeddedStruct(t *testing.T) {
+ schema := createTestSchema()
+
+ // Start with zero values
+ config := &TestTaskConfigForSchema{}
+
+ err := schema.ApplyDefaultsToConfig(config)
+ if err != nil {
+ t.Fatalf("ApplyDefaultsToConfig failed: %v", err)
+ }
+
+ // Verify embedded struct fields got default values
+ if config.Enabled != true {
+ t.Errorf("Expected Enabled=true (default), got %v", config.Enabled)
+ }
+
+ if config.ScanIntervalSeconds != 1800 {
+ t.Errorf("Expected ScanIntervalSeconds=1800 (default), got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.MaxConcurrent != 3 {
+ t.Errorf("Expected MaxConcurrent=3 (default), got %v", config.MaxConcurrent)
+ }
+
+ // Verify task-specific fields got default values
+ if config.TaskSpecificField != 0.25 {
+ t.Errorf("Expected TaskSpecificField=0.25 (default), got %v", config.TaskSpecificField)
+ }
+
+ if config.AnotherSpecificField != "default_value" {
+ t.Errorf("Expected AnotherSpecificField='default_value' (default), got %v", config.AnotherSpecificField)
+ }
+}
+
+func TestApplyDefaults_PartiallySet(t *testing.T) {
+ schema := createTestSchema()
+
+ // Start with some pre-set values
+ config := &TestTaskConfigForSchema{
+ TestBaseConfigForSchema: TestBaseConfigForSchema{
+ Enabled: true, // Non-zero value, should not be overridden
+ ScanIntervalSeconds: 0, // Should get default
+ MaxConcurrent: 5, // Non-zero value, should not be overridden
+ },
+ TaskSpecificField: 0.0, // Should get default
+ AnotherSpecificField: "custom", // Non-zero value, should not be overridden
+ }
+
+ err := schema.ApplyDefaultsToConfig(config)
+ if err != nil {
+ t.Fatalf("ApplyDefaultsToConfig failed: %v", err)
+ }
+
+ // Verify already-set values are preserved
+ if config.Enabled != true {
+ t.Errorf("Expected Enabled=true (pre-set), got %v", config.Enabled)
+ }
+
+ if config.MaxConcurrent != 5 {
+ t.Errorf("Expected MaxConcurrent=5 (pre-set), got %v", config.MaxConcurrent)
+ }
+
+ if config.AnotherSpecificField != "custom" {
+ t.Errorf("Expected AnotherSpecificField='custom' (pre-set), got %v", config.AnotherSpecificField)
+ }
+
+ // Verify zero values got defaults
+ if config.ScanIntervalSeconds != 1800 {
+ t.Errorf("Expected ScanIntervalSeconds=1800 (default), got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.TaskSpecificField != 0.25 {
+ t.Errorf("Expected TaskSpecificField=0.25 (default), got %v", config.TaskSpecificField)
+ }
+}
+
+func TestApplyDefaults_NonPointer(t *testing.T) {
+ schema := createTestSchema()
+ config := TestTaskConfigForSchema{}
+ // This should fail since we need a pointer to modify the struct
+ err := schema.ApplyDefaultsToProtobuf(config)
+ if err == nil {
+ t.Fatal("Expected error for non-pointer config, but got nil")
+ }
+}
+
+func TestApplyDefaults_NonStruct(t *testing.T) {
+ schema := createTestSchema()
+ var config interface{} = "not a struct"
+ err := schema.ApplyDefaultsToProtobuf(config)
+ if err == nil {
+ t.Fatal("Expected error for non-struct config, but got nil")
+ }
+}
+
+func TestApplyDefaults_EmptySchema(t *testing.T) {
+ schema := &Schema{Fields: []*Field{}}
+ config := &TestTaskConfigForSchema{}
+
+ err := schema.ApplyDefaultsToConfig(config)
+ if err != nil {
+ t.Fatalf("ApplyDefaultsToConfig failed for empty schema: %v", err)
+ }
+
+ // All fields should remain at zero values since no defaults are defined
+ if config.Enabled != false {
+ t.Errorf("Expected Enabled=false (zero value), got %v", config.Enabled)
+ }
+}
+
+func TestApplyDefaults_MissingSchemaField(t *testing.T) {
+ // Schema with fewer fields than the struct
+ schema := &Schema{
+ Fields: []*Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: FieldTypeBool,
+ DefaultValue: true,
+ },
+ // Note: missing scan_interval_seconds and other fields
+ },
+ }
+
+ config := &TestTaskConfigForSchema{}
+ err := schema.ApplyDefaultsToConfig(config)
+ if err != nil {
+ t.Fatalf("ApplyDefaultsToConfig failed: %v", err)
+ }
+
+ // Only the field with a schema definition should get a default
+ if config.Enabled != true {
+ t.Errorf("Expected Enabled=true (has schema), got %v", config.Enabled)
+ }
+
+ // Fields without schema should remain at zero values
+ if config.ScanIntervalSeconds != 0 {
+ t.Errorf("Expected ScanIntervalSeconds=0 (no schema), got %v", config.ScanIntervalSeconds)
+ }
+}
+
+func BenchmarkApplyDefaults(b *testing.B) {
+ schema := createTestSchema()
+ config := &TestTaskConfigForSchema{}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = schema.ApplyDefaultsToConfig(config)
+ }
+}
diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go
index 6ebade19f..376f3edc7 100644
--- a/weed/admin/dash/admin_server.go
+++ b/weed/admin/dash/admin_server.go
@@ -25,6 +25,7 @@ import (
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/s3api"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
)
type AdminServer struct {
@@ -126,30 +127,67 @@ func NewAdminServer(masters string, templateFS http.FileSystem, dataDir string)
}
}
- // Initialize maintenance system with persistent configuration
+ // Initialize maintenance system - always initialize even without persistent storage
+ var maintenanceConfig *maintenance.MaintenanceConfig
if server.configPersistence.IsConfigured() {
- maintenanceConfig, err := server.configPersistence.LoadMaintenanceConfig()
+ var err error
+ maintenanceConfig, err = server.configPersistence.LoadMaintenanceConfig()
if err != nil {
glog.Errorf("Failed to load maintenance configuration: %v", err)
maintenanceConfig = maintenance.DefaultMaintenanceConfig()
}
- server.InitMaintenanceManager(maintenanceConfig)
- // Start maintenance manager if enabled
- if maintenanceConfig.Enabled {
- go func() {
- if err := server.StartMaintenanceManager(); err != nil {
- glog.Errorf("Failed to start maintenance manager: %v", err)
- }
- }()
+ // Apply new defaults to handle schema changes (like enabling by default)
+ schema := maintenance.GetMaintenanceConfigSchema()
+ if err := schema.ApplyDefaultsToProtobuf(maintenanceConfig); err != nil {
+ glog.Warningf("Failed to apply schema defaults to loaded config: %v", err)
+ }
+
+ // Force enable maintenance system for new default behavior
+ // This handles the case where old configs had Enabled=false as default
+ if !maintenanceConfig.Enabled {
+ glog.V(1).Infof("Enabling maintenance system (new default behavior)")
+ maintenanceConfig.Enabled = true
}
+
+ glog.V(1).Infof("Maintenance system initialized with persistent configuration (enabled: %v)", maintenanceConfig.Enabled)
} else {
- glog.V(1).Infof("No data directory configured, maintenance system will run in memory-only mode")
+ maintenanceConfig = maintenance.DefaultMaintenanceConfig()
+ glog.V(1).Infof("No data directory configured, maintenance system will run in memory-only mode (enabled: %v)", maintenanceConfig.Enabled)
+ }
+
+ // Always initialize maintenance manager
+ server.InitMaintenanceManager(maintenanceConfig)
+
+ // Load saved task configurations from persistence
+ server.loadTaskConfigurationsFromPersistence()
+
+ // Start maintenance manager if enabled
+ if maintenanceConfig.Enabled {
+ go func() {
+ // Give master client a bit of time to connect before starting scans
+ time.Sleep(2 * time.Second)
+ if err := server.StartMaintenanceManager(); err != nil {
+ glog.Errorf("Failed to start maintenance manager: %v", err)
+ }
+ }()
}
return server
}
+// loadTaskConfigurationsFromPersistence loads saved task configurations from protobuf files
+func (s *AdminServer) loadTaskConfigurationsFromPersistence() {
+ if s.configPersistence == nil || !s.configPersistence.IsConfigured() {
+ glog.V(1).Infof("Config persistence not available, using default task configurations")
+ return
+ }
+
+ // Load task configurations dynamically using the config update registry
+ configUpdateRegistry := tasks.GetGlobalConfigUpdateRegistry()
+ configUpdateRegistry.UpdateAllConfigs(s.configPersistence)
+}
+
// GetCredentialManager returns the credential manager
func (s *AdminServer) GetCredentialManager() *credential.CredentialManager {
return s.credentialManager
@@ -852,6 +890,15 @@ func (as *AdminServer) CancelMaintenanceTask(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"success": true, "message": "Task cancelled"})
}
+// cancelMaintenanceTask cancels a pending maintenance task
+func (as *AdminServer) cancelMaintenanceTask(taskID string) error {
+ if as.maintenanceManager == nil {
+ return fmt.Errorf("maintenance manager not initialized")
+ }
+
+ return as.maintenanceManager.CancelTask(taskID)
+}
+
// GetMaintenanceWorkersAPI returns all maintenance workers
func (as *AdminServer) GetMaintenanceWorkersAPI(c *gin.Context) {
workers, err := as.getMaintenanceWorkers()
@@ -899,13 +946,21 @@ func (as *AdminServer) GetMaintenanceConfigAPI(c *gin.Context) {
// UpdateMaintenanceConfigAPI updates maintenance configuration via API
func (as *AdminServer) UpdateMaintenanceConfigAPI(c *gin.Context) {
- var config MaintenanceConfig
- if err := c.ShouldBindJSON(&config); err != nil {
+ // Parse JSON into a generic map first to handle type conversions
+ var jsonConfig map[string]interface{}
+ if err := c.ShouldBindJSON(&jsonConfig); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
- err := as.updateMaintenanceConfig(&config)
+ // Convert JSON map to protobuf configuration
+ config, err := convertJSONToMaintenanceConfig(jsonConfig)
+ if err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
+ return
+ }
+
+ err = as.updateMaintenanceConfig(config)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
@@ -951,17 +1006,36 @@ func (as *AdminServer) getMaintenanceQueueData() (*maintenance.MaintenanceQueueD
}, nil
}
+// GetMaintenanceQueueStats returns statistics for the maintenance queue (exported for handlers)
+func (as *AdminServer) GetMaintenanceQueueStats() (*maintenance.QueueStats, error) {
+ return as.getMaintenanceQueueStats()
+}
+
// getMaintenanceQueueStats returns statistics for the maintenance queue
func (as *AdminServer) getMaintenanceQueueStats() (*maintenance.QueueStats, error) {
- // This would integrate with the maintenance queue to get real statistics
- // For now, return mock data
- return &maintenance.QueueStats{
- PendingTasks: 5,
- RunningTasks: 2,
- CompletedToday: 15,
- FailedToday: 1,
- TotalTasks: 23,
- }, nil
+ if as.maintenanceManager == nil {
+ return &maintenance.QueueStats{
+ PendingTasks: 0,
+ RunningTasks: 0,
+ CompletedToday: 0,
+ FailedToday: 0,
+ TotalTasks: 0,
+ }, nil
+ }
+
+ // Get real statistics from maintenance manager
+ stats := as.maintenanceManager.GetStats()
+
+ // Convert MaintenanceStats to QueueStats
+ queueStats := &maintenance.QueueStats{
+ PendingTasks: stats.TasksByStatus[maintenance.TaskStatusPending],
+ RunningTasks: stats.TasksByStatus[maintenance.TaskStatusAssigned] + stats.TasksByStatus[maintenance.TaskStatusInProgress],
+ CompletedToday: stats.CompletedToday,
+ FailedToday: stats.FailedToday,
+ TotalTasks: stats.TotalTasks,
+ }
+
+ return queueStats, nil
}
// getMaintenanceTasks returns all maintenance tasks
@@ -1000,15 +1074,6 @@ func (as *AdminServer) getMaintenanceTask(taskID string) (*MaintenanceTask, erro
return nil, fmt.Errorf("task %s not found", taskID)
}
-// cancelMaintenanceTask cancels a pending maintenance task
-func (as *AdminServer) cancelMaintenanceTask(taskID string) error {
- if as.maintenanceManager == nil {
- return fmt.Errorf("maintenance manager not initialized")
- }
-
- return as.maintenanceManager.CancelTask(taskID)
-}
-
// getMaintenanceWorkers returns all maintenance workers
func (as *AdminServer) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) {
if as.maintenanceManager == nil {
@@ -1110,11 +1175,14 @@ func (as *AdminServer) getMaintenanceConfig() (*maintenance.MaintenanceConfigDat
// Load configuration from persistent storage
config, err := as.configPersistence.LoadMaintenanceConfig()
if err != nil {
- glog.Errorf("Failed to load maintenance configuration: %v", err)
// Fallback to default configuration
- config = DefaultMaintenanceConfig()
+ config = maintenance.DefaultMaintenanceConfig()
}
+ // Note: Do NOT apply schema defaults to existing config as it overrides saved values
+ // Only apply defaults when creating new configs or handling fallback cases
+ // The schema defaults should only be used in the UI for new installations
+
// Get system stats from maintenance manager if available
var systemStats *MaintenanceStats
if as.maintenanceManager != nil {
@@ -1139,18 +1207,25 @@ func (as *AdminServer) getMaintenanceConfig() (*maintenance.MaintenanceConfigDat
}
}
- return &MaintenanceConfigData{
+ configData := &MaintenanceConfigData{
Config: config,
IsEnabled: config.Enabled,
LastScanTime: systemStats.LastScanTime,
NextScanTime: systemStats.NextScanTime,
SystemStats: systemStats,
MenuItems: maintenance.BuildMaintenanceMenuItems(),
- }, nil
+ }
+
+ return configData, nil
}
// updateMaintenanceConfig updates maintenance configuration
func (as *AdminServer) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error {
+ // Use ConfigField validation instead of standalone validation
+ if err := maintenance.ValidateMaintenanceConfigWithSchema(config); err != nil {
+ return fmt.Errorf("configuration validation failed: %v", err)
+ }
+
// Save configuration to persistent storage
if err := as.configPersistence.SaveMaintenanceConfig(config); err != nil {
return fmt.Errorf("failed to save maintenance configuration: %w", err)
@@ -1175,7 +1250,14 @@ func (as *AdminServer) triggerMaintenanceScan() error {
return fmt.Errorf("maintenance manager not initialized")
}
- return as.maintenanceManager.TriggerScan()
+ glog.V(1).Infof("Triggering maintenance scan")
+ err := as.maintenanceManager.TriggerScan()
+ if err != nil {
+ glog.Errorf("Failed to trigger maintenance scan: %v", err)
+ return err
+ }
+ glog.V(1).Infof("Maintenance scan triggered successfully")
+ return nil
}
// TriggerTopicRetentionPurgeAPI triggers topic retention purge via HTTP API
@@ -1265,14 +1347,11 @@ func (as *AdminServer) GetMaintenanceWorkersData() (*MaintenanceWorkersData, err
}
// StartWorkerGrpcServer starts the worker gRPC server
-func (s *AdminServer) StartWorkerGrpcServer(httpPort int) error {
+func (s *AdminServer) StartWorkerGrpcServer(grpcPort int) error {
if s.workerGrpcServer != nil {
return fmt.Errorf("worker gRPC server is already running")
}
- // Calculate gRPC port (HTTP port + 10000)
- grpcPort := httpPort + 10000
-
s.workerGrpcServer = NewWorkerGrpcServer(s)
return s.workerGrpcServer.StartWithTLS(grpcPort)
}
@@ -1412,7 +1491,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
}
// Create gRPC connection
- conn, err := grpc.Dial(brokerAddress, s.grpcDialOption)
+ conn, err := grpc.NewClient(brokerAddress, s.grpcDialOption)
if err != nil {
return fmt.Errorf("failed to connect to broker: %w", err)
}
@@ -1501,3 +1580,161 @@ func extractVersioningFromEntry(entry *filer_pb.Entry) bool {
enabled, _ := s3api.LoadVersioningFromExtended(entry)
return enabled
}
+
+// GetConfigPersistence returns the config persistence manager
+func (as *AdminServer) GetConfigPersistence() *ConfigPersistence {
+ return as.configPersistence
+}
+
+// convertJSONToMaintenanceConfig converts JSON map to protobuf MaintenanceConfig
+func convertJSONToMaintenanceConfig(jsonConfig map[string]interface{}) (*maintenance.MaintenanceConfig, error) {
+ config := &maintenance.MaintenanceConfig{}
+
+ // Helper function to get int32 from interface{}
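+ // encoding/json decodes every JSON number into float64, so the float64 case below is
+ // the common path; the int cases cover values that were set programmatically.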
+ getInt32 := func(key string) (int32, error) {
+ if val, ok := jsonConfig[key]; ok {
+ switch v := val.(type) {
+ case int:
+ return int32(v), nil
+ case int32:
+ return v, nil
+ case int64:
+ return int32(v), nil
+ case float64:
+ return int32(v), nil
+ default:
+ return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, v)
+ }
+ }
+ return 0, nil
+ }
+
+ // Helper function to get bool from interface{}
+ getBool := func(key string) bool {
+ if val, ok := jsonConfig[key]; ok {
+ if b, ok := val.(bool); ok {
+ return b
+ }
+ }
+ return false
+ }
+
+ var err error
+
+ // Convert basic fields
+ config.Enabled = getBool("enabled")
+
+ if config.ScanIntervalSeconds, err = getInt32("scan_interval_seconds"); err != nil {
+ return nil, err
+ }
+ if config.WorkerTimeoutSeconds, err = getInt32("worker_timeout_seconds"); err != nil {
+ return nil, err
+ }
+ if config.TaskTimeoutSeconds, err = getInt32("task_timeout_seconds"); err != nil {
+ return nil, err
+ }
+ if config.RetryDelaySeconds, err = getInt32("retry_delay_seconds"); err != nil {
+ return nil, err
+ }
+ if config.MaxRetries, err = getInt32("max_retries"); err != nil {
+ return nil, err
+ }
+ if config.CleanupIntervalSeconds, err = getInt32("cleanup_interval_seconds"); err != nil {
+ return nil, err
+ }
+ if config.TaskRetentionSeconds, err = getInt32("task_retention_seconds"); err != nil {
+ return nil, err
+ }
+
+ // Convert policy if present
+ if policyData, ok := jsonConfig["policy"]; ok {
+ if policyMap, ok := policyData.(map[string]interface{}); ok {
+ policy := &maintenance.MaintenancePolicy{}
+
+ if globalMaxConcurrent, err := getInt32FromMap(policyMap, "global_max_concurrent"); err != nil {
+ return nil, err
+ } else {
+ policy.GlobalMaxConcurrent = globalMaxConcurrent
+ }
+
+ if defaultRepeatIntervalSeconds, err := getInt32FromMap(policyMap, "default_repeat_interval_seconds"); err != nil {
+ return nil, err
+ } else {
+ policy.DefaultRepeatIntervalSeconds = defaultRepeatIntervalSeconds
+ }
+
+ if defaultCheckIntervalSeconds, err := getInt32FromMap(policyMap, "default_check_interval_seconds"); err != nil {
+ return nil, err
+ } else {
+ policy.DefaultCheckIntervalSeconds = defaultCheckIntervalSeconds
+ }
+
+ // Convert task policies if present
+ if taskPoliciesData, ok := policyMap["task_policies"]; ok {
+ if taskPoliciesMap, ok := taskPoliciesData.(map[string]interface{}); ok {
+ policy.TaskPolicies = make(map[string]*maintenance.TaskPolicy)
+
+ for taskType, taskPolicyData := range taskPoliciesMap {
+ if taskPolicyMap, ok := taskPolicyData.(map[string]interface{}); ok {
+ taskPolicy := &maintenance.TaskPolicy{}
+
+ taskPolicy.Enabled = getBoolFromMap(taskPolicyMap, "enabled")
+
+ if maxConcurrent, err := getInt32FromMap(taskPolicyMap, "max_concurrent"); err != nil {
+ return nil, err
+ } else {
+ taskPolicy.MaxConcurrent = maxConcurrent
+ }
+
+ if repeatIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "repeat_interval_seconds"); err != nil {
+ return nil, err
+ } else {
+ taskPolicy.RepeatIntervalSeconds = repeatIntervalSeconds
+ }
+
+ if checkIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "check_interval_seconds"); err != nil {
+ return nil, err
+ } else {
+ taskPolicy.CheckIntervalSeconds = checkIntervalSeconds
+ }
+
+ policy.TaskPolicies[taskType] = taskPolicy
+ }
+ }
+ }
+ }
+
+ config.Policy = policy
+ }
+ }
+
+ return config, nil
+}
+
+// Helper functions for map conversion
+func getInt32FromMap(m map[string]interface{}, key string) (int32, error) {
+ if val, ok := m[key]; ok {
+ switch v := val.(type) {
+ case int:
+ return int32(v), nil
+ case int32:
+ return v, nil
+ case int64:
+ return int32(v), nil
+ case float64:
+ return int32(v), nil
+ default:
+ return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, v)
+ }
+ }
+ return 0, nil
+}
+
+func getBoolFromMap(m map[string]interface{}, key string) bool {
+ if val, ok := m[key]; ok {
+ if b, ok := val.(bool); ok {
+ return b
+ }
+ }
+ return false
+}
diff --git a/weed/admin/dash/collection_management.go b/weed/admin/dash/collection_management.go
index a70c82918..03c1e452b 100644
--- a/weed/admin/dash/collection_management.go
+++ b/weed/admin/dash/collection_management.go
@@ -12,6 +12,7 @@ import (
func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
var collections []CollectionInfo
var totalVolumes int
+ var totalEcVolumes int
var totalFiles int64
var totalSize int64
collectionMap := make(map[string]*CollectionInfo)
@@ -28,6 +29,7 @@ func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
+ // Process regular volumes
for _, volInfo := range diskInfo.VolumeInfos {
// Extract collection name from volume info
collectionName := volInfo.Collection
@@ -69,12 +71,13 @@ func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
totalSize += int64(volInfo.Size)
} else {
newCollection := CollectionInfo{
- Name: collectionName,
- DataCenter: dc.Id,
- VolumeCount: 1,
- FileCount: int64(volInfo.FileCount),
- TotalSize: int64(volInfo.Size),
- DiskTypes: []string{diskType},
+ Name: collectionName,
+ DataCenter: dc.Id,
+ VolumeCount: 1,
+ EcVolumeCount: 0,
+ FileCount: int64(volInfo.FileCount),
+ TotalSize: int64(volInfo.Size),
+ DiskTypes: []string{diskType},
}
collectionMap[collectionName] = &newCollection
totalVolumes++
@@ -82,6 +85,63 @@ func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
totalSize += int64(volInfo.Size)
}
}
+
+ // Process EC volumes
+ ecVolumeMap := make(map[uint32]bool) // Track unique EC volumes to avoid double counting
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ // Extract collection name from EC shard info
+ collectionName := ecShardInfo.Collection
+ if collectionName == "" {
+ collectionName = "default" // Default collection for EC volumes without explicit collection
+ }
+
+ // Only count each EC volume once (not per shard)
+ if !ecVolumeMap[ecShardInfo.Id] {
+ ecVolumeMap[ecShardInfo.Id] = true
+
+ // Get disk type from disk info, default to hdd if empty
+ diskType := diskInfo.Type
+ if diskType == "" {
+ diskType = "hdd"
+ }
+
+ // Get or create collection info
+ if collection, exists := collectionMap[collectionName]; exists {
+ collection.EcVolumeCount++
+
+ // Update data center if this collection spans multiple DCs
+ if collection.DataCenter != dc.Id && collection.DataCenter != "multi" {
+ collection.DataCenter = "multi"
+ }
+
+ // Add disk type if not already present
+ diskTypeExists := false
+ for _, existingDiskType := range collection.DiskTypes {
+ if existingDiskType == diskType {
+ diskTypeExists = true
+ break
+ }
+ }
+ if !diskTypeExists {
+ collection.DiskTypes = append(collection.DiskTypes, diskType)
+ }
+
+ totalEcVolumes++
+ } else {
+ newCollection := CollectionInfo{
+ Name: collectionName,
+ DataCenter: dc.Id,
+ VolumeCount: 0,
+ EcVolumeCount: 1,
+ FileCount: 0,
+ TotalSize: 0,
+ DiskTypes: []string{diskType},
+ }
+ collectionMap[collectionName] = &newCollection
+ totalEcVolumes++
+ }
+ }
+ }
}
}
}
@@ -112,6 +172,7 @@ func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
Collections: []CollectionInfo{},
TotalCollections: 0,
TotalVolumes: 0,
+ TotalEcVolumes: 0,
TotalFiles: 0,
TotalSize: 0,
LastUpdated: time.Now(),
@@ -122,8 +183,203 @@ func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) {
Collections: collections,
TotalCollections: len(collections),
TotalVolumes: totalVolumes,
+ TotalEcVolumes: totalEcVolumes,
TotalFiles: totalFiles,
TotalSize: totalSize,
LastUpdated: time.Now(),
}, nil
}
+
+// GetCollectionDetails retrieves detailed information for a specific collection including volumes and EC volumes
+func (s *AdminServer) GetCollectionDetails(collectionName string, page int, pageSize int, sortBy string, sortOrder string) (*CollectionDetailsData, error) {
+ // Set defaults
+ if page < 1 {
+ page = 1
+ }
+ if pageSize < 1 || pageSize > 1000 {
+ pageSize = 25
+ }
+ if sortBy == "" {
+ sortBy = "volume_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
+ var regularVolumes []VolumeWithTopology
+ var ecVolumes []EcVolumeWithShards
+ var totalFiles int64
+ var totalSize int64
+ dataCenters := make(map[string]bool)
+ diskTypes := make(map[string]bool)
+
+ // Get regular volumes for this collection
+ regularVolumeData, err := s.GetClusterVolumes(1, 10000, "volume_id", "asc", collectionName) // Get all volumes
+ if err != nil {
+ return nil, err
+ }
+
+ regularVolumes = regularVolumeData.Volumes
+ totalSize = regularVolumeData.TotalSize
+
+ // Calculate total files from regular volumes
+ for _, vol := range regularVolumes {
+ totalFiles += int64(vol.FileCount)
+ }
+
+ // Collect data centers and disk types from regular volumes
+ for _, vol := range regularVolumes {
+ dataCenters[vol.DataCenter] = true
+ diskTypes[vol.DiskType] = true
+ }
+
+ // Get EC volumes for this collection
+ ecVolumeData, err := s.GetClusterEcVolumes(1, 10000, "volume_id", "asc", collectionName) // Get all EC volumes
+ if err != nil {
+ return nil, err
+ }
+
+ ecVolumes = ecVolumeData.EcVolumes
+
+ // Collect data centers from EC volumes
+ for _, ecVol := range ecVolumes {
+ for _, dc := range ecVol.DataCenters {
+ dataCenters[dc] = true
+ }
+ }
+
+ // Combine all volumes for sorting and pagination
+ type VolumeForSorting struct {
+ Type string // "regular" or "ec"
+ RegularVolume *VolumeWithTopology
+ EcVolume *EcVolumeWithShards
+ }
+
+ var allVolumes []VolumeForSorting
+ for i := range regularVolumes {
+ allVolumes = append(allVolumes, VolumeForSorting{
+ Type: "regular",
+ RegularVolume: &regularVolumes[i],
+ })
+ }
+ for i := range ecVolumes {
+ allVolumes = append(allVolumes, VolumeForSorting{
+ Type: "ec",
+ EcVolume: &ecVolumes[i],
+ })
+ }
+
+ // Sort all volumes
+ sort.Slice(allVolumes, func(i, j int) bool {
+ var less bool
+ switch sortBy {
+ case "volume_id":
+ var idI, idJ uint32
+ if allVolumes[i].Type == "regular" {
+ idI = allVolumes[i].RegularVolume.Id
+ } else {
+ idI = allVolumes[i].EcVolume.VolumeID
+ }
+ if allVolumes[j].Type == "regular" {
+ idJ = allVolumes[j].RegularVolume.Id
+ } else {
+ idJ = allVolumes[j].EcVolume.VolumeID
+ }
+ less = idI < idJ
+ case "type":
+ // Sort by type first (regular before ec), then by volume ID
+ if allVolumes[i].Type == allVolumes[j].Type {
+ var idI, idJ uint32
+ if allVolumes[i].Type == "regular" {
+ idI = allVolumes[i].RegularVolume.Id
+ } else {
+ idI = allVolumes[i].EcVolume.VolumeID
+ }
+ if allVolumes[j].Type == "regular" {
+ idJ = allVolumes[j].RegularVolume.Id
+ } else {
+ idJ = allVolumes[j].EcVolume.VolumeID
+ }
+ less = idI < idJ
+ } else {
+ less = allVolumes[i].Type < allVolumes[j].Type // "ec" < "regular"
+ }
+ default:
+ // Default to volume ID sort
+ var idI, idJ uint32
+ if allVolumes[i].Type == "regular" {
+ idI = allVolumes[i].RegularVolume.Id
+ } else {
+ idI = allVolumes[i].EcVolume.VolumeID
+ }
+ if allVolumes[j].Type == "regular" {
+ idJ = allVolumes[j].RegularVolume.Id
+ } else {
+ idJ = allVolumes[j].EcVolume.VolumeID
+ }
+ less = idI < idJ
+ }
+
+ if sortOrder == "desc" {
+ return !less
+ }
+ return less
+ })
+
+ // Apply pagination
+ totalVolumesAndEc := len(allVolumes)
+ totalPages := (totalVolumesAndEc + pageSize - 1) / pageSize
+ startIndex := (page - 1) * pageSize
+ endIndex := startIndex + pageSize
+ if endIndex > totalVolumesAndEc {
+ endIndex = totalVolumesAndEc
+ }
+
+ if startIndex >= totalVolumesAndEc {
+ startIndex = 0
+ endIndex = 0
+ }
+
+ // Extract paginated results
+ var paginatedRegularVolumes []VolumeWithTopology
+ var paginatedEcVolumes []EcVolumeWithShards
+
+ for i := startIndex; i < endIndex; i++ {
+ if allVolumes[i].Type == "regular" {
+ paginatedRegularVolumes = append(paginatedRegularVolumes, *allVolumes[i].RegularVolume)
+ } else {
+ paginatedEcVolumes = append(paginatedEcVolumes, *allVolumes[i].EcVolume)
+ }
+ }
+
+ // Convert maps to slices
+ var dcList []string
+ for dc := range dataCenters {
+ dcList = append(dcList, dc)
+ }
+ sort.Strings(dcList)
+
+ var diskTypeList []string
+ for diskType := range diskTypes {
+ diskTypeList = append(diskTypeList, diskType)
+ }
+ sort.Strings(diskTypeList)
+
+ return &CollectionDetailsData{
+ CollectionName: collectionName,
+ RegularVolumes: paginatedRegularVolumes,
+ EcVolumes: paginatedEcVolumes,
+ TotalVolumes: len(regularVolumes),
+ TotalEcVolumes: len(ecVolumes),
+ TotalFiles: totalFiles,
+ TotalSize: totalSize,
+ DataCenters: dcList,
+ DiskTypes: diskTypeList,
+ LastUpdated: time.Now(),
+ Page: page,
+ PageSize: pageSize,
+ TotalPages: totalPages,
+ SortBy: sortBy,
+ SortOrder: sortOrder,
+ }, nil
+}
diff --git a/weed/admin/dash/config_persistence.go b/weed/admin/dash/config_persistence.go
index a2f74f4e7..b6b3074ab 100644
--- a/weed/admin/dash/config_persistence.go
+++ b/weed/admin/dash/config_persistence.go
@@ -1,23 +1,50 @@
package dash
import (
- "encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
)
const (
- // Configuration file names
- MaintenanceConfigFile = "maintenance.json"
- AdminConfigFile = "admin.json"
+ // Configuration subdirectory
+ ConfigSubdir = "conf"
+
+ // Configuration file names (protobuf binary)
+ MaintenanceConfigFile = "maintenance.pb"
+ VacuumTaskConfigFile = "task_vacuum.pb"
+ ECTaskConfigFile = "task_erasure_coding.pb"
+ BalanceTaskConfigFile = "task_balance.pb"
+ ReplicationTaskConfigFile = "task_replication.pb"
+
+ // JSON reference files
+ MaintenanceConfigJSONFile = "maintenance.json"
+ VacuumTaskConfigJSONFile = "task_vacuum.json"
+ ECTaskConfigJSONFile = "task_erasure_coding.json"
+ BalanceTaskConfigJSONFile = "task_balance.json"
+ ReplicationTaskConfigJSONFile = "task_replication.json"
+
ConfigDirPermissions = 0755
ConfigFilePermissions = 0644
)
+// Task configuration types
+type (
+ VacuumTaskConfig = worker_pb.VacuumTaskConfig
+ ErasureCodingTaskConfig = worker_pb.ErasureCodingTaskConfig
+ BalanceTaskConfig = worker_pb.BalanceTaskConfig
+ ReplicationTaskConfig = worker_pb.ReplicationTaskConfig
+)
+
// ConfigPersistence handles saving and loading configuration files
type ConfigPersistence struct {
dataDir string
@@ -30,122 +57,67 @@ func NewConfigPersistence(dataDir string) *ConfigPersistence {
}
}
-// SaveMaintenanceConfig saves maintenance configuration to JSON file
+// SaveMaintenanceConfig saves maintenance configuration to protobuf file and JSON reference
func (cp *ConfigPersistence) SaveMaintenanceConfig(config *MaintenanceConfig) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot save configuration")
}
- configPath := filepath.Join(cp.dataDir, MaintenanceConfigFile)
-
- // Create directory if it doesn't exist
- if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ if err := os.MkdirAll(confDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create config directory: %w", err)
}
- // Marshal configuration to JSON
- configData, err := json.MarshalIndent(config, "", " ")
- if err != nil {
- return fmt.Errorf("failed to marshal maintenance config: %w", err)
- }
-
- // Write to file
- if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to write maintenance config file: %w", err)
- }
-
- glog.V(1).Infof("Saved maintenance configuration to %s", configPath)
- return nil
-}
-
-// LoadMaintenanceConfig loads maintenance configuration from JSON file
-func (cp *ConfigPersistence) LoadMaintenanceConfig() (*MaintenanceConfig, error) {
- if cp.dataDir == "" {
- glog.V(1).Infof("No data directory specified, using default maintenance configuration")
- return DefaultMaintenanceConfig(), nil
- }
-
- configPath := filepath.Join(cp.dataDir, MaintenanceConfigFile)
-
- // Check if file exists
- if _, err := os.Stat(configPath); os.IsNotExist(err) {
- glog.V(1).Infof("Maintenance config file does not exist, using defaults: %s", configPath)
- return DefaultMaintenanceConfig(), nil
- }
-
- // Read file
- configData, err := os.ReadFile(configPath)
+ // Save as protobuf (primary format)
+ pbConfigPath := filepath.Join(confDir, MaintenanceConfigFile)
+ pbData, err := proto.Marshal(config)
if err != nil {
- return nil, fmt.Errorf("failed to read maintenance config file: %w", err)
+ return fmt.Errorf("failed to marshal maintenance config to protobuf: %w", err)
}
- // Unmarshal JSON
- var config MaintenanceConfig
- if err := json.Unmarshal(configData, &config); err != nil {
- return nil, fmt.Errorf("failed to unmarshal maintenance config: %w", err)
+ if err := os.WriteFile(pbConfigPath, pbData, ConfigFilePermissions); err != nil {
+ return fmt.Errorf("failed to write protobuf config file: %w", err)
}
- glog.V(1).Infof("Loaded maintenance configuration from %s", configPath)
- return &config, nil
-}
-
-// SaveAdminConfig saves general admin configuration to JSON file
-func (cp *ConfigPersistence) SaveAdminConfig(config map[string]interface{}) error {
- if cp.dataDir == "" {
- return fmt.Errorf("no data directory specified, cannot save configuration")
- }
-
- configPath := filepath.Join(cp.dataDir, AdminConfigFile)
-
- // Create directory if it doesn't exist
- if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
- return fmt.Errorf("failed to create config directory: %w", err)
- }
-
- // Marshal configuration to JSON
- configData, err := json.MarshalIndent(config, "", " ")
+ // Save JSON reference copy for debugging
+ jsonConfigPath := filepath.Join(confDir, MaintenanceConfigJSONFile)
+ jsonData, err := protojson.MarshalOptions{
+ Multiline: true,
+ Indent: " ",
+ EmitUnpopulated: true,
+ }.Marshal(config)
if err != nil {
- return fmt.Errorf("failed to marshal admin config: %w", err)
+ return fmt.Errorf("failed to marshal maintenance config to JSON: %w", err)
}
- // Write to file
- if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to write admin config file: %w", err)
+ if err := os.WriteFile(jsonConfigPath, jsonData, ConfigFilePermissions); err != nil {
+ return fmt.Errorf("failed to write JSON reference file: %w", err)
}
- glog.V(1).Infof("Saved admin configuration to %s", configPath)
return nil
}
-// LoadAdminConfig loads general admin configuration from JSON file
-func (cp *ConfigPersistence) LoadAdminConfig() (map[string]interface{}, error) {
+// LoadMaintenanceConfig loads maintenance configuration from protobuf file
+func (cp *ConfigPersistence) LoadMaintenanceConfig() (*MaintenanceConfig, error) {
if cp.dataDir == "" {
- glog.V(1).Infof("No data directory specified, using default admin configuration")
- return make(map[string]interface{}), nil
- }
-
- configPath := filepath.Join(cp.dataDir, AdminConfigFile)
-
- // Check if file exists
- if _, err := os.Stat(configPath); os.IsNotExist(err) {
- glog.V(1).Infof("Admin config file does not exist, using defaults: %s", configPath)
- return make(map[string]interface{}), nil
+ return DefaultMaintenanceConfig(), nil
}
- // Read file
- configData, err := os.ReadFile(configPath)
- if err != nil {
- return nil, fmt.Errorf("failed to read admin config file: %w", err)
- }
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, MaintenanceConfigFile)
- // Unmarshal JSON
- var config map[string]interface{}
- if err := json.Unmarshal(configData, &config); err != nil {
- return nil, fmt.Errorf("failed to unmarshal admin config: %w", err)
+ // Try to load from protobuf file
+ if configData, err := os.ReadFile(configPath); err == nil {
+ var config MaintenanceConfig
+ if err := proto.Unmarshal(configData, &config); err == nil {
+ // Always populate policy from separate task configuration files
+ config.Policy = buildPolicyFromTaskConfigs()
+ return &config, nil
+ }
}
- glog.V(1).Infof("Loaded admin configuration from %s", configPath)
- return config, nil
+ // Missing or unparsable config file; fall back to defaults
+ return DefaultMaintenanceConfig(), nil
}
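+
+// Example (illustrative sketch; assumes cp is a *ConfigPersistence with a configured data
+// directory, and that the saver whose tail is shown above is named SaveMaintenanceConfig).
+// The save path writes both the .pb file and a protojson reference copy under the conf
+// subdirectory; the load path never fails hard:
+//
+//	cfg := DefaultMaintenanceConfig()
+//	if err := cp.SaveMaintenanceConfig(cfg); err != nil {
+//		glog.Errorf("save maintenance config: %v", err)
+//	}
+//	loaded, _ := cp.LoadMaintenanceConfig() // defaults when the .pb file is missing or corrupt
+//	_ = loaded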
// GetConfigPath returns the path to a configuration file
@@ -153,24 +125,35 @@ func (cp *ConfigPersistence) GetConfigPath(filename string) string {
if cp.dataDir == "" {
return ""
}
- return filepath.Join(cp.dataDir, filename)
+
+ // All configs go in conf subdirectory
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ return filepath.Join(confDir, filename)
}
-// ListConfigFiles returns all configuration files in the data directory
+// ListConfigFiles returns all configuration files in the conf subdirectory
func (cp *ConfigPersistence) ListConfigFiles() ([]string, error) {
if cp.dataDir == "" {
return nil, fmt.Errorf("no data directory specified")
}
- files, err := os.ReadDir(cp.dataDir)
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ files, err := os.ReadDir(confDir)
if err != nil {
+ // If conf directory doesn't exist, return empty list
+ if os.IsNotExist(err) {
+ return []string{}, nil
+ }
return nil, fmt.Errorf("failed to read config directory: %w", err)
}
var configFiles []string
for _, file := range files {
- if !file.IsDir() && filepath.Ext(file.Name()) == ".json" {
- configFiles = append(configFiles, file.Name())
+ if !file.IsDir() {
+ ext := filepath.Ext(file.Name())
+ if ext == ".json" || ext == ".pb" {
+ configFiles = append(configFiles, file.Name())
+ }
}
}
@@ -183,7 +166,7 @@ func (cp *ConfigPersistence) BackupConfig(filename string) error {
return fmt.Errorf("no data directory specified")
}
- configPath := filepath.Join(cp.dataDir, filename)
+ configPath := cp.GetConfigPath(filename)
if _, err := os.Stat(configPath); os.IsNotExist(err) {
return fmt.Errorf("config file does not exist: %s", filename)
}
@@ -191,7 +174,10 @@ func (cp *ConfigPersistence) BackupConfig(filename string) error {
// Create backup filename with timestamp
timestamp := time.Now().Format("2006-01-02_15-04-05")
backupName := fmt.Sprintf("%s.backup_%s", filename, timestamp)
- backupPath := filepath.Join(cp.dataDir, backupName)
+
+ // Determine backup directory (conf subdirectory)
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ backupPath := filepath.Join(confDir, backupName)
// Copy file
configData, err := os.ReadFile(configPath)
@@ -213,7 +199,10 @@ func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error {
return fmt.Errorf("no data directory specified")
}
- backupPath := filepath.Join(cp.dataDir, backupName)
+ // Determine backup path (conf subdirectory)
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ backupPath := filepath.Join(confDir, backupName)
+
if _, err := os.Stat(backupPath); os.IsNotExist(err) {
return fmt.Errorf("backup file does not exist: %s", backupName)
}
@@ -225,7 +214,7 @@ func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error {
}
// Write to config file
- configPath := filepath.Join(cp.dataDir, filename)
+ configPath := cp.GetConfigPath(filename)
if err := os.WriteFile(configPath, backupData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to restore config: %w", err)
}
@@ -234,6 +223,364 @@ func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error {
return nil
}
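+
+// Example (illustrative sketch; backup file name is hypothetical). Backups are written to the
+// same conf subdirectory as <filename>.backup_<timestamp>, so a restore needs the exact
+// timestamped name:
+//
+//	if err := cp.BackupConfig(MaintenanceConfigFile); err != nil {
+//		glog.Warningf("backup failed: %v", err)
+//	}
+//	// later, restore from a known backup file name
+//	err := cp.RestoreConfig(MaintenanceConfigFile, MaintenanceConfigFile+".backup_2025-01-02_15-04-05")
+//	_ = err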
+// SaveVacuumTaskConfig saves vacuum task configuration to protobuf file
+func (cp *ConfigPersistence) SaveVacuumTaskConfig(config *VacuumTaskConfig) error {
+ return cp.saveTaskConfig(VacuumTaskConfigFile, config)
+}
+
+// SaveVacuumTaskPolicy saves complete vacuum task policy to protobuf file
+func (cp *ConfigPersistence) SaveVacuumTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ return cp.saveTaskConfig(VacuumTaskConfigFile, policy)
+}
+
+// LoadVacuumTaskConfig loads vacuum task configuration from protobuf file
+func (cp *ConfigPersistence) LoadVacuumTaskConfig() (*VacuumTaskConfig, error) {
+ // Load as TaskPolicy and extract vacuum config
+ if taskPolicy, err := cp.LoadVacuumTaskPolicy(); err == nil && taskPolicy != nil {
+ if vacuumConfig := taskPolicy.GetVacuumConfig(); vacuumConfig != nil {
+ return vacuumConfig, nil
+ }
+ }
+
+ // Return default config if no valid config found
+ return &VacuumTaskConfig{
+ GarbageThreshold: 0.3,
+ MinVolumeAgeHours: 24,
+ MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days
+ }, nil
+}
+
+// LoadVacuumTaskPolicy loads complete vacuum task policy from protobuf file
+func (cp *ConfigPersistence) LoadVacuumTaskPolicy() (*worker_pb.TaskPolicy, error) {
+ if cp.dataDir == "" {
+ // Return default policy if no data directory
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 2,
+ RepeatIntervalSeconds: 24 * 3600, // 24 hours in seconds
+ CheckIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: 0.3,
+ MinVolumeAgeHours: 24,
+ MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days
+ },
+ },
+ }, nil
+ }
+
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, VacuumTaskConfigFile)
+
+ // Check if file exists
+ if _, err := os.Stat(configPath); os.IsNotExist(err) {
+ // Return default policy if file doesn't exist
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 2,
+ RepeatIntervalSeconds: 24 * 3600, // 24 hours in seconds
+ CheckIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: 0.3,
+ MinVolumeAgeHours: 24,
+ MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days
+ },
+ },
+ }, nil
+ }
+
+ // Read file
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read vacuum task config file: %w", err)
+ }
+
+ // Try to unmarshal as TaskPolicy
+ var policy worker_pb.TaskPolicy
+ if err := proto.Unmarshal(configData, &policy); err == nil {
+ // Validate that it's actually a TaskPolicy with vacuum config
+ if policy.GetVacuumConfig() != nil {
+ glog.V(1).Infof("Loaded vacuum task policy from %s", configPath)
+ return &policy, nil
+ }
+ }
+
+ return nil, fmt.Errorf("failed to unmarshal vacuum task configuration")
+}
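+
+// Example (illustrative sketch; assumes cp is a *ConfigPersistence). Callers can round-trip the
+// full vacuum TaskPolicy, or read just the embedded VacuumTaskConfig:
+//
+//	policy, _ := cp.LoadVacuumTaskPolicy() // returns built-in defaults when no file exists
+//	policy.MaxConcurrent = 3
+//	if err := cp.SaveVacuumTaskPolicy(policy); err != nil {
+//		glog.Errorf("save vacuum policy: %v", err)
+//	}
+//	vacuumCfg, _ := cp.LoadVacuumTaskConfig() // extracts policy.GetVacuumConfig()
+//	_ = vacuumCfg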
+
+// SaveErasureCodingTaskConfig saves EC task configuration to protobuf file
+func (cp *ConfigPersistence) SaveErasureCodingTaskConfig(config *ErasureCodingTaskConfig) error {
+ return cp.saveTaskConfig(ECTaskConfigFile, config)
+}
+
+// SaveErasureCodingTaskPolicy saves complete EC task policy to protobuf file
+func (cp *ConfigPersistence) SaveErasureCodingTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ return cp.saveTaskConfig(ECTaskConfigFile, policy)
+}
+
+// LoadErasureCodingTaskConfig loads EC task configuration from protobuf file
+func (cp *ConfigPersistence) LoadErasureCodingTaskConfig() (*ErasureCodingTaskConfig, error) {
+ // Load as TaskPolicy and extract EC config
+ if taskPolicy, err := cp.LoadErasureCodingTaskPolicy(); err == nil && taskPolicy != nil {
+ if ecConfig := taskPolicy.GetErasureCodingConfig(); ecConfig != nil {
+ return ecConfig, nil
+ }
+ }
+
+ // Return default config if no valid config found
+ return &ErasureCodingTaskConfig{
+ FullnessRatio: 0.9,
+ QuietForSeconds: 3600,
+ MinVolumeSizeMb: 1024,
+ CollectionFilter: "",
+ }, nil
+}
+
+// LoadErasureCodingTaskPolicy loads complete EC task policy from protobuf file
+func (cp *ConfigPersistence) LoadErasureCodingTaskPolicy() (*worker_pb.TaskPolicy, error) {
+ if cp.dataDir == "" {
+ // Return default policy if no data directory
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 1,
+ RepeatIntervalSeconds: 168 * 3600, // 1 week in seconds
+ CheckIntervalSeconds: 24 * 3600, // 24 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: 0.9,
+ QuietForSeconds: 3600,
+ MinVolumeSizeMb: 1024,
+ CollectionFilter: "",
+ },
+ },
+ }, nil
+ }
+
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, ECTaskConfigFile)
+
+ // Check if file exists
+ if _, err := os.Stat(configPath); os.IsNotExist(err) {
+ // Return default policy if file doesn't exist
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 1,
+ RepeatIntervalSeconds: 168 * 3600, // 1 week in seconds
+ CheckIntervalSeconds: 24 * 3600, // 24 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: 0.9,
+ QuietForSeconds: 3600,
+ MinVolumeSizeMb: 1024,
+ CollectionFilter: "",
+ },
+ },
+ }, nil
+ }
+
+ // Read file
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read EC task config file: %w", err)
+ }
+
+ // Try to unmarshal as TaskPolicy
+ var policy worker_pb.TaskPolicy
+ if err := proto.Unmarshal(configData, &policy); err == nil {
+ // Validate that it's actually a TaskPolicy with EC config
+ if policy.GetErasureCodingConfig() != nil {
+ glog.V(1).Infof("Loaded EC task policy from %s", configPath)
+ return &policy, nil
+ }
+ }
+
+ return nil, fmt.Errorf("failed to unmarshal EC task configuration")
+}
+
+// SaveBalanceTaskConfig saves balance task configuration to protobuf file
+func (cp *ConfigPersistence) SaveBalanceTaskConfig(config *BalanceTaskConfig) error {
+ return cp.saveTaskConfig(BalanceTaskConfigFile, config)
+}
+
+// SaveBalanceTaskPolicy saves complete balance task policy to protobuf file
+func (cp *ConfigPersistence) SaveBalanceTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ return cp.saveTaskConfig(BalanceTaskConfigFile, policy)
+}
+
+// LoadBalanceTaskConfig loads balance task configuration from protobuf file
+func (cp *ConfigPersistence) LoadBalanceTaskConfig() (*BalanceTaskConfig, error) {
+ // Load as TaskPolicy and extract balance config
+ if taskPolicy, err := cp.LoadBalanceTaskPolicy(); err == nil && taskPolicy != nil {
+ if balanceConfig := taskPolicy.GetBalanceConfig(); balanceConfig != nil {
+ return balanceConfig, nil
+ }
+ }
+
+ // Return default config if no valid config found
+ return &BalanceTaskConfig{
+ ImbalanceThreshold: 0.1,
+ MinServerCount: 2,
+ }, nil
+}
+
+// LoadBalanceTaskPolicy loads complete balance task policy from protobuf file
+func (cp *ConfigPersistence) LoadBalanceTaskPolicy() (*worker_pb.TaskPolicy, error) {
+ if cp.dataDir == "" {
+ // Return default policy if no data directory
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 1,
+ RepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ CheckIntervalSeconds: 12 * 3600, // 12 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: 0.1,
+ MinServerCount: 2,
+ },
+ },
+ }, nil
+ }
+
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, BalanceTaskConfigFile)
+
+ // Check if file exists
+ if _, err := os.Stat(configPath); os.IsNotExist(err) {
+ // Return default policy if file doesn't exist
+ return &worker_pb.TaskPolicy{
+ Enabled: true,
+ MaxConcurrent: 1,
+ RepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ CheckIntervalSeconds: 12 * 3600, // 12 hours in seconds
+ TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: 0.1,
+ MinServerCount: 2,
+ },
+ },
+ }, nil
+ }
+
+ // Read file
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read balance task config file: %w", err)
+ }
+
+ // Try to unmarshal as TaskPolicy
+ var policy worker_pb.TaskPolicy
+ if err := proto.Unmarshal(configData, &policy); err == nil {
+ // Validate that it's actually a TaskPolicy with balance config
+ if policy.GetBalanceConfig() != nil {
+ glog.V(1).Infof("Loaded balance task policy from %s", configPath)
+ return &policy, nil
+ }
+ }
+
+ return nil, fmt.Errorf("failed to unmarshal balance task configuration")
+}
+
+// SaveReplicationTaskConfig saves replication task configuration to protobuf file
+func (cp *ConfigPersistence) SaveReplicationTaskConfig(config *ReplicationTaskConfig) error {
+ return cp.saveTaskConfig(ReplicationTaskConfigFile, config)
+}
+
+// LoadReplicationTaskConfig loads replication task configuration from protobuf file
+func (cp *ConfigPersistence) LoadReplicationTaskConfig() (*ReplicationTaskConfig, error) {
+ var config ReplicationTaskConfig
+ err := cp.loadTaskConfig(ReplicationTaskConfigFile, &config)
+ if err != nil {
+ // Return default config if file doesn't exist
+ if os.IsNotExist(err) {
+ return &ReplicationTaskConfig{
+ TargetReplicaCount: 1,
+ }, nil
+ }
+ return nil, err
+ }
+ return &config, nil
+}
+
+// saveTaskConfig is a generic helper for saving task configurations with both protobuf and JSON reference
+func (cp *ConfigPersistence) saveTaskConfig(filename string, config proto.Message) error {
+ if cp.dataDir == "" {
+ return fmt.Errorf("no data directory specified, cannot save task configuration")
+ }
+
+ // Create conf subdirectory path
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, filename)
+
+ // Derive the JSON reference filename (assumes the config filename ends in ".pb")
+ jsonFilename := filename[:len(filename)-3] + ".json"
+ jsonPath := filepath.Join(confDir, jsonFilename)
+
+ // Create conf directory if it doesn't exist
+ if err := os.MkdirAll(confDir, ConfigDirPermissions); err != nil {
+ return fmt.Errorf("failed to create config directory: %w", err)
+ }
+
+ // Marshal configuration to protobuf binary format
+ configData, err := proto.Marshal(config)
+ if err != nil {
+ return fmt.Errorf("failed to marshal task config: %w", err)
+ }
+
+ // Write protobuf file
+ if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
+ return fmt.Errorf("failed to write task config file: %w", err)
+ }
+
+ // Marshal configuration to JSON for reference
+ marshaler := protojson.MarshalOptions{
+ Multiline: true,
+ Indent: " ",
+ EmitUnpopulated: true,
+ }
+ jsonData, err := marshaler.Marshal(config)
+ if err != nil {
+ glog.Warningf("Failed to marshal task config to JSON reference: %v", err)
+ } else {
+ // Write JSON reference file
+ if err := os.WriteFile(jsonPath, jsonData, ConfigFilePermissions); err != nil {
+ glog.Warningf("Failed to write task config JSON reference: %v", err)
+ }
+ }
+
+ glog.V(1).Infof("Saved task configuration to %s (with JSON reference)", configPath)
+ return nil
+}
+
+// loadTaskConfig is a generic helper for loading task configurations from conf subdirectory
+func (cp *ConfigPersistence) loadTaskConfig(filename string, config proto.Message) error {
+ if cp.dataDir == "" {
+ return os.ErrNotExist // Will trigger default config return
+ }
+
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ configPath := filepath.Join(confDir, filename)
+
+ // Check if file exists
+ if _, err := os.Stat(configPath); os.IsNotExist(err) {
+ return err // Will trigger default config return
+ }
+
+ // Read file
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+ return fmt.Errorf("failed to read task config file: %w", err)
+ }
+
+ // Unmarshal protobuf binary data
+ if err := proto.Unmarshal(configData, config); err != nil {
+ return fmt.Errorf("failed to unmarshal task config: %w", err)
+ }
+
+ glog.V(1).Infof("Loaded task configuration from %s", configPath)
+ return nil
+}
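+
+// Example (illustrative sketch; assumes cp is a *ConfigPersistence). loadTaskConfig deliberately
+// returns a bare not-exist error, rather than wrapping it, when the data directory or file is
+// absent, so callers can fall back to defaults the same way LoadReplicationTaskConfig does:
+//
+//	var cfg ReplicationTaskConfig
+//	err := cp.loadTaskConfig(ReplicationTaskConfigFile, &cfg)
+//	if os.IsNotExist(err) {
+//		// use defaults
+//	} else if err != nil {
+//		glog.Errorf("load replication task config: %v", err)
+//	}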
+
// GetDataDir returns the data directory path
func (cp *ConfigPersistence) GetDataDir() string {
return cp.dataDir
@@ -249,6 +596,7 @@ func (cp *ConfigPersistence) GetConfigInfo() map[string]interface{} {
info := map[string]interface{}{
"data_dir_configured": cp.IsConfigured(),
"data_dir": cp.dataDir,
+ "config_subdir": ConfigSubdir,
}
if cp.IsConfigured() {
@@ -256,10 +604,18 @@ func (cp *ConfigPersistence) GetConfigInfo() map[string]interface{} {
if _, err := os.Stat(cp.dataDir); err == nil {
info["data_dir_exists"] = true
- // List config files
- configFiles, err := cp.ListConfigFiles()
- if err == nil {
- info["config_files"] = configFiles
+ // Check if conf subdirectory exists
+ confDir := filepath.Join(cp.dataDir, ConfigSubdir)
+ if _, err := os.Stat(confDir); err == nil {
+ info["conf_dir_exists"] = true
+
+ // List config files
+ configFiles, err := cp.ListConfigFiles()
+ if err == nil {
+ info["config_files"] = configFiles
+ }
+ } else {
+ info["conf_dir_exists"] = false
}
} else {
info["data_dir_exists"] = false
@@ -268,3 +624,67 @@ func (cp *ConfigPersistence) GetConfigInfo() map[string]interface{} {
return info
}
+
+// buildPolicyFromTaskConfigs loads task configurations from separate files and builds a MaintenancePolicy
+func buildPolicyFromTaskConfigs() *worker_pb.MaintenancePolicy {
+ policy := &worker_pb.MaintenancePolicy{
+ GlobalMaxConcurrent: 4,
+ DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds
+ TaskPolicies: make(map[string]*worker_pb.TaskPolicy),
+ }
+
+ // Load vacuum task configuration
+ if vacuumConfig := vacuum.LoadConfigFromPersistence(nil); vacuumConfig != nil {
+ policy.TaskPolicies["vacuum"] = &worker_pb.TaskPolicy{
+ Enabled: vacuumConfig.Enabled,
+ MaxConcurrent: int32(vacuumConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: float64(vacuumConfig.GarbageThreshold),
+ MinVolumeAgeHours: int32(vacuumConfig.MinVolumeAgeSeconds / 3600), // Convert seconds to hours
+ MinIntervalSeconds: int32(vacuumConfig.MinIntervalSeconds),
+ },
+ },
+ }
+ }
+
+ // Load erasure coding task configuration
+ if ecConfig := erasure_coding.LoadConfigFromPersistence(nil); ecConfig != nil {
+ policy.TaskPolicies["erasure_coding"] = &worker_pb.TaskPolicy{
+ Enabled: ecConfig.Enabled,
+ MaxConcurrent: int32(ecConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(ecConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(ecConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: float64(ecConfig.FullnessRatio),
+ QuietForSeconds: int32(ecConfig.QuietForSeconds),
+ MinVolumeSizeMb: int32(ecConfig.MinSizeMB),
+ CollectionFilter: ecConfig.CollectionFilter,
+ },
+ },
+ }
+ }
+
+ // Load balance task configuration
+ if balanceConfig := balance.LoadConfigFromPersistence(nil); balanceConfig != nil {
+ policy.TaskPolicies["balance"] = &worker_pb.TaskPolicy{
+ Enabled: balanceConfig.Enabled,
+ MaxConcurrent: int32(balanceConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: float64(balanceConfig.ImbalanceThreshold),
+ MinServerCount: int32(balanceConfig.MinServerCount),
+ },
+ },
+ }
+ }
+
+ glog.V(1).Infof("Built maintenance policy from separate task configs - %d task policies loaded", len(policy.TaskPolicies))
+ return policy
+}
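+
+// Example (illustrative sketch). Consumers of the assembled policy look tasks up by name and then
+// read the typed config:
+//
+//	policy := buildPolicyFromTaskConfigs()
+//	if tp, ok := policy.TaskPolicies["vacuum"]; ok && tp.Enabled {
+//		if vc := tp.GetVacuumConfig(); vc != nil {
+//			glog.V(1).Infof("vacuum garbage threshold: %.2f", vc.GarbageThreshold)
+//		}
+//	}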
diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go
new file mode 100644
index 000000000..272890cf0
--- /dev/null
+++ b/weed/admin/dash/ec_shard_management.go
@@ -0,0 +1,734 @@
+package dash
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+)
+
+// GetClusterEcShards retrieves cluster EC shards data with pagination, sorting, and filtering
+func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcShardsData, error) {
+ // Set defaults
+ if page < 1 {
+ page = 1
+ }
+ if pageSize < 1 || pageSize > 1000 {
+ pageSize = 100
+ }
+ if sortBy == "" {
+ sortBy = "volume_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
+ var ecShards []EcShardWithInfo
+ volumeShardsMap := make(map[uint32]map[int]bool) // volumeId -> set of shards present
+ volumesWithAllShards := 0
+ volumesWithMissingShards := 0
+
+ // Get detailed EC shard information via gRPC
+ err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ if err != nil {
+ return err
+ }
+
+ if resp.TopologyInfo != nil {
+ for _, dc := range resp.TopologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, node := range rack.DataNodeInfos {
+ for _, diskInfo := range node.DiskInfos {
+ // Process EC shard information
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ volumeId := ecShardInfo.Id
+
+ // Initialize volume shards map if needed
+ if volumeShardsMap[volumeId] == nil {
+ volumeShardsMap[volumeId] = make(map[int]bool)
+ }
+
+ // Create individual shard entries for each shard this server has
+ shardBits := ecShardInfo.EcIndexBits
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if (shardBits & (1 << uint(shardId))) != 0 {
+ // Mark this shard as present for this volume
+ volumeShardsMap[volumeId][shardId] = true
+
+ ecShard := EcShardWithInfo{
+ VolumeID: volumeId,
+ ShardID: uint32(shardId),
+ Collection: ecShardInfo.Collection,
+ Size: 0, // EC shards don't have individual size in the API response
+ Server: node.Id,
+ DataCenter: dc.Id,
+ Rack: rack.Id,
+ DiskType: diskInfo.Type,
+ ModifiedTime: 0, // Not available in current API
+ EcIndexBits: ecShardInfo.EcIndexBits,
+ ShardCount: getShardCount(ecShardInfo.EcIndexBits),
+ }
+ ecShards = append(ecShards, ecShard)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Calculate volume-level completeness (across all servers)
+ volumeCompleteness := make(map[uint32]bool)
+ volumeMissingShards := make(map[uint32][]int)
+
+ for volumeId, shardsPresent := range volumeShardsMap {
+ var missingShards []int
+ shardCount := len(shardsPresent)
+
+ // Find which shards are missing for this volume across ALL servers
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if !shardsPresent[shardId] {
+ missingShards = append(missingShards, shardId)
+ }
+ }
+
+ isComplete := (shardCount == erasure_coding.TotalShardsCount)
+ volumeCompleteness[volumeId] = isComplete
+ volumeMissingShards[volumeId] = missingShards
+
+ if isComplete {
+ volumesWithAllShards++
+ } else {
+ volumesWithMissingShards++
+ }
+ }
+
+ // Update completeness info for each shard based on volume-level completeness
+ for i := range ecShards {
+ volumeId := ecShards[i].VolumeID
+ ecShards[i].IsComplete = volumeCompleteness[volumeId]
+ ecShards[i].MissingShards = volumeMissingShards[volumeId]
+ }
+
+ // Filter by collection if specified
+ if collection != "" {
+ var filteredShards []EcShardWithInfo
+ for _, shard := range ecShards {
+ if shard.Collection == collection {
+ filteredShards = append(filteredShards, shard)
+ }
+ }
+ ecShards = filteredShards
+ }
+
+ // Sort the results
+ sortEcShards(ecShards, sortBy, sortOrder)
+
+ // Calculate statistics for conditional display
+ dataCenters := make(map[string]bool)
+ racks := make(map[string]bool)
+ collections := make(map[string]bool)
+
+ for _, shard := range ecShards {
+ dataCenters[shard.DataCenter] = true
+ racks[shard.Rack] = true
+ if shard.Collection != "" {
+ collections[shard.Collection] = true
+ }
+ }
+
+ // Pagination
+ totalShards := len(ecShards)
+ totalPages := (totalShards + pageSize - 1) / pageSize
+ startIndex := (page - 1) * pageSize
+ endIndex := startIndex + pageSize
+ if endIndex > totalShards {
+ endIndex = totalShards
+ }
+
+ if startIndex >= totalShards {
+ startIndex = 0
+ endIndex = 0
+ }
+
+ paginatedShards := ecShards[startIndex:endIndex]
+
+ // Build response
+ data := &ClusterEcShardsData{
+ EcShards: paginatedShards,
+ TotalShards: totalShards,
+ TotalVolumes: len(volumeShardsMap),
+ LastUpdated: time.Now(),
+
+ // Pagination
+ CurrentPage: page,
+ TotalPages: totalPages,
+ PageSize: pageSize,
+
+ // Sorting
+ SortBy: sortBy,
+ SortOrder: sortOrder,
+
+ // Statistics
+ DataCenterCount: len(dataCenters),
+ RackCount: len(racks),
+ CollectionCount: len(collections),
+
+ // Conditional display flags
+ ShowDataCenterColumn: len(dataCenters) > 1,
+ ShowRackColumn: len(racks) > 1,
+ ShowCollectionColumn: len(collections) > 1 || collection != "",
+
+ // Filtering
+ FilterCollection: collection,
+
+ // EC specific statistics
+ ShardsPerVolume: make(map[uint32]int), // populated below from volumeShardsMap
+ VolumesWithAllShards: volumesWithAllShards,
+ VolumesWithMissingShards: volumesWithMissingShards,
+ }
+
+ // Recalculate ShardsPerVolume for the response
+ for volumeId, shardsPresent := range volumeShardsMap {
+ data.ShardsPerVolume[volumeId] = len(shardsPresent)
+ }
+
+ // Set single values when only one exists
+ if len(dataCenters) == 1 {
+ for dc := range dataCenters {
+ data.SingleDataCenter = dc
+ break
+ }
+ }
+ if len(racks) == 1 {
+ for rack := range racks {
+ data.SingleRack = rack
+ break
+ }
+ }
+ if len(collections) == 1 {
+ for col := range collections {
+ data.SingleCollection = col
+ break
+ }
+ }
+
+ return data, nil
+}
+
+// GetClusterEcVolumes retrieves cluster EC volumes data grouped by volume ID with shard locations
+func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcVolumesData, error) {
+ // Set defaults
+ if page < 1 {
+ page = 1
+ }
+ if pageSize < 1 || pageSize > 1000 {
+ pageSize = 100
+ }
+ if sortBy == "" {
+ sortBy = "volume_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
+ volumeData := make(map[uint32]*EcVolumeWithShards)
+ totalShards := 0
+
+ // Get detailed EC shard information via gRPC
+ err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ if err != nil {
+ return err
+ }
+
+ if resp.TopologyInfo != nil {
+ for _, dc := range resp.TopologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, node := range rack.DataNodeInfos {
+ for _, diskInfo := range node.DiskInfos {
+ // Process EC shard information
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ volumeId := ecShardInfo.Id
+
+ // Initialize volume data if needed
+ if volumeData[volumeId] == nil {
+ volumeData[volumeId] = &EcVolumeWithShards{
+ VolumeID: volumeId,
+ Collection: ecShardInfo.Collection,
+ TotalShards: 0,
+ IsComplete: false,
+ MissingShards: []int{},
+ ShardLocations: make(map[int]string),
+ ShardSizes: make(map[int]int64),
+ DataCenters: []string{},
+ Servers: []string{},
+ Racks: []string{},
+ }
+ }
+
+ volume := volumeData[volumeId]
+
+ // Track data centers and servers
+ dcExists := false
+ for _, existingDc := range volume.DataCenters {
+ if existingDc == dc.Id {
+ dcExists = true
+ break
+ }
+ }
+ if !dcExists {
+ volume.DataCenters = append(volume.DataCenters, dc.Id)
+ }
+
+ serverExists := false
+ for _, existingServer := range volume.Servers {
+ if existingServer == node.Id {
+ serverExists = true
+ break
+ }
+ }
+ if !serverExists {
+ volume.Servers = append(volume.Servers, node.Id)
+ }
+
+ // Track racks
+ rackExists := false
+ for _, existingRack := range volume.Racks {
+ if existingRack == rack.Id {
+ rackExists = true
+ break
+ }
+ }
+ if !rackExists {
+ volume.Racks = append(volume.Racks, rack.Id)
+ }
+
+ // Process each shard this server has for this volume
+ shardBits := ecShardInfo.EcIndexBits
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if (shardBits & (1 << uint(shardId))) != 0 {
+ // Record shard location
+ volume.ShardLocations[shardId] = node.Id
+ totalShards++
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Collect shard size information from volume servers
+ for volumeId, volume := range volumeData {
+ // Deduplicate the servers holding shards of this volume to avoid redundant gRPC calls
+ serverHasVolume := make(map[string]bool)
+ for _, server := range volume.Servers {
+ serverHasVolume[server] = true
+ }
+
+ // Query each server for shard sizes
+ for server := range serverHasVolume {
+ err := s.WithVolumeServerClient(pb.ServerAddress(server), func(client volume_server_pb.VolumeServerClient) error {
+ resp, err := client.VolumeEcShardsInfo(context.Background(), &volume_server_pb.VolumeEcShardsInfoRequest{
+ VolumeId: volumeId,
+ })
+ if err != nil {
+ glog.V(1).Infof("Failed to get EC shard info from %s for volume %d: %v", server, volumeId, err)
+ return nil // Continue with other servers, don't fail the entire request
+ }
+
+ // Update shard sizes
+ for _, shardInfo := range resp.EcShardInfos {
+ volume.ShardSizes[int(shardInfo.ShardId)] = shardInfo.Size
+ }
+
+ return nil
+ })
+ if err != nil {
+ glog.V(1).Infof("Failed to connect to volume server %s: %v", server, err)
+ }
+ }
+ }
+
+ // Calculate completeness for each volume
+ completeVolumes := 0
+ incompleteVolumes := 0
+
+ for _, volume := range volumeData {
+ volume.TotalShards = len(volume.ShardLocations)
+
+ // Find missing shards
+ var missingShards []int
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if _, exists := volume.ShardLocations[shardId]; !exists {
+ missingShards = append(missingShards, shardId)
+ }
+ }
+
+ volume.MissingShards = missingShards
+ volume.IsComplete = (len(missingShards) == 0)
+
+ if volume.IsComplete {
+ completeVolumes++
+ } else {
+ incompleteVolumes++
+ }
+ }
+
+ // Convert map to slice
+ var ecVolumes []EcVolumeWithShards
+ for _, volume := range volumeData {
+ // Filter by collection if specified
+ if collection == "" || volume.Collection == collection {
+ ecVolumes = append(ecVolumes, *volume)
+ }
+ }
+
+ // Sort the results
+ sortEcVolumes(ecVolumes, sortBy, sortOrder)
+
+ // Calculate statistics for conditional display
+ dataCenters := make(map[string]bool)
+ collections := make(map[string]bool)
+
+ for _, volume := range ecVolumes {
+ for _, dc := range volume.DataCenters {
+ dataCenters[dc] = true
+ }
+ if volume.Collection != "" {
+ collections[volume.Collection] = true
+ }
+ }
+
+ // Pagination
+ totalVolumes := len(ecVolumes)
+ totalPages := (totalVolumes + pageSize - 1) / pageSize
+ startIndex := (page - 1) * pageSize
+ endIndex := startIndex + pageSize
+ if endIndex > totalVolumes {
+ endIndex = totalVolumes
+ }
+
+ if startIndex >= totalVolumes {
+ startIndex = 0
+ endIndex = 0
+ }
+
+ paginatedVolumes := ecVolumes[startIndex:endIndex]
+
+ // Build response
+ data := &ClusterEcVolumesData{
+ EcVolumes: paginatedVolumes,
+ TotalVolumes: totalVolumes,
+ LastUpdated: time.Now(),
+
+ // Pagination
+ Page: page,
+ PageSize: pageSize,
+ TotalPages: totalPages,
+
+ // Sorting
+ SortBy: sortBy,
+ SortOrder: sortOrder,
+
+ // Filtering
+ Collection: collection,
+
+ // Conditional display flags
+ ShowDataCenterColumn: len(dataCenters) > 1,
+ ShowRackColumn: false, // rack data is collected per volume but not shown as a column in this aggregated view
+ ShowCollectionColumn: len(collections) > 1 || collection != "",
+
+ // Statistics
+ CompleteVolumes: completeVolumes,
+ IncompleteVolumes: incompleteVolumes,
+ TotalShards: totalShards,
+ }
+
+ return data, nil
+}
+
+// sortEcVolumes sorts EC volumes based on the specified field and order
+func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string) {
+ sort.Slice(volumes, func(i, j int) bool {
+ var less bool
+ switch sortBy {
+ case "volume_id":
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ case "collection":
+ if volumes[i].Collection == volumes[j].Collection {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].Collection < volumes[j].Collection
+ }
+ case "total_shards":
+ if volumes[i].TotalShards == volumes[j].TotalShards {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].TotalShards < volumes[j].TotalShards
+ }
+ case "completeness":
+ // Complete volumes first, then by volume ID
+ if volumes[i].IsComplete == volumes[j].IsComplete {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].IsComplete && !volumes[j].IsComplete
+ }
+ default:
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ }
+
+ if sortOrder == "desc" {
+ return !less
+ }
+ return less
+ })
+}
+
+// getShardCount returns the number of shards represented by the bitmap
+func getShardCount(ecIndexBits uint32) int {
+ count := 0
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ if (ecIndexBits & (1 << uint(i))) != 0 {
+ count++
+ }
+ }
+ return count
+}
+
+// getMissingShards returns a slice of missing shard IDs for a volume
+func getMissingShards(ecIndexBits uint32) []int {
+ var missing []int
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ if (ecIndexBits & (1 << uint(i))) == 0 {
+ missing = append(missing, i)
+ }
+ }
+ return missing
+}
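+
+// Example (illustrative sketch). EcIndexBits is a per-server bitmap where bit i set means shard i
+// is present on that server. With the default 10+4 erasure coding (TotalShardsCount = 14):
+//
+//	bits := uint32(0b1011)            // shards 0, 1 and 3 present
+//	n := getShardCount(bits)          // 3
+//	missing := getMissingShards(bits) // [2 4 5 6 7 8 9 10 11 12 13]
+//	_, _ = n, missing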
+
+// sortEcShards sorts EC shards based on the specified field and order
+func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
+ sort.Slice(shards, func(i, j int) bool {
+ var less bool
+ switch sortBy {
+ case "shard_id":
+ less = shards[i].ShardID < shards[j].ShardID
+ case "server":
+ if shards[i].Server == shards[j].Server {
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
+ } else {
+ less = shards[i].Server < shards[j].Server
+ }
+ case "data_center":
+ if shards[i].DataCenter == shards[j].DataCenter {
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
+ } else {
+ less = shards[i].DataCenter < shards[j].DataCenter
+ }
+ case "rack":
+ if shards[i].Rack == shards[j].Rack {
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
+ } else {
+ less = shards[i].Rack < shards[j].Rack
+ }
+ default:
+ less = shards[i].ShardID < shards[j].ShardID
+ }
+
+ if sortOrder == "desc" {
+ return !less
+ }
+ return less
+ })
+}
+
+// GetEcVolumeDetails retrieves detailed information about a specific EC volume
+func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrder string) (*EcVolumeDetailsData, error) {
+ // Set defaults
+ if sortBy == "" {
+ sortBy = "shard_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
+ var shards []EcShardWithInfo
+ var collection string
+ dataCenters := make(map[string]bool)
+ servers := make(map[string]bool)
+
+ // Get detailed EC shard information for the specific volume via gRPC
+ err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ if err != nil {
+ return err
+ }
+
+ if resp.TopologyInfo != nil {
+ for _, dc := range resp.TopologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, node := range rack.DataNodeInfos {
+ for _, diskInfo := range node.DiskInfos {
+ // Process EC shard information for this specific volume
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ if ecShardInfo.Id == volumeID {
+ collection = ecShardInfo.Collection
+ dataCenters[dc.Id] = true
+ servers[node.Id] = true
+
+ // Create individual shard entries for each shard this server has
+ shardBits := ecShardInfo.EcIndexBits
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if (shardBits & (1 << uint(shardId))) != 0 {
+ ecShard := EcShardWithInfo{
+ VolumeID: ecShardInfo.Id,
+ ShardID: uint32(shardId),
+ Collection: ecShardInfo.Collection,
+ Size: 0, // EC shards don't have individual size in the API response
+ Server: node.Id,
+ DataCenter: dc.Id,
+ Rack: rack.Id,
+ DiskType: diskInfo.Type,
+ ModifiedTime: 0, // Not available in current API
+ EcIndexBits: ecShardInfo.EcIndexBits,
+ ShardCount: getShardCount(ecShardInfo.EcIndexBits),
+ }
+ shards = append(shards, ecShard)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(shards) == 0 {
+ return nil, fmt.Errorf("EC volume %d not found", volumeID)
+ }
+
+ // Collect shard size information from volume servers
+ shardSizeMap := make(map[string]map[uint32]uint64) // server -> shardId -> size
+ for _, shard := range shards {
+ server := shard.Server
+ if _, exists := shardSizeMap[server]; !exists {
+ // Query this server for shard sizes
+ err := s.WithVolumeServerClient(pb.ServerAddress(server), func(client volume_server_pb.VolumeServerClient) error {
+ resp, err := client.VolumeEcShardsInfo(context.Background(), &volume_server_pb.VolumeEcShardsInfoRequest{
+ VolumeId: volumeID,
+ })
+ if err != nil {
+ glog.V(1).Infof("Failed to get EC shard info from %s for volume %d: %v", server, volumeID, err)
+ return nil // Continue with other servers, don't fail the entire request
+ }
+
+ // Store shard sizes for this server
+ shardSizeMap[server] = make(map[uint32]uint64)
+ for _, shardInfo := range resp.EcShardInfos {
+ shardSizeMap[server][shardInfo.ShardId] = uint64(shardInfo.Size)
+ }
+
+ return nil
+ })
+ if err != nil {
+ glog.V(1).Infof("Failed to connect to volume server %s: %v", server, err)
+ }
+ }
+ }
+
+ // Update shard sizes in the shards array
+ for i := range shards {
+ server := shards[i].Server
+ shardId := shards[i].ShardID
+ if serverSizes, exists := shardSizeMap[server]; exists {
+ if size, exists := serverSizes[shardId]; exists {
+ shards[i].Size = size
+ }
+ }
+ }
+
+ // Calculate completeness based on unique shard IDs
+ foundShards := make(map[int]bool)
+ for _, shard := range shards {
+ foundShards[int(shard.ShardID)] = true
+ }
+
+ totalUniqueShards := len(foundShards)
+ isComplete := (totalUniqueShards == erasure_coding.TotalShardsCount)
+
+ // Calculate missing shards
+ var missingShards []int
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ if !foundShards[i] {
+ missingShards = append(missingShards, i)
+ }
+ }
+
+ // Update completeness info for each shard
+ for i := range shards {
+ shards[i].IsComplete = isComplete
+ shards[i].MissingShards = missingShards
+ }
+
+ // Sort shards based on parameters
+ sortEcShards(shards, sortBy, sortOrder)
+
+ // Convert maps to slices
+ var dcList []string
+ for dc := range dataCenters {
+ dcList = append(dcList, dc)
+ }
+ var serverList []string
+ for server := range servers {
+ serverList = append(serverList, server)
+ }
+
+ data := &EcVolumeDetailsData{
+ VolumeID: volumeID,
+ Collection: collection,
+ Shards: shards,
+ TotalShards: totalUniqueShards,
+ IsComplete: isComplete,
+ MissingShards: missingShards,
+ DataCenters: dcList,
+ Servers: serverList,
+ LastUpdated: time.Now(),
+ SortBy: sortBy,
+ SortOrder: sortOrder,
+ }
+
+ return data, nil
+}
diff --git a/weed/admin/dash/middleware.go b/weed/admin/dash/middleware.go
index ce538d7ca..a4cfedfd0 100644
--- a/weed/admin/dash/middleware.go
+++ b/weed/admin/dash/middleware.go
@@ -25,3 +25,26 @@ func RequireAuth() gin.HandlerFunc {
c.Next()
}
}
+
+// RequireAuthAPI checks if user is authenticated for API endpoints
+// Returns JSON error instead of redirecting to login page
+func RequireAuthAPI() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ session := sessions.Default(c)
+ authenticated := session.Get("authenticated")
+ username := session.Get("username")
+
+ if authenticated != true || username == nil {
+ c.JSON(http.StatusUnauthorized, gin.H{
+ "error": "Authentication required",
+ "message": "Please log in to access this endpoint",
+ })
+ c.Abort()
+ return
+ }
+
+ // Set username in context for use in handlers
+ c.Set("username", username)
+ c.Next()
+ }
+}
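+
+// Example (illustrative sketch; r is assumed to be a *gin.Engine and topologyHandler a placeholder).
+// Unlike RequireAuth, this middleware answers with a 401 JSON body instead of redirecting, so it is
+// intended for API groups:
+//
+//	api := r.Group("/api")
+//	api.Use(RequireAuthAPI())
+//	api.GET("/cluster/topology", topologyHandler)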
diff --git a/weed/admin/dash/types.go b/weed/admin/dash/types.go
index 60f499229..f098fad8c 100644
--- a/weed/admin/dash/types.go
+++ b/weed/admin/dash/types.go
@@ -135,6 +135,84 @@ type ClusterVolumesData struct {
FilterCollection string `json:"filter_collection"`
}
+// ClusterEcShardsData represents the data for the cluster EC shards page
+type ClusterEcShardsData struct {
+ Username string `json:"username"`
+ EcShards []EcShardWithInfo `json:"ec_shards"`
+ TotalShards int `json:"total_shards"`
+ TotalVolumes int `json:"total_volumes"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Pagination
+ CurrentPage int `json:"current_page"`
+ TotalPages int `json:"total_pages"`
+ PageSize int `json:"page_size"`
+
+ // Sorting
+ SortBy string `json:"sort_by"`
+ SortOrder string `json:"sort_order"`
+
+ // Statistics
+ DataCenterCount int `json:"datacenter_count"`
+ RackCount int `json:"rack_count"`
+ CollectionCount int `json:"collection_count"`
+
+ // Conditional display flags
+ ShowDataCenterColumn bool `json:"show_datacenter_column"`
+ ShowRackColumn bool `json:"show_rack_column"`
+ ShowCollectionColumn bool `json:"show_collection_column"`
+
+ // Single values when only one exists
+ SingleDataCenter string `json:"single_datacenter"`
+ SingleRack string `json:"single_rack"`
+ SingleCollection string `json:"single_collection"`
+
+ // Filtering
+ FilterCollection string `json:"filter_collection"`
+
+ // EC specific statistics
+ ShardsPerVolume map[uint32]int `json:"shards_per_volume"` // VolumeID -> shard count
+ VolumesWithAllShards int `json:"volumes_with_all_shards"` // Volumes with all 14 shards
+ VolumesWithMissingShards int `json:"volumes_with_missing_shards"` // Volumes missing shards
+}
+
+// EcShardWithInfo represents an EC shard with its topology information
+type EcShardWithInfo struct {
+ VolumeID uint32 `json:"volume_id"`
+ ShardID uint32 `json:"shard_id"`
+ Collection string `json:"collection"`
+ Size uint64 `json:"size"`
+ Server string `json:"server"`
+ DataCenter string `json:"datacenter"`
+ Rack string `json:"rack"`
+ DiskType string `json:"disk_type"`
+ ModifiedTime int64 `json:"modified_time"`
+
+ // EC specific fields
+ EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has
+ ShardCount int `json:"shard_count"` // Number of shards this server has for this volume
+ IsComplete bool `json:"is_complete"` // True if this volume has all 14 shards
+ MissingShards []int `json:"missing_shards"` // List of missing shard IDs
+}
+
+// EcVolumeDetailsData represents the data for the EC volume details page
+type EcVolumeDetailsData struct {
+ Username string `json:"username"`
+ VolumeID uint32 `json:"volume_id"`
+ Collection string `json:"collection"`
+ Shards []EcShardWithInfo `json:"shards"`
+ TotalShards int `json:"total_shards"`
+ IsComplete bool `json:"is_complete"`
+ MissingShards []int `json:"missing_shards"`
+ DataCenters []string `json:"datacenters"`
+ Servers []string `json:"servers"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Sorting
+ SortBy string `json:"sort_by"`
+ SortOrder string `json:"sort_order"`
+}
+
type VolumeDetailsData struct {
Volume VolumeWithTopology `json:"volume"`
Replicas []VolumeWithTopology `json:"replicas"`
@@ -145,12 +223,13 @@ type VolumeDetailsData struct {
// Collection management structures
type CollectionInfo struct {
- Name string `json:"name"`
- DataCenter string `json:"datacenter"`
- VolumeCount int `json:"volume_count"`
- FileCount int64 `json:"file_count"`
- TotalSize int64 `json:"total_size"`
- DiskTypes []string `json:"disk_types"`
+ Name string `json:"name"`
+ DataCenter string `json:"datacenter"`
+ VolumeCount int `json:"volume_count"`
+ EcVolumeCount int `json:"ec_volume_count"`
+ FileCount int64 `json:"file_count"`
+ TotalSize int64 `json:"total_size"`
+ DiskTypes []string `json:"disk_types"`
}
type ClusterCollectionsData struct {
@@ -158,6 +237,7 @@ type ClusterCollectionsData struct {
Collections []CollectionInfo `json:"collections"`
TotalCollections int `json:"total_collections"`
TotalVolumes int `json:"total_volumes"`
+ TotalEcVolumes int `json:"total_ec_volumes"`
TotalFiles int64 `json:"total_files"`
TotalSize int64 `json:"total_size"`
LastUpdated time.Time `json:"last_updated"`
@@ -376,3 +456,74 @@ type MaintenanceWorkersData struct {
}
// Maintenance system types are now in weed/admin/maintenance package
+
+// EcVolumeWithShards represents an EC volume with its shard distribution
+type EcVolumeWithShards struct {
+ VolumeID uint32 `json:"volume_id"`
+ Collection string `json:"collection"`
+ TotalShards int `json:"total_shards"`
+ IsComplete bool `json:"is_complete"`
+ MissingShards []int `json:"missing_shards"`
+ ShardLocations map[int]string `json:"shard_locations"` // shardId -> server
+ ShardSizes map[int]int64 `json:"shard_sizes"` // shardId -> size in bytes
+ DataCenters []string `json:"data_centers"`
+ Servers []string `json:"servers"`
+ Racks []string `json:"racks"`
+ ModifiedTime int64 `json:"modified_time"`
+}
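+
+// Example (illustrative sketch; vol is assumed to be an EcVolumeWithShards, 14 stands for
+// erasure_coding.TotalShardsCount). ShardLocations and ShardSizes are keyed by shard ID, so
+// rendering code can walk every expected shard and tell missing from present:
+//
+//	for shardId := 0; shardId < 14; shardId++ {
+//		if server, ok := vol.ShardLocations[shardId]; ok {
+//			fmt.Printf("shard %d on %s (%d bytes)\n", shardId, server, vol.ShardSizes[shardId])
+//		}
+//	}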
+
+// ClusterEcVolumesData represents the response for clustered EC volumes view
+type ClusterEcVolumesData struct {
+ EcVolumes []EcVolumeWithShards `json:"ec_volumes"`
+ TotalVolumes int `json:"total_volumes"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Pagination
+ Page int `json:"page"`
+ PageSize int `json:"page_size"`
+ TotalPages int `json:"total_pages"`
+
+ // Sorting
+ SortBy string `json:"sort_by"`
+ SortOrder string `json:"sort_order"`
+
+ // Filtering
+ Collection string `json:"collection"`
+
+ // Conditional display flags
+ ShowDataCenterColumn bool `json:"show_datacenter_column"`
+ ShowRackColumn bool `json:"show_rack_column"`
+ ShowCollectionColumn bool `json:"show_collection_column"`
+
+ // Statistics
+ CompleteVolumes int `json:"complete_volumes"`
+ IncompleteVolumes int `json:"incomplete_volumes"`
+ TotalShards int `json:"total_shards"`
+
+ // User context
+ Username string `json:"username"`
+}
+
+// Collection detail page structures
+type CollectionDetailsData struct {
+ Username string `json:"username"`
+ CollectionName string `json:"collection_name"`
+ RegularVolumes []VolumeWithTopology `json:"regular_volumes"`
+ EcVolumes []EcVolumeWithShards `json:"ec_volumes"`
+ TotalVolumes int `json:"total_volumes"`
+ TotalEcVolumes int `json:"total_ec_volumes"`
+ TotalFiles int64 `json:"total_files"`
+ TotalSize int64 `json:"total_size"`
+ DataCenters []string `json:"data_centers"`
+ DiskTypes []string `json:"disk_types"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Pagination
+ Page int `json:"page"`
+ PageSize int `json:"page_size"`
+ TotalPages int `json:"total_pages"`
+
+ // Sorting
+ SortBy string `json:"sort_by"`
+ SortOrder string `json:"sort_order"`
+}
diff --git a/weed/admin/dash/worker_grpc_server.go b/weed/admin/dash/worker_grpc_server.go
index 36f97261a..3b4312235 100644
--- a/weed/admin/dash/worker_grpc_server.go
+++ b/weed/admin/dash/worker_grpc_server.go
@@ -319,27 +319,41 @@ func (s *WorkerGrpcServer) handleHeartbeat(conn *WorkerConnection, heartbeat *wo
// handleTaskRequest processes task requests from workers
func (s *WorkerGrpcServer) handleTaskRequest(conn *WorkerConnection, request *worker_pb.TaskRequest) {
+ // glog.Infof("DEBUG handleTaskRequest: Worker %s requesting tasks with capabilities %v", conn.workerID, conn.capabilities)
+
if s.adminServer.maintenanceManager == nil {
+ glog.Infof("DEBUG handleTaskRequest: maintenance manager is nil")
return
}
// Get next task from maintenance manager
task := s.adminServer.maintenanceManager.GetNextTask(conn.workerID, conn.capabilities)
+ // glog.Infof("DEBUG handleTaskRequest: GetNextTask returned task: %v", task != nil)
if task != nil {
+ glog.Infof("DEBUG handleTaskRequest: Assigning task %s (type: %s) to worker %s", task.ID, task.Type, conn.workerID)
+
+ // Use typed params directly - master client should already be configured in the params
+ var taskParams *worker_pb.TaskParams
+ if task.TypedParams != nil {
+ taskParams = task.TypedParams
+ } else {
+ // Create basic params if none exist
+ taskParams = &worker_pb.TaskParams{
+ VolumeId: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ }
+ }
+
// Send task assignment
assignment := &worker_pb.AdminMessage{
Timestamp: time.Now().Unix(),
Message: &worker_pb.AdminMessage_TaskAssignment{
TaskAssignment: &worker_pb.TaskAssignment{
- TaskId: task.ID,
- TaskType: string(task.Type),
- Params: &worker_pb.TaskParams{
- VolumeId: task.VolumeID,
- Server: task.Server,
- Collection: task.Collection,
- Parameters: convertTaskParameters(task.Parameters),
- },
+ TaskId: task.ID,
+ TaskType: string(task.Type),
+ Params: taskParams,
Priority: int32(task.Priority),
CreatedTime: time.Now().Unix(),
},
@@ -348,10 +362,12 @@ func (s *WorkerGrpcServer) handleTaskRequest(conn *WorkerConnection, request *wo
select {
case conn.outgoing <- assignment:
- glog.V(2).Infof("Assigned task %s to worker %s", task.ID, conn.workerID)
+ glog.Infof("DEBUG handleTaskRequest: Successfully assigned task %s to worker %s", task.ID, conn.workerID)
case <-time.After(time.Second):
glog.Warningf("Failed to send task assignment to worker %s", conn.workerID)
}
+ } else {
+ // glog.Infof("DEBUG handleTaskRequest: No tasks available for worker %s", conn.workerID)
}
}
diff --git a/weed/admin/handlers/admin_handlers.go b/weed/admin/handlers/admin_handlers.go
index 76a123a4f..d28dc9e53 100644
--- a/weed/admin/handlers/admin_handlers.go
+++ b/weed/admin/handlers/admin_handlers.go
@@ -78,6 +78,9 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
protected.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes)
protected.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
protected.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections)
+ protected.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails)
+ protected.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards)
+ protected.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
// Message Queue management routes
protected.GET("/mq/brokers", h.mqHandlers.ShowBrokers)
@@ -93,7 +96,8 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
protected.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig)
// API routes for AJAX calls
- api := protected.Group("/api")
+ api := r.Group("/api")
+ api.Use(dash.RequireAuthAPI()) // Use API-specific auth middleware
{
api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology)
api.GET("/cluster/masters", h.clusterHandlers.GetMasters)
@@ -198,6 +202,9 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
r.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes)
r.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
r.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections)
+ r.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails)
+ r.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards)
+ r.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
// Message Queue management routes
r.GET("/mq/brokers", h.mqHandlers.ShowBrokers)
diff --git a/weed/admin/handlers/cluster_handlers.go b/weed/admin/handlers/cluster_handlers.go
index 03f7e88a0..32b89acd1 100644
--- a/weed/admin/handlers/cluster_handlers.go
+++ b/weed/admin/handlers/cluster_handlers.go
@@ -1,6 +1,7 @@
package handlers
import (
+ "math"
"net/http"
"strconv"
@@ -161,6 +162,129 @@ func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
}
}
+// ShowCollectionDetails renders the collection detail page
+func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
+ collectionName := c.Param("name")
+ if collectionName == "" {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Collection name is required"})
+ return
+ }
+
+ // Parse query parameters
+ page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
+ pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25"))
+ sortBy := c.DefaultQuery("sort_by", "volume_id")
+ sortOrder := c.DefaultQuery("sort_order", "asc")
+
+ // Get collection details data (volumes and EC volumes)
+ collectionDetailsData, err := h.adminServer.GetCollectionDetails(collectionName, page, pageSize, sortBy, sortOrder)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()})
+ return
+ }
+
+ // Set username
+ username := c.GetString("username")
+ if username == "" {
+ username = "admin"
+ }
+ collectionDetailsData.Username = username
+
+ // Render HTML template
+ c.Header("Content-Type", "text/html")
+ collectionDetailsComponent := app.CollectionDetails(*collectionDetailsData)
+ layoutComponent := layout.Layout(c, collectionDetailsComponent)
+ err = layoutComponent.Render(c.Request.Context(), c.Writer)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
+ return
+ }
+}
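+
+// Example (illustrative; collection name is hypothetical). With the route registered in
+// admin_handlers.go, a typical request for this handler looks like:
+//
+//	GET /cluster/collections/my-collection?page=2&page_size=25&sort_by=volume_id&sort_order=desc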
+
+// ShowClusterEcShards handles the cluster EC shards page (individual shards view)
+func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
+ // Parse query parameters
+ page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
+ pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "100"))
+ sortBy := c.DefaultQuery("sort_by", "volume_id")
+ sortOrder := c.DefaultQuery("sort_order", "asc")
+ collection := c.DefaultQuery("collection", "")
+
+ // Get data from admin server
+ data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+ return
+ }
+
+ // Set username
+ username := c.GetString("username")
+ if username == "" {
+ username = "admin"
+ }
+ data.Username = username
+
+ // Render template
+ c.Header("Content-Type", "text/html")
+ ecVolumesComponent := app.ClusterEcVolumes(*data)
+ layoutComponent := layout.Layout(c, ecVolumesComponent)
+ err = layoutComponent.Render(c.Request.Context(), c.Writer)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+ return
+ }
+}
+
+// ShowEcVolumeDetails renders the EC volume details page
+func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
+ volumeIDStr := c.Param("id")
+
+ if volumeIDStr == "" {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
+ return
+ }
+
+ volumeID, err := strconv.Atoi(volumeIDStr)
+ if err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
+ return
+ }
+
+ // Check that volumeID is within uint32 range
+ if volumeID < 0 || volumeID > int(math.MaxUint32) {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID out of range"})
+ return
+ }
+
+ // Parse sorting parameters
+ sortBy := c.DefaultQuery("sort_by", "shard_id")
+ sortOrder := c.DefaultQuery("sort_order", "asc")
+
+ // Get EC volume details
+ ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
+ return
+ }
+
+ // Set username
+ username := c.GetString("username")
+ if username == "" {
+ username = "admin"
+ }
+ ecVolumeDetails.Username = username
+
+ // Render HTML template
+ c.Header("Content-Type", "text/html")
+ ecVolumeDetailsComponent := app.EcVolumeDetails(*ecVolumeDetails)
+ layoutComponent := layout.Layout(c, ecVolumeDetailsComponent)
+ err = layoutComponent.Render(c.Request.Context(), c.Writer)
+ if err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
+ return
+ }
+}
+
// ShowClusterMasters renders the cluster masters page
func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) {
// Get cluster masters data
diff --git a/weed/admin/handlers/maintenance_handlers.go b/weed/admin/handlers/maintenance_handlers.go
index 4b1f91387..1e2337272 100644
--- a/weed/admin/handlers/maintenance_handlers.go
+++ b/weed/admin/handlers/maintenance_handlers.go
@@ -1,16 +1,24 @@
package handlers
import (
+ "fmt"
"net/http"
+ "reflect"
+ "strconv"
+ "strings"
"time"
"github.com/gin-gonic/gin"
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
- "github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -30,19 +38,31 @@ func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers
func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
data, err := h.getMaintenanceQueueData()
if err != nil {
+ glog.Infof("DEBUG ShowMaintenanceQueue: error getting data: %v", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
+ glog.Infof("DEBUG ShowMaintenanceQueue: got data with %d tasks", len(data.Tasks))
+ if data.Stats != nil {
+ glog.Infof("DEBUG ShowMaintenanceQueue: stats = {pending: %d, running: %d, completed: %d}",
+ data.Stats.PendingTasks, data.Stats.RunningTasks, data.Stats.CompletedToday)
+ } else {
+ glog.Infof("DEBUG ShowMaintenanceQueue: stats is nil")
+ }
+
// Render HTML template
c.Header("Content-Type", "text/html")
maintenanceComponent := app.MaintenanceQueue(data)
layoutComponent := layout.Layout(c, maintenanceComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
+ glog.Infof("DEBUG ShowMaintenanceQueue: render error: %v", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
return
}
+
+ glog.Infof("DEBUG ShowMaintenanceQueue: template rendered successfully")
}
// ShowMaintenanceWorkers displays the maintenance workers page
@@ -72,9 +92,12 @@ func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) {
return
}
- // Render HTML template
+ // Get the schema for dynamic form rendering
+ schema := maintenance.GetMaintenanceConfigSchema()
+
+ // Render HTML template using schema-driven approach
c.Header("Content-Type", "text/html")
- configComponent := app.MaintenanceConfig(config)
+ configComponent := app.MaintenanceConfigSchema(config, schema)
layoutComponent := layout.Layout(c, configComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
@@ -87,20 +110,20 @@ func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) {
func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
taskTypeName := c.Param("taskType")
- // Get the task type
- taskType := maintenance.GetMaintenanceTaskType(taskTypeName)
- if taskType == "" {
- c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found"})
+ // Get the schema for this task type
+ schema := tasks.GetTaskConfigSchema(taskTypeName)
+ if schema == nil {
+ c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found or no schema available"})
return
}
- // Get the UI provider for this task type
+ // Get the UI provider for current configuration
uiRegistry := tasks.GetGlobalUIRegistry()
typesRegistry := tasks.GetGlobalTypesRegistry()
var provider types.TaskUIProvider
for workerTaskType := range typesRegistry.GetAllDetectors() {
- if string(workerTaskType) == string(taskType) {
+ if string(workerTaskType) == taskTypeName {
provider = uiRegistry.GetProvider(workerTaskType)
break
}
@@ -111,73 +134,23 @@ func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
return
}
- // Try to get templ UI provider first - temporarily disabled
- // templUIProvider := getTemplUIProvider(taskType)
- var configSections []components.ConfigSectionData
-
- // Temporarily disabled templ UI provider
- // if templUIProvider != nil {
- // // Use the new templ-based UI provider
- // currentConfig := templUIProvider.GetCurrentConfig()
- // sections, err := templUIProvider.RenderConfigSections(currentConfig)
- // if err != nil {
- // c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render configuration sections: " + err.Error()})
- // return
- // }
- // configSections = sections
- // } else {
- // Fallback to basic configuration for providers that haven't been migrated yet
- configSections = []components.ConfigSectionData{
- {
- Title: "Configuration Settings",
- Icon: "fas fa-cogs",
- Description: "Configure task detection and scheduling parameters",
- Fields: []interface{}{
- components.CheckboxFieldData{
- FormFieldData: components.FormFieldData{
- Name: "enabled",
- Label: "Enable Task",
- Description: "Whether this task type should be enabled",
- },
- Checked: true,
- },
- components.NumberFieldData{
- FormFieldData: components.FormFieldData{
- Name: "max_concurrent",
- Label: "Max Concurrent Tasks",
- Description: "Maximum number of concurrent tasks",
- Required: true,
- },
- Value: 2,
- Step: "1",
- Min: floatPtr(1),
- },
- components.DurationFieldData{
- FormFieldData: components.FormFieldData{
- Name: "scan_interval",
- Label: "Scan Interval",
- Description: "How often to scan for tasks",
- Required: true,
- },
- Value: "30m",
- },
- },
- },
- }
- // } // End of disabled templ UI provider else block
-
- // Create task configuration data using templ components
- configData := &app.TaskConfigTemplData{
- TaskType: taskType,
- TaskName: provider.GetDisplayName(),
- TaskIcon: provider.GetIcon(),
- Description: provider.GetDescription(),
- ConfigSections: configSections,
- }
-
- // Render HTML template using templ components
+ // Get current configuration
+ currentConfig := provider.GetCurrentConfig()
+
+ // Note: Do NOT apply schema defaults to current config as it overrides saved values
+ // Only apply defaults when creating new configs, not when displaying existing ones
+
+ // Create task configuration data
+ configData := &maintenance.TaskConfigData{
+ TaskType: maintenance.MaintenanceTaskType(taskTypeName),
+ TaskName: schema.DisplayName,
+ TaskIcon: schema.Icon,
+ Description: schema.Description,
+ }
+
+ // Render HTML template using schema-based approach
c.Header("Content-Type", "text/html")
- taskConfigComponent := app.TaskConfigTempl(configData)
+ taskConfigComponent := app.TaskConfigSchema(configData, schema, currentConfig)
layoutComponent := layout.Layout(c, taskConfigComponent)
err := layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
@@ -186,19 +159,10 @@ func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
}
}
-// UpdateTaskConfig updates configuration for a specific task type
+// UpdateTaskConfig updates task configuration from form
func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
taskTypeName := c.Param("taskType")
-
- // Get the task type
- taskType := maintenance.GetMaintenanceTaskType(taskTypeName)
- if taskType == "" {
- c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found"})
- return
- }
-
- // Try to get templ UI provider first - temporarily disabled
- // templUIProvider := getTemplUIProvider(taskType)
+ taskType := types.TaskType(taskTypeName)
// Parse form data
err := c.Request.ParseForm()
@@ -207,31 +171,100 @@ func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
return
}
- // Convert form data to map
- formData := make(map[string][]string)
+ // Debug logging - show received form data
+ glog.V(1).Infof("Received form data for task type %s:", taskTypeName)
for key, values := range c.Request.PostForm {
- formData[key] = values
- }
-
- var config interface{}
-
- // Temporarily disabled templ UI provider
- // if templUIProvider != nil {
- // // Use the new templ-based UI provider
- // config, err = templUIProvider.ParseConfigForm(formData)
- // if err != nil {
- // c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
- // return
- // }
- // // Apply configuration using templ provider
- // err = templUIProvider.ApplyConfig(config)
- // if err != nil {
- // c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
- // return
- // }
- // } else {
- // Fallback to old UI provider for tasks that haven't been migrated yet
- // Fallback to old UI provider for tasks that haven't been migrated yet
+ glog.V(1).Infof(" %s: %v", key, values)
+ }
+
+ // Get the task configuration schema
+ schema := tasks.GetTaskConfigSchema(taskTypeName)
+ if schema == nil {
+ c.JSON(http.StatusNotFound, gin.H{"error": "Schema not found for task type: " + taskTypeName})
+ return
+ }
+
+ // Create a new config instance based on task type and apply schema defaults
+ var config TaskConfig
+ switch taskType {
+ case types.TaskTypeVacuum:
+ config = &vacuum.Config{}
+ case types.TaskTypeBalance:
+ config = &balance.Config{}
+ case types.TaskTypeErasureCoding:
+ config = &erasure_coding.Config{}
+ default:
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Unsupported task type: " + taskTypeName})
+ return
+ }
+
+ // Apply schema defaults first using type-safe method
+ if err := schema.ApplyDefaultsToConfig(config); err != nil {
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply defaults: " + err.Error()})
+ return
+ }
+
+ // First, get the current configuration to preserve existing values
+ currentUIRegistry := tasks.GetGlobalUIRegistry()
+ currentTypesRegistry := tasks.GetGlobalTypesRegistry()
+
+ var currentProvider types.TaskUIProvider
+ for workerTaskType := range currentTypesRegistry.GetAllDetectors() {
+ if string(workerTaskType) == string(taskType) {
+ currentProvider = currentUIRegistry.GetProvider(workerTaskType)
+ break
+ }
+ }
+
+ if currentProvider != nil {
+ // Copy current config values to the new config
+ currentConfig := currentProvider.GetCurrentConfig()
+ if currentConfigProtobuf, ok := currentConfig.(TaskConfig); ok {
+ // Apply current values using protobuf directly - no map conversion needed!
+ currentPolicy := currentConfigProtobuf.ToTaskPolicy()
+ if err := config.FromTaskPolicy(currentPolicy); err != nil {
+ glog.Warningf("Failed to load current config for %s: %v", taskTypeName, err)
+ }
+ }
+ }
+
+ // Parse form data using schema-based approach (this will override with new values)
+ err = h.parseTaskConfigFromForm(c.Request.PostForm, schema, config)
+ if err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
+ return
+ }
+
+ // Debug logging - show parsed config values
+ switch taskType {
+ case types.TaskTypeVacuum:
+ if vacuumConfig, ok := config.(*vacuum.Config); ok {
+ glog.V(1).Infof("Parsed vacuum config - GarbageThreshold: %f, MinVolumeAgeSeconds: %d, MinIntervalSeconds: %d",
+ vacuumConfig.GarbageThreshold, vacuumConfig.MinVolumeAgeSeconds, vacuumConfig.MinIntervalSeconds)
+ }
+ case types.TaskTypeErasureCoding:
+ if ecConfig, ok := config.(*erasure_coding.Config); ok {
+ glog.V(1).Infof("Parsed EC config - FullnessRatio: %f, QuietForSeconds: %d, MinSizeMB: %d, CollectionFilter: '%s'",
+ ecConfig.FullnessRatio, ecConfig.QuietForSeconds, ecConfig.MinSizeMB, ecConfig.CollectionFilter)
+ }
+ case types.TaskTypeBalance:
+ if balanceConfig, ok := config.(*balance.Config); ok {
+ glog.V(1).Infof("Parsed balance config - Enabled: %v, MaxConcurrent: %d, ScanIntervalSeconds: %d, ImbalanceThreshold: %f, MinServerCount: %d",
+ balanceConfig.Enabled, balanceConfig.MaxConcurrent, balanceConfig.ScanIntervalSeconds, balanceConfig.ImbalanceThreshold, balanceConfig.MinServerCount)
+ }
+ }
+
+ // Validate the configuration
+ if validationErrors := schema.ValidateConfig(config); len(validationErrors) > 0 {
+ errorMessages := make([]string, len(validationErrors))
+ for i, err := range validationErrors {
+ errorMessages[i] = err.Error()
+ }
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Configuration validation failed", "details": errorMessages})
+ return
+ }
+
+ // Apply configuration using UIProvider
uiRegistry := tasks.GetGlobalUIRegistry()
typesRegistry := tasks.GetGlobalTypesRegistry()
@@ -248,25 +281,153 @@ func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
return
}
- // Parse configuration from form using old provider
- config, err = provider.ParseConfigForm(formData)
+ // Apply configuration using provider
+ err = provider.ApplyTaskConfig(config)
if err != nil {
- c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
return
}
- // Apply configuration using old provider
- err = provider.ApplyConfig(config)
- if err != nil {
- c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
- return
+ // Save task configuration to protobuf file using ConfigPersistence
+ if h.adminServer != nil && h.adminServer.GetConfigPersistence() != nil {
+ err = h.saveTaskConfigToProtobuf(taskType, config)
+ if err != nil {
+ glog.Warningf("Failed to save task config to protobuf file: %v", err)
+ // Don't fail the request, just log the warning
+ }
+ }
+
+ // Trigger a configuration reload in the maintenance manager
+ if h.adminServer != nil {
+ if manager := h.adminServer.GetMaintenanceManager(); manager != nil {
+ err = manager.ReloadTaskConfigurations()
+ if err != nil {
+ glog.Warningf("Failed to reload task configurations: %v", err)
+ } else {
+ glog.V(1).Infof("Successfully reloaded task configurations after updating %s", taskTypeName)
+ }
+ }
}
- // } // End of disabled templ UI provider else block
// Redirect back to task configuration page
c.Redirect(http.StatusSeeOther, "/maintenance/config/"+taskTypeName)
}
+// parseTaskConfigFromForm parses form data using schema definitions
+func (h *MaintenanceHandlers) parseTaskConfigFromForm(formData map[string][]string, schema *tasks.TaskConfigSchema, config interface{}) error {
+ configValue := reflect.ValueOf(config)
+ if configValue.Kind() == reflect.Ptr {
+ configValue = configValue.Elem()
+ }
+
+ if configValue.Kind() != reflect.Struct {
+ return fmt.Errorf("config must be a struct or pointer to struct")
+ }
+
+ configType := configValue.Type()
+
+ for i := 0; i < configValue.NumField(); i++ {
+ field := configValue.Field(i)
+ fieldType := configType.Field(i)
+
+ // Handle embedded structs recursively
+ if fieldType.Anonymous && field.Kind() == reflect.Struct {
+ err := h.parseTaskConfigFromForm(formData, schema, field.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("error parsing embedded struct %s: %w", fieldType.Name, err)
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" || jsonTag == "-" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ // Find corresponding schema field
+ schemaField := schema.GetFieldByName(jsonTag)
+ if schemaField == nil {
+ continue
+ }
+
+ // Parse value based on field type
+ if err := h.parseFieldFromForm(formData, schemaField, field); err != nil {
+ return fmt.Errorf("error parsing field %s: %w", schemaField.DisplayName, err)
+ }
+ }
+
+ return nil
+}
+
+// parseFieldFromForm parses a single field value from form data
+func (h *MaintenanceHandlers) parseFieldFromForm(formData map[string][]string, schemaField *config.Field, fieldValue reflect.Value) error {
+ if !fieldValue.CanSet() {
+ return nil
+ }
+
+ switch schemaField.Type {
+ case config.FieldTypeBool:
+ // Checkbox fields - present means true, absent means false
+ _, exists := formData[schemaField.JSONName]
+ fieldValue.SetBool(exists)
+
+ case config.FieldTypeInt:
+ if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
+ if intVal, err := strconv.Atoi(values[0]); err != nil {
+ return fmt.Errorf("invalid integer value: %s", values[0])
+ } else {
+ fieldValue.SetInt(int64(intVal))
+ }
+ }
+
+ case config.FieldTypeFloat:
+ if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
+ if floatVal, err := strconv.ParseFloat(values[0], 64); err != nil {
+ return fmt.Errorf("invalid float value: %s", values[0])
+ } else {
+ fieldValue.SetFloat(floatVal)
+ }
+ }
+
+ case config.FieldTypeString:
+ if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
+ fieldValue.SetString(values[0])
+ }
+
+ case config.FieldTypeInterval:
+ // Parse interval fields with value + unit
+ valueKey := schemaField.JSONName + "_value"
+ unitKey := schemaField.JSONName + "_unit"
+
+ if valueStrs, ok := formData[valueKey]; ok && len(valueStrs) > 0 {
+ value, err := strconv.Atoi(valueStrs[0])
+ if err != nil {
+ return fmt.Errorf("invalid interval value: %s", valueStrs[0])
+ }
+
+ unit := "minutes" // default
+ if unitStrs, ok := formData[unitKey]; ok && len(unitStrs) > 0 {
+ unit = unitStrs[0]
+ }
+
+ // Convert to seconds
+ seconds := config.IntervalValueUnitToSeconds(value, unit)
+ fieldValue.SetInt(int64(seconds))
+ }
+
+ default:
+ return fmt.Errorf("unsupported field type: %s", schemaField.Type)
+ }
+
+ return nil
+}
+
// UpdateMaintenanceConfig updates maintenance configuration from form
func (h *MaintenanceHandlers) UpdateMaintenanceConfig(c *gin.Context) {
var config maintenance.MaintenanceConfig
@@ -302,36 +463,50 @@ func (h *MaintenanceHandlers) getMaintenanceQueueData() (*maintenance.Maintenanc
return nil, err
}
- return &maintenance.MaintenanceQueueData{
+ data := &maintenance.MaintenanceQueueData{
Tasks: tasks,
Workers: workers,
Stats: stats,
LastUpdated: time.Now(),
- }, nil
+ }
+
+ return data, nil
}
func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStats, error) {
- // This would integrate with the maintenance queue to get real statistics
- // For now, return mock data
- return &maintenance.QueueStats{
- PendingTasks: 5,
- RunningTasks: 2,
- CompletedToday: 15,
- FailedToday: 1,
- TotalTasks: 23,
- }, nil
+ // Use the exported method from AdminServer
+ return h.adminServer.GetMaintenanceQueueStats()
}
func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) {
- // This would integrate with the maintenance queue to get real tasks
- // For now, return mock data
- return []*maintenance.MaintenanceTask{}, nil
+ // Call the maintenance manager directly to get all tasks
+ if h.adminServer == nil {
+ return []*maintenance.MaintenanceTask{}, nil
+ }
+
+ manager := h.adminServer.GetMaintenanceManager()
+ if manager == nil {
+ return []*maintenance.MaintenanceTask{}, nil
+ }
+
+ // Get ALL tasks using empty parameters - this should match what the API returns
+ allTasks := manager.GetTasks("", "", 0)
+ return allTasks, nil
}
func (h *MaintenanceHandlers) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) {
- // This would integrate with the maintenance system to get real workers
- // For now, return mock data
- return []*maintenance.MaintenanceWorker{}, nil
+ // Get workers from the admin server's maintenance manager
+ if h.adminServer == nil {
+ return []*maintenance.MaintenanceWorker{}, nil
+ }
+
+ if h.adminServer.GetMaintenanceManager() == nil {
+ return []*maintenance.MaintenanceWorker{}, nil
+ }
+
+ // Get workers from the maintenance manager
+ workers := h.adminServer.GetMaintenanceManager().GetWorkers()
+ return workers, nil
}
func (h *MaintenanceHandlers) getMaintenanceConfig() (*maintenance.MaintenanceConfigData, error) {
@@ -344,40 +519,25 @@ func (h *MaintenanceHandlers) updateMaintenanceConfig(config *maintenance.Mainte
return h.adminServer.UpdateMaintenanceConfigData(config)
}
-// floatPtr is a helper function to create float64 pointers
-func floatPtr(f float64) *float64 {
- return &f
-}
-
-// Global templ UI registry - temporarily disabled
-// var globalTemplUIRegistry *types.UITemplRegistry
-
-// initTemplUIRegistry initializes the global templ UI registry - temporarily disabled
-func initTemplUIRegistry() {
- // Temporarily disabled due to missing types
- // if globalTemplUIRegistry == nil {
- // globalTemplUIRegistry = types.NewUITemplRegistry()
- // // Register vacuum templ UI provider using shared instances
- // vacuumDetector, vacuumScheduler := vacuum.GetSharedInstances()
- // vacuum.RegisterUITempl(globalTemplUIRegistry, vacuumDetector, vacuumScheduler)
- // // Register erasure coding templ UI provider using shared instances
- // erasureCodingDetector, erasureCodingScheduler := erasure_coding.GetSharedInstances()
- // erasure_coding.RegisterUITempl(globalTemplUIRegistry, erasureCodingDetector, erasureCodingScheduler)
- // // Register balance templ UI provider using shared instances
- // balanceDetector, balanceScheduler := balance.GetSharedInstances()
- // balance.RegisterUITempl(globalTemplUIRegistry, balanceDetector, balanceScheduler)
- // }
-}
+// saveTaskConfigToProtobuf saves task configuration to protobuf file
+func (h *MaintenanceHandlers) saveTaskConfigToProtobuf(taskType types.TaskType, config TaskConfig) error {
+ configPersistence := h.adminServer.GetConfigPersistence()
+ if configPersistence == nil {
+ return fmt.Errorf("config persistence not available")
+ }
-// getTemplUIProvider gets the templ UI provider for a task type - temporarily disabled
-func getTemplUIProvider(taskType maintenance.MaintenanceTaskType) interface{} {
- // initTemplUIRegistry()
- // Convert maintenance task type to worker task type
- // typesRegistry := tasks.GetGlobalTypesRegistry()
- // for workerTaskType := range typesRegistry.GetAllDetectors() {
- // if string(workerTaskType) == string(taskType) {
- // return globalTemplUIRegistry.GetProvider(workerTaskType)
- // }
- // }
- return nil
+ // Use the new ToTaskPolicy method - much simpler and more maintainable!
+ taskPolicy := config.ToTaskPolicy()
+
+ // Save using task-specific methods
+ switch taskType {
+ case types.TaskTypeVacuum:
+ return configPersistence.SaveVacuumTaskPolicy(taskPolicy)
+ case types.TaskTypeErasureCoding:
+ return configPersistence.SaveErasureCodingTaskPolicy(taskPolicy)
+ case types.TaskTypeBalance:
+ return configPersistence.SaveBalanceTaskPolicy(taskPolicy)
+ default:
+ return fmt.Errorf("unsupported task type for protobuf persistence: %s", taskType)
+ }
}
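
The schema-driven form parsing above relies on a naming convention for interval fields: the browser submits two keys, `<json_name>_value` and `<json_name>_unit`, and the handler collapses the pair into a single seconds field on the config struct. The sketch below illustrates that round trip for a hypothetical `scan_interval_seconds` field; the `toSeconds` helper only mirrors the unit factors exercised in the handler tests (minutes ×60, hours ×3600, days ×86400) and stands in for `config.IntervalValueUnitToSeconds`, whose exact signature is not shown here.

    package main

    import (
    	"fmt"
    	"net/url"
    	"strconv"
    )

    // toSeconds mirrors the conversion the handler delegates to
    // config.IntervalValueUnitToSeconds (an assumption based on the unit
    // factors used in the handler tests).
    func toSeconds(value int, unit string) int {
    	switch unit {
    	case "days":
    		return value * 86400
    	case "hours":
    		return value * 3600
    	default: // the handler falls back to "minutes" when no unit is submitted
    		return value * 60
    	}
    }

    func main() {
    	// Form keys as the task configuration page would submit them for one interval field.
    	form := url.Values{
    		"scan_interval_seconds_value": {"30"},
    		"scan_interval_seconds_unit":  {"minutes"},
    	}
    	value, err := strconv.Atoi(form.Get("scan_interval_seconds_value"))
    	if err != nil {
    		panic(err)
    	}
    	// The parser stores a single seconds value on the config struct.
    	fmt.Println(toSeconds(value, form.Get("scan_interval_seconds_unit"))) // prints 1800
    }
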
diff --git a/weed/admin/handlers/maintenance_handlers_test.go b/weed/admin/handlers/maintenance_handlers_test.go
new file mode 100644
index 000000000..fa5a365f1
--- /dev/null
+++ b/weed/admin/handlers/maintenance_handlers_test.go
@@ -0,0 +1,389 @@
+package handlers
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
+)
+
+func TestParseTaskConfigFromForm_WithEmbeddedStruct(t *testing.T) {
+ // Create a maintenance handlers instance for testing
+ h := &MaintenanceHandlers{}
+
+ // Test with balance config
+ t.Run("Balance Config", func(t *testing.T) {
+ // Simulate form data
+ formData := url.Values{
+ "enabled": {"on"}, // checkbox field
+ "scan_interval_seconds_value": {"30"}, // interval field
+ "scan_interval_seconds_unit": {"minutes"}, // interval unit
+ "max_concurrent": {"2"}, // number field
+ "imbalance_threshold": {"0.15"}, // float field
+ "min_server_count": {"3"}, // number field
+ }
+
+ // Get schema
+ schema := tasks.GetTaskConfigSchema("balance")
+ if schema == nil {
+ t.Fatal("Failed to get balance schema")
+ }
+
+ // Create config instance
+ config := &balance.Config{}
+
+ // Parse form data
+ err := h.parseTaskConfigFromForm(formData, schema, config)
+ if err != nil {
+ t.Fatalf("Failed to parse form data: %v", err)
+ }
+
+ // Verify embedded struct fields were set correctly
+ if !config.Enabled {
+ t.Errorf("Expected Enabled=true, got %v", config.Enabled)
+ }
+
+ if config.ScanIntervalSeconds != 1800 { // 30 minutes * 60
+ t.Errorf("Expected ScanIntervalSeconds=1800, got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.MaxConcurrent != 2 {
+ t.Errorf("Expected MaxConcurrent=2, got %v", config.MaxConcurrent)
+ }
+
+ // Verify balance-specific fields were set correctly
+ if config.ImbalanceThreshold != 0.15 {
+ t.Errorf("Expected ImbalanceThreshold=0.15, got %v", config.ImbalanceThreshold)
+ }
+
+ if config.MinServerCount != 3 {
+ t.Errorf("Expected MinServerCount=3, got %v", config.MinServerCount)
+ }
+ })
+
+ // Test with vacuum config
+ t.Run("Vacuum Config", func(t *testing.T) {
+ // Simulate form data
+ formData := url.Values{
+ // "enabled" field omitted to simulate unchecked checkbox
+ "scan_interval_seconds_value": {"4"}, // interval field
+ "scan_interval_seconds_unit": {"hours"}, // interval unit
+ "max_concurrent": {"3"}, // number field
+ "garbage_threshold": {"0.4"}, // float field
+ "min_volume_age_seconds_value": {"2"}, // interval field
+ "min_volume_age_seconds_unit": {"days"}, // interval unit
+ "min_interval_seconds_value": {"1"}, // interval field
+ "min_interval_seconds_unit": {"days"}, // interval unit
+ }
+
+ // Get schema
+ schema := tasks.GetTaskConfigSchema("vacuum")
+ if schema == nil {
+ t.Fatal("Failed to get vacuum schema")
+ }
+
+ // Create config instance
+ config := &vacuum.Config{}
+
+ // Parse form data
+ err := h.parseTaskConfigFromForm(formData, schema, config)
+ if err != nil {
+ t.Fatalf("Failed to parse form data: %v", err)
+ }
+
+ // Verify embedded struct fields were set correctly
+ if config.Enabled {
+ t.Errorf("Expected Enabled=false, got %v", config.Enabled)
+ }
+
+ if config.ScanIntervalSeconds != 14400 { // 4 hours * 3600
+ t.Errorf("Expected ScanIntervalSeconds=14400, got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.MaxConcurrent != 3 {
+ t.Errorf("Expected MaxConcurrent=3, got %v", config.MaxConcurrent)
+ }
+
+ // Verify vacuum-specific fields were set correctly
+ if config.GarbageThreshold != 0.4 {
+ t.Errorf("Expected GarbageThreshold=0.4, got %v", config.GarbageThreshold)
+ }
+
+ if config.MinVolumeAgeSeconds != 172800 { // 2 days * 86400
+ t.Errorf("Expected MinVolumeAgeSeconds=172800, got %v", config.MinVolumeAgeSeconds)
+ }
+
+ if config.MinIntervalSeconds != 86400 { // 1 day * 86400
+ t.Errorf("Expected MinIntervalSeconds=86400, got %v", config.MinIntervalSeconds)
+ }
+ })
+
+ // Test with erasure coding config
+ t.Run("Erasure Coding Config", func(t *testing.T) {
+ // Simulate form data
+ formData := url.Values{
+ "enabled": {"on"}, // checkbox field
+ "scan_interval_seconds_value": {"2"}, // interval field
+ "scan_interval_seconds_unit": {"hours"}, // interval unit
+ "max_concurrent": {"1"}, // number field
+ "quiet_for_seconds_value": {"10"}, // interval field
+ "quiet_for_seconds_unit": {"minutes"}, // interval unit
+ "fullness_ratio": {"0.85"}, // float field
+ "collection_filter": {"test_collection"}, // string field
+ "min_size_mb": {"50"}, // number field
+ }
+
+ // Get schema
+ schema := tasks.GetTaskConfigSchema("erasure_coding")
+ if schema == nil {
+ t.Fatal("Failed to get erasure_coding schema")
+ }
+
+ // Create config instance
+ config := &erasure_coding.Config{}
+
+ // Parse form data
+ err := h.parseTaskConfigFromForm(formData, schema, config)
+ if err != nil {
+ t.Fatalf("Failed to parse form data: %v", err)
+ }
+
+ // Verify embedded struct fields were set correctly
+ if !config.Enabled {
+ t.Errorf("Expected Enabled=true, got %v", config.Enabled)
+ }
+
+ if config.ScanIntervalSeconds != 7200 { // 2 hours * 3600
+ t.Errorf("Expected ScanIntervalSeconds=7200, got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.MaxConcurrent != 1 {
+ t.Errorf("Expected MaxConcurrent=1, got %v", config.MaxConcurrent)
+ }
+
+ // Verify erasure coding-specific fields were set correctly
+ if config.QuietForSeconds != 600 { // 10 minutes * 60
+ t.Errorf("Expected QuietForSeconds=600, got %v", config.QuietForSeconds)
+ }
+
+ if config.FullnessRatio != 0.85 {
+ t.Errorf("Expected FullnessRatio=0.85, got %v", config.FullnessRatio)
+ }
+
+ if config.CollectionFilter != "test_collection" {
+ t.Errorf("Expected CollectionFilter='test_collection', got %v", config.CollectionFilter)
+ }
+
+ if config.MinSizeMB != 50 {
+ t.Errorf("Expected MinSizeMB=50, got %v", config.MinSizeMB)
+ }
+ })
+}
+
+func TestConfigurationValidation(t *testing.T) {
+ // Test that config structs can be validated and converted to protobuf format
+ taskTypes := []struct {
+ name string
+ config interface{}
+ }{
+ {
+ "balance",
+ &balance.Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 2400,
+ MaxConcurrent: 3,
+ },
+ ImbalanceThreshold: 0.18,
+ MinServerCount: 4,
+ },
+ },
+ {
+ "vacuum",
+ &vacuum.Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: false,
+ ScanIntervalSeconds: 7200,
+ MaxConcurrent: 2,
+ },
+ GarbageThreshold: 0.35,
+ MinVolumeAgeSeconds: 86400,
+ MinIntervalSeconds: 604800,
+ },
+ },
+ {
+ "erasure_coding",
+ &erasure_coding.Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 3600,
+ MaxConcurrent: 1,
+ },
+ QuietForSeconds: 900,
+ FullnessRatio: 0.9,
+ CollectionFilter: "important",
+ MinSizeMB: 100,
+ },
+ },
+ }
+
+ for _, test := range taskTypes {
+ t.Run(test.name, func(t *testing.T) {
+ // Test that configs can be converted to protobuf TaskPolicy
+ switch cfg := test.config.(type) {
+ case *balance.Config:
+ policy := cfg.ToTaskPolicy()
+ if policy == nil {
+ t.Fatal("ToTaskPolicy returned nil")
+ }
+ if policy.Enabled != cfg.Enabled {
+ t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
+ }
+ if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
+ t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
+ }
+ case *vacuum.Config:
+ policy := cfg.ToTaskPolicy()
+ if policy == nil {
+ t.Fatal("ToTaskPolicy returned nil")
+ }
+ if policy.Enabled != cfg.Enabled {
+ t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
+ }
+ if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
+ t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
+ }
+ case *erasure_coding.Config:
+ policy := cfg.ToTaskPolicy()
+ if policy == nil {
+ t.Fatal("ToTaskPolicy returned nil")
+ }
+ if policy.Enabled != cfg.Enabled {
+ t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
+ }
+ if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
+ t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
+ }
+ default:
+ t.Fatalf("Unknown config type: %T", test.config)
+ }
+
+ // Test that configs can be validated
+ switch cfg := test.config.(type) {
+ case *balance.Config:
+ if err := cfg.Validate(); err != nil {
+ t.Errorf("Validation failed: %v", err)
+ }
+ case *vacuum.Config:
+ if err := cfg.Validate(); err != nil {
+ t.Errorf("Validation failed: %v", err)
+ }
+ case *erasure_coding.Config:
+ if err := cfg.Validate(); err != nil {
+ t.Errorf("Validation failed: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestParseFieldFromForm_EdgeCases(t *testing.T) {
+ h := &MaintenanceHandlers{}
+
+ // Test checkbox parsing (boolean fields)
+ t.Run("Checkbox Fields", func(t *testing.T) {
+ tests := []struct {
+ name string
+ formData url.Values
+ expectedValue bool
+ }{
+ {"Checked checkbox", url.Values{"test_field": {"on"}}, true},
+ {"Unchecked checkbox", url.Values{}, false},
+ {"Empty value checkbox", url.Values{"test_field": {""}}, true}, // Present but empty means checked
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ schema := &tasks.TaskConfigSchema{
+ Schema: config.Schema{
+ Fields: []*config.Field{
+ {
+ JSONName: "test_field",
+ Type: config.FieldTypeBool,
+ InputType: "checkbox",
+ },
+ },
+ },
+ }
+
+ type TestConfig struct {
+ TestField bool `json:"test_field"`
+ }
+
+ config := &TestConfig{}
+ err := h.parseTaskConfigFromForm(test.formData, schema, config)
+ if err != nil {
+ t.Fatalf("parseTaskConfigFromForm failed: %v", err)
+ }
+
+ if config.TestField != test.expectedValue {
+ t.Errorf("Expected %v, got %v", test.expectedValue, config.TestField)
+ }
+ })
+ }
+ })
+
+ // Test interval parsing
+ t.Run("Interval Fields", func(t *testing.T) {
+ tests := []struct {
+ name string
+ value string
+ unit string
+ expectedSecs int
+ }{
+ {"Minutes", "30", "minutes", 1800},
+ {"Hours", "2", "hours", 7200},
+ {"Days", "1", "days", 86400},
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ formData := url.Values{
+ "test_field_value": {test.value},
+ "test_field_unit": {test.unit},
+ }
+
+ schema := &tasks.TaskConfigSchema{
+ Schema: config.Schema{
+ Fields: []*config.Field{
+ {
+ JSONName: "test_field",
+ Type: config.FieldTypeInterval,
+ InputType: "interval",
+ },
+ },
+ },
+ }
+
+ type TestConfig struct {
+ TestField int `json:"test_field"`
+ }
+
+ config := &TestConfig{}
+ err := h.parseTaskConfigFromForm(formData, schema, config)
+ if err != nil {
+ t.Fatalf("parseTaskConfigFromForm failed: %v", err)
+ }
+
+ if config.TestField != test.expectedSecs {
+ t.Errorf("Expected %d seconds, got %d", test.expectedSecs, config.TestField)
+ }
+ })
+ }
+ })
+}
diff --git a/weed/admin/handlers/task_config_interface.go b/weed/admin/handlers/task_config_interface.go
new file mode 100644
index 000000000..dd22c5250
--- /dev/null
+++ b/weed/admin/handlers/task_config_interface.go
@@ -0,0 +1,25 @@
+package handlers
+
+import (
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+// TaskConfig defines the interface that all task configuration types must implement
+type TaskConfig interface {
+ config.ConfigWithDefaults // Extends ConfigWithDefaults for type-safe schema operations
+
+ // Common methods from BaseConfig
+ IsEnabled() bool
+ SetEnabled(enabled bool)
+
+ // Protobuf serialization methods - no more map[string]interface{}!
+ ToTaskPolicy() *worker_pb.TaskPolicy
+ FromTaskPolicy(policy *worker_pb.TaskPolicy) error
+}
+
+// TaskConfigProvider defines the interface for creating specific task config types
+type TaskConfigProvider interface {
+ NewConfig() TaskConfig
+ GetTaskType() string
+}
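
For orientation, a hypothetical config type satisfying the protobuf-facing half of the TaskConfig contract might look like the sketch below. The struct name and fields are invented for illustration, and the config.ConfigWithDefaults methods are omitted, so this is not a complete implementation of the interface; the real vacuum, balance, and erasure_coding configs under weed/worker/tasks are the authoritative ones.

    package example

    import (
    	"fmt"

    	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
    )

    // ExampleTaskConfig is a hypothetical config type showing the enable toggle
    // and the protobuf round trip required by TaskConfig.
    type ExampleTaskConfig struct {
    	Enabled       bool `json:"enabled"`
    	MaxConcurrent int  `json:"max_concurrent"`
    }

    func (c *ExampleTaskConfig) IsEnabled() bool         { return c.Enabled }
    func (c *ExampleTaskConfig) SetEnabled(enabled bool) { c.Enabled = enabled }

    func (c *ExampleTaskConfig) ToTaskPolicy() *worker_pb.TaskPolicy {
    	return &worker_pb.TaskPolicy{
    		Enabled:       c.Enabled,
    		MaxConcurrent: int32(c.MaxConcurrent),
    	}
    }

    func (c *ExampleTaskConfig) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
    	if policy == nil {
    		return fmt.Errorf("nil task policy")
    	}
    	c.Enabled = policy.Enabled
    	c.MaxConcurrent = int(policy.MaxConcurrent)
    	return nil
    }
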
diff --git a/weed/admin/maintenance/config_schema.go b/weed/admin/maintenance/config_schema.go
new file mode 100644
index 000000000..c911ad59c
--- /dev/null
+++ b/weed/admin/maintenance/config_schema.go
@@ -0,0 +1,190 @@
+package maintenance
+
+import (
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+)
+
+// Type aliases for backward compatibility
+type ConfigFieldType = config.FieldType
+type ConfigFieldUnit = config.FieldUnit
+type ConfigField = config.Field
+
+// Constant aliases for backward compatibility
+const (
+ FieldTypeBool = config.FieldTypeBool
+ FieldTypeInt = config.FieldTypeInt
+ FieldTypeDuration = config.FieldTypeDuration
+ FieldTypeInterval = config.FieldTypeInterval
+ FieldTypeString = config.FieldTypeString
+ FieldTypeFloat = config.FieldTypeFloat
+)
+
+const (
+ UnitSeconds = config.UnitSeconds
+ UnitMinutes = config.UnitMinutes
+ UnitHours = config.UnitHours
+ UnitDays = config.UnitDays
+ UnitCount = config.UnitCount
+ UnitNone = config.UnitNone
+)
+
+// Function aliases for backward compatibility
+var (
+ SecondsToIntervalValueUnit = config.SecondsToIntervalValueUnit
+ IntervalValueUnitToSeconds = config.IntervalValueUnitToSeconds
+)
+
+// MaintenanceConfigSchema defines the schema for maintenance configuration
+type MaintenanceConfigSchema struct {
+ config.Schema // Embed common schema functionality
+}
+
+// GetMaintenanceConfigSchema returns the schema for maintenance configuration
+func GetMaintenanceConfigSchema() *MaintenanceConfigSchema {
+ return &MaintenanceConfigSchema{
+ Schema: config.Schema{
+ Fields: []*config.Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: config.FieldTypeBool,
+ DefaultValue: true,
+ Required: false,
+ DisplayName: "Enable Maintenance System",
+ Description: "When enabled, the system will automatically scan for and execute maintenance tasks",
+ HelpText: "Toggle this to enable or disable the entire maintenance system",
+ InputType: "checkbox",
+ CSSClasses: "form-check-input",
+ },
+ {
+ Name: "scan_interval_seconds",
+ JSONName: "scan_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 30 * 60, // 30 minutes in seconds
+ MinValue: 1 * 60, // 1 minute
+ MaxValue: 24 * 60 * 60, // 24 hours
+ Required: true,
+ DisplayName: "Scan Interval",
+ Description: "How often to scan for maintenance tasks",
+ HelpText: "The system will check for new maintenance tasks at this interval",
+ Placeholder: "30",
+ Unit: config.UnitMinutes,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "worker_timeout_seconds",
+ JSONName: "worker_timeout_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 5 * 60, // 5 minutes
+ MinValue: 1 * 60, // 1 minute
+ MaxValue: 60 * 60, // 1 hour
+ Required: true,
+ DisplayName: "Worker Timeout",
+ Description: "How long to wait for worker heartbeat before considering it inactive",
+ HelpText: "Workers that don't send heartbeats within this time are considered offline",
+ Placeholder: "5",
+ Unit: config.UnitMinutes,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "task_timeout_seconds",
+ JSONName: "task_timeout_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 2 * 60 * 60, // 2 hours
+ MinValue: 10 * 60, // 10 minutes
+ MaxValue: 24 * 60 * 60, // 24 hours
+ Required: true,
+ DisplayName: "Task Timeout",
+ Description: "Maximum time allowed for a task to complete",
+ HelpText: "Tasks that exceed this duration will be marked as failed",
+ Placeholder: "2",
+ Unit: config.UnitHours,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "retry_delay_seconds",
+ JSONName: "retry_delay_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 15 * 60, // 15 minutes
+ MinValue: 1 * 60, // 1 minute
+ MaxValue: 24 * 60 * 60, // 24 hours
+ Required: true,
+ DisplayName: "Retry Delay",
+ Description: "How long to wait before retrying a failed task",
+ HelpText: "Failed tasks will be retried after this delay",
+ Placeholder: "15",
+ Unit: config.UnitMinutes,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "max_retries",
+ JSONName: "max_retries",
+ Type: config.FieldTypeInt,
+ DefaultValue: 3,
+ MinValue: 0,
+ MaxValue: 10,
+ Required: true,
+ DisplayName: "Max Retries",
+ Description: "Maximum number of times to retry a failed task",
+ HelpText: "Tasks that fail more than this many times will be marked as permanently failed",
+ Placeholder: "3",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "cleanup_interval_seconds",
+ JSONName: "cleanup_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 24 * 60 * 60, // 24 hours
+ MinValue: 1 * 60 * 60, // 1 hour
+ MaxValue: 7 * 24 * 60 * 60, // 7 days
+ Required: true,
+ DisplayName: "Cleanup Interval",
+ Description: "How often to run maintenance cleanup operations",
+ HelpText: "Removes old task records and temporary files at this interval",
+ Placeholder: "24",
+ Unit: config.UnitHours,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "task_retention_seconds",
+ JSONName: "task_retention_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 7 * 24 * 60 * 60, // 7 days
+ MinValue: 1 * 24 * 60 * 60, // 1 day
+ MaxValue: 30 * 24 * 60 * 60, // 30 days
+ Required: true,
+ DisplayName: "Task Retention",
+ Description: "How long to keep completed task records",
+ HelpText: "Task history older than this duration will be automatically deleted",
+ Placeholder: "7",
+ Unit: config.UnitDays,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "global_max_concurrent",
+ JSONName: "global_max_concurrent",
+ Type: config.FieldTypeInt,
+ DefaultValue: 10,
+ MinValue: 1,
+ MaxValue: 100,
+ Required: true,
+ DisplayName: "Global Max Concurrent Tasks",
+ Description: "Maximum number of maintenance tasks that can run simultaneously across all workers",
+ HelpText: "Limits the total number of maintenance operations to control system load",
+ Placeholder: "10",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ },
+ },
+ }
+}
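
Because each schema field carries its own range and default, callers can validate a candidate value without restating the limits, using the same GetFieldByName and ValidateValue helpers that validateFieldWithSchema uses elsewhere in this change. A minimal sketch, passing the value as an int exactly as that helper does:

    package example

    import (
    	"fmt"

    	"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
    )

    func checkScanInterval(seconds int) error {
    	schema := maintenance.GetMaintenanceConfigSchema()
    	field := schema.GetFieldByName("scan_interval_seconds")
    	if field == nil {
    		return fmt.Errorf("scan_interval_seconds not in schema")
    	}
    	// ValidateValue applies the field's MinValue/MaxValue rules (1 minute to 24 hours here).
    	if err := field.ValidateValue(seconds); err != nil {
    		return fmt.Errorf("rejected scan interval: %w", err)
    	}
    	return nil
    }
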
diff --git a/weed/admin/maintenance/config_verification.go b/weed/admin/maintenance/config_verification.go
new file mode 100644
index 000000000..0ac40aad1
--- /dev/null
+++ b/weed/admin/maintenance/config_verification.go
@@ -0,0 +1,124 @@
+package maintenance
+
+import (
+ "fmt"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+// VerifyProtobufConfig demonstrates that the protobuf configuration system is working
+func VerifyProtobufConfig() error {
+ // Create configuration manager
+ configManager := NewMaintenanceConfigManager()
+ config := configManager.GetConfig()
+
+ // Verify basic configuration
+ if !config.Enabled {
+ return fmt.Errorf("expected config to be enabled by default")
+ }
+
+ if config.ScanIntervalSeconds != 30*60 {
+ return fmt.Errorf("expected scan interval to be 1800 seconds, got %d", config.ScanIntervalSeconds)
+ }
+
+ // Verify policy configuration
+ if config.Policy == nil {
+ return fmt.Errorf("expected policy to be configured")
+ }
+
+ if config.Policy.GlobalMaxConcurrent != 4 {
+ return fmt.Errorf("expected global max concurrent to be 4, got %d", config.Policy.GlobalMaxConcurrent)
+ }
+
+ // Verify task policies
+ vacuumPolicy := config.Policy.TaskPolicies["vacuum"]
+ if vacuumPolicy == nil {
+ return fmt.Errorf("expected vacuum policy to be configured")
+ }
+
+ if !vacuumPolicy.Enabled {
+ return fmt.Errorf("expected vacuum policy to be enabled")
+ }
+
+ // Verify typed configuration access
+ vacuumConfig := vacuumPolicy.GetVacuumConfig()
+ if vacuumConfig == nil {
+ return fmt.Errorf("expected vacuum config to be accessible")
+ }
+
+ if vacuumConfig.GarbageThreshold != 0.3 {
+ return fmt.Errorf("expected garbage threshold to be 0.3, got %f", vacuumConfig.GarbageThreshold)
+ }
+
+ // Verify helper functions work
+ if !IsTaskEnabled(config.Policy, "vacuum") {
+ return fmt.Errorf("expected vacuum task to be enabled via helper function")
+ }
+
+ maxConcurrent := GetMaxConcurrent(config.Policy, "vacuum")
+ if maxConcurrent != 2 {
+ return fmt.Errorf("expected vacuum max concurrent to be 2, got %d", maxConcurrent)
+ }
+
+ // Verify erasure coding configuration
+ ecPolicy := config.Policy.TaskPolicies["erasure_coding"]
+ if ecPolicy == nil {
+ return fmt.Errorf("expected EC policy to be configured")
+ }
+
+ ecConfig := ecPolicy.GetErasureCodingConfig()
+ if ecConfig == nil {
+ return fmt.Errorf("expected EC config to be accessible")
+ }
+
+ // Verify configurable EC fields only
+ if ecConfig.FullnessRatio <= 0 || ecConfig.FullnessRatio > 1 {
+ return fmt.Errorf("expected EC config to have valid fullness ratio (0-1), got %f", ecConfig.FullnessRatio)
+ }
+
+ return nil
+}
+
+// GetProtobufConfigSummary returns a summary of the current protobuf configuration
+func GetProtobufConfigSummary() string {
+ configManager := NewMaintenanceConfigManager()
+ config := configManager.GetConfig()
+
+ summary := "SeaweedFS Protobuf Maintenance Configuration:\n"
+ summary += fmt.Sprintf(" Enabled: %v\n", config.Enabled)
+ summary += fmt.Sprintf(" Scan Interval: %d seconds\n", config.ScanIntervalSeconds)
+ summary += fmt.Sprintf(" Max Retries: %d\n", config.MaxRetries)
+
+ // Policy starts out nil in the default configuration and is populated later
+ // from the per-task configuration files, so guard against dereferencing it.
+ if config.Policy == nil {
+ summary += " Policy: not loaded\n"
+ return summary
+ }
+
+ summary += fmt.Sprintf(" Global Max Concurrent: %d\n", config.Policy.GlobalMaxConcurrent)
+ summary += fmt.Sprintf(" Task Policies: %d configured\n", len(config.Policy.TaskPolicies))
+
+ for taskType, policy := range config.Policy.TaskPolicies {
+ summary += fmt.Sprintf(" %s: enabled=%v, max_concurrent=%d\n",
+ taskType, policy.Enabled, policy.MaxConcurrent)
+ }
+
+ return summary
+}
+
+// CreateCustomConfig demonstrates creating a custom protobuf configuration
+func CreateCustomConfig() *worker_pb.MaintenanceConfig {
+ return &worker_pb.MaintenanceConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 60 * 60, // 1 hour
+ MaxRetries: 5,
+ Policy: &worker_pb.MaintenancePolicy{
+ GlobalMaxConcurrent: 8,
+ TaskPolicies: map[string]*worker_pb.TaskPolicy{
+ "custom_vacuum": {
+ Enabled: true,
+ MaxConcurrent: 4,
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: 0.5,
+ MinVolumeAgeHours: 48,
+ },
+ },
+ },
+ },
+ },
+ }
+}
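
These helpers are intended to be callable from tests or a startup self-check. A usage sketch follows; note that with the defaults in maintenance_config_proto.go the Policy field starts out nil, so VerifyProtobufConfig is expected to return an error until the per-task policies have been loaded from their configuration files.

    package example

    import (
    	"fmt"

    	"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
    )

    func selfCheck() {
    	if err := maintenance.VerifyProtobufConfig(); err != nil {
    		// With an unpopulated policy this reports "expected policy to be configured".
    		fmt.Printf("maintenance config verification failed: %v\n", err)
    	}
    	// Human-readable dump of the current protobuf-backed configuration.
    	fmt.Print(maintenance.GetProtobufConfigSummary())
    }
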
diff --git a/weed/admin/maintenance/maintenance_config_proto.go b/weed/admin/maintenance/maintenance_config_proto.go
new file mode 100644
index 000000000..67a6b74be
--- /dev/null
+++ b/weed/admin/maintenance/maintenance_config_proto.go
@@ -0,0 +1,287 @@
+package maintenance
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+// MaintenanceConfigManager handles protobuf-based configuration
+type MaintenanceConfigManager struct {
+ config *worker_pb.MaintenanceConfig
+}
+
+// NewMaintenanceConfigManager creates a new config manager with defaults
+func NewMaintenanceConfigManager() *MaintenanceConfigManager {
+ return &MaintenanceConfigManager{
+ config: DefaultMaintenanceConfigProto(),
+ }
+}
+
+// DefaultMaintenanceConfigProto returns default configuration as protobuf
+func DefaultMaintenanceConfigProto() *worker_pb.MaintenanceConfig {
+ return &worker_pb.MaintenanceConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 30 * 60, // 30 minutes
+ WorkerTimeoutSeconds: 5 * 60, // 5 minutes
+ TaskTimeoutSeconds: 2 * 60 * 60, // 2 hours
+ RetryDelaySeconds: 15 * 60, // 15 minutes
+ MaxRetries: 3,
+ CleanupIntervalSeconds: 24 * 60 * 60, // 24 hours
+ TaskRetentionSeconds: 7 * 24 * 60 * 60, // 7 days
+ // Policy field will be populated dynamically from separate task configuration files
+ Policy: nil,
+ }
+}
+
+// GetConfig returns the current configuration
+func (mcm *MaintenanceConfigManager) GetConfig() *worker_pb.MaintenanceConfig {
+ return mcm.config
+}
+
+// Type-safe configuration accessors
+
+// GetVacuumConfig returns vacuum-specific configuration for a task type
+func (mcm *MaintenanceConfigManager) GetVacuumConfig(taskType string) *worker_pb.VacuumTaskConfig {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ if vacuumConfig := policy.GetVacuumConfig(); vacuumConfig != nil {
+ return vacuumConfig
+ }
+ }
+ // Return defaults if not configured
+ return &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: 0.3,
+ MinVolumeAgeHours: 24,
+ MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days
+ }
+}
+
+// GetErasureCodingConfig returns EC-specific configuration for a task type
+func (mcm *MaintenanceConfigManager) GetErasureCodingConfig(taskType string) *worker_pb.ErasureCodingTaskConfig {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ if ecConfig := policy.GetErasureCodingConfig(); ecConfig != nil {
+ return ecConfig
+ }
+ }
+ // Return defaults if not configured
+ return &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: 0.95,
+ QuietForSeconds: 3600,
+ MinVolumeSizeMb: 100,
+ CollectionFilter: "",
+ }
+}
+
+// GetBalanceConfig returns balance-specific configuration for a task type
+func (mcm *MaintenanceConfigManager) GetBalanceConfig(taskType string) *worker_pb.BalanceTaskConfig {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ if balanceConfig := policy.GetBalanceConfig(); balanceConfig != nil {
+ return balanceConfig
+ }
+ }
+ // Return defaults if not configured
+ return &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: 0.2,
+ MinServerCount: 2,
+ }
+}
+
+// GetReplicationConfig returns replication-specific configuration for a task type
+func (mcm *MaintenanceConfigManager) GetReplicationConfig(taskType string) *worker_pb.ReplicationTaskConfig {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ if replicationConfig := policy.GetReplicationConfig(); replicationConfig != nil {
+ return replicationConfig
+ }
+ }
+ // Return defaults if not configured
+ return &worker_pb.ReplicationTaskConfig{
+ TargetReplicaCount: 2,
+ }
+}
+
+// Typed convenience methods for getting task configurations
+
+// GetVacuumTaskConfigForType returns vacuum configuration for a specific task type
+func (mcm *MaintenanceConfigManager) GetVacuumTaskConfigForType(taskType string) *worker_pb.VacuumTaskConfig {
+ return GetVacuumTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType))
+}
+
+// GetErasureCodingTaskConfigForType returns erasure coding configuration for a specific task type
+func (mcm *MaintenanceConfigManager) GetErasureCodingTaskConfigForType(taskType string) *worker_pb.ErasureCodingTaskConfig {
+ return GetErasureCodingTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType))
+}
+
+// GetBalanceTaskConfigForType returns balance configuration for a specific task type
+func (mcm *MaintenanceConfigManager) GetBalanceTaskConfigForType(taskType string) *worker_pb.BalanceTaskConfig {
+ return GetBalanceTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType))
+}
+
+// GetReplicationTaskConfigForType returns replication configuration for a specific task type
+func (mcm *MaintenanceConfigManager) GetReplicationTaskConfigForType(taskType string) *worker_pb.ReplicationTaskConfig {
+ return GetReplicationTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType))
+}
+
+// Helper methods
+
+func (mcm *MaintenanceConfigManager) getTaskPolicy(taskType string) *worker_pb.TaskPolicy {
+ if mcm.config.Policy != nil && mcm.config.Policy.TaskPolicies != nil {
+ return mcm.config.Policy.TaskPolicies[taskType]
+ }
+ return nil
+}
+
+// IsTaskEnabled returns whether a task type is enabled
+func (mcm *MaintenanceConfigManager) IsTaskEnabled(taskType string) bool {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ return policy.Enabled
+ }
+ return false
+}
+
+// GetMaxConcurrent returns the max concurrent limit for a task type
+func (mcm *MaintenanceConfigManager) GetMaxConcurrent(taskType string) int32 {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ return policy.MaxConcurrent
+ }
+ return 1 // Default
+}
+
+// GetRepeatInterval returns the repeat interval for a task type in seconds
+func (mcm *MaintenanceConfigManager) GetRepeatInterval(taskType string) int32 {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ return policy.RepeatIntervalSeconds
+ }
+ if mcm.config.Policy != nil {
+ return mcm.config.Policy.DefaultRepeatIntervalSeconds
+ }
+ return 0
+}
+
+// GetCheckInterval returns the check interval for a task type in seconds
+func (mcm *MaintenanceConfigManager) GetCheckInterval(taskType string) int32 {
+ if policy := mcm.getTaskPolicy(taskType); policy != nil {
+ return policy.CheckIntervalSeconds
+ }
+ if mcm.config.Policy != nil {
+ return mcm.config.Policy.DefaultCheckIntervalSeconds
+ }
+ return 0
+}
+
+// Duration accessor methods
+
+// GetScanInterval returns the scan interval as a time.Duration
+func (mcm *MaintenanceConfigManager) GetScanInterval() time.Duration {
+ return time.Duration(mcm.config.ScanIntervalSeconds) * time.Second
+}
+
+// GetWorkerTimeout returns the worker timeout as a time.Duration
+func (mcm *MaintenanceConfigManager) GetWorkerTimeout() time.Duration {
+ return time.Duration(mcm.config.WorkerTimeoutSeconds) * time.Second
+}
+
+// GetTaskTimeout returns the task timeout as a time.Duration
+func (mcm *MaintenanceConfigManager) GetTaskTimeout() time.Duration {
+ return time.Duration(mcm.config.TaskTimeoutSeconds) * time.Second
+}
+
+// GetRetryDelay returns the retry delay as a time.Duration
+func (mcm *MaintenanceConfigManager) GetRetryDelay() time.Duration {
+ return time.Duration(mcm.config.RetryDelaySeconds) * time.Second
+}
+
+// GetCleanupInterval returns the cleanup interval as a time.Duration
+func (mcm *MaintenanceConfigManager) GetCleanupInterval() time.Duration {
+ return time.Duration(mcm.config.CleanupIntervalSeconds) * time.Second
+}
+
+// GetTaskRetention returns the task retention period as a time.Duration
+func (mcm *MaintenanceConfigManager) GetTaskRetention() time.Duration {
+ return time.Duration(mcm.config.TaskRetentionSeconds) * time.Second
+}
+
+// ValidateMaintenanceConfigWithSchema validates protobuf maintenance configuration using ConfigField rules
+func ValidateMaintenanceConfigWithSchema(config *worker_pb.MaintenanceConfig) error {
+ if config == nil {
+ return fmt.Errorf("configuration cannot be nil")
+ }
+
+ // Get the schema to access field validation rules
+ schema := GetMaintenanceConfigSchema()
+
+ // Validate each field individually using the ConfigField rules
+ if err := validateFieldWithSchema(schema, "enabled", config.Enabled); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "scan_interval_seconds", int(config.ScanIntervalSeconds)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "worker_timeout_seconds", int(config.WorkerTimeoutSeconds)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "task_timeout_seconds", int(config.TaskTimeoutSeconds)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "retry_delay_seconds", int(config.RetryDelaySeconds)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "max_retries", int(config.MaxRetries)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "cleanup_interval_seconds", int(config.CleanupIntervalSeconds)); err != nil {
+ return err
+ }
+
+ if err := validateFieldWithSchema(schema, "task_retention_seconds", int(config.TaskRetentionSeconds)); err != nil {
+ return err
+ }
+
+ // Validate policy fields if present
+ if config.Policy != nil {
+ // Note: These field names might need to be adjusted based on the actual schema
+ if err := validatePolicyField("global_max_concurrent", int(config.Policy.GlobalMaxConcurrent)); err != nil {
+ return err
+ }
+
+ if err := validatePolicyField("default_repeat_interval_seconds", int(config.Policy.DefaultRepeatIntervalSeconds)); err != nil {
+ return err
+ }
+
+ if err := validatePolicyField("default_check_interval_seconds", int(config.Policy.DefaultCheckIntervalSeconds)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// validateFieldWithSchema validates a single field using its ConfigField definition
+func validateFieldWithSchema(schema *MaintenanceConfigSchema, fieldName string, value interface{}) error {
+ field := schema.GetFieldByName(fieldName)
+ if field == nil {
+ // Field not in schema, skip validation
+ return nil
+ }
+
+ return field.ValidateValue(value)
+}
+
+// validatePolicyField validates policy fields (simplified validation for now).
+// Field names and units must match what ValidateMaintenanceConfigWithSchema passes,
+// which supplies the interval fields in seconds.
+func validatePolicyField(fieldName string, value int) error {
+ switch fieldName {
+ case "global_max_concurrent":
+ if value < 1 || value > 20 {
+ return fmt.Errorf("global max concurrent must be between 1 and 20, got %d", value)
+ }
+ case "default_repeat_interval_seconds":
+ if value < 1*60*60 || value > 168*60*60 {
+ return fmt.Errorf("default repeat interval must be between 1 and 168 hours, got %d seconds", value)
+ }
+ case "default_check_interval_seconds":
+ if value < 1*60*60 || value > 168*60*60 {
+ return fmt.Errorf("default check interval must be between 1 and 168 hours, got %d seconds", value)
+ }
+ }
+ return nil
+}
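
Putting the accessors together, a caller that wants typed settings rather than raw protobuf can use the manager directly. This sketch uses only methods defined above; the "vacuum" task key mirrors the one used in config_verification.go, and with no policy loaded the task-level accessors fall back to the hard-coded defaults shown earlier.

    package example

    import (
    	"fmt"

    	"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
    )

    func describeVacuum() {
    	mgr := maintenance.NewMaintenanceConfigManager()

    	// Duration accessors convert the *_seconds fields into time.Duration values.
    	fmt.Printf("scan every %v, task timeout %v\n", mgr.GetScanInterval(), mgr.GetTaskTimeout())

    	// Task-level accessors fall back to defaults when no policy is loaded.
    	fmt.Printf("vacuum enabled=%v max_concurrent=%d\n",
    		mgr.IsTaskEnabled("vacuum"), mgr.GetMaxConcurrent("vacuum"))
    	fmt.Printf("vacuum garbage threshold=%.2f\n", mgr.GetVacuumConfig("vacuum").GarbageThreshold)
    }
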
diff --git a/weed/admin/maintenance/maintenance_integration.go b/weed/admin/maintenance/maintenance_integration.go
index 9a965d38a..1bdd7ffcc 100644
--- a/weed/admin/maintenance/maintenance_integration.go
+++ b/weed/admin/maintenance/maintenance_integration.go
@@ -1,11 +1,20 @@
package maintenance
import (
+ "context"
+ "fmt"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/admin/topology"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/operation"
+ "github.com/seaweedfs/seaweedfs/weed/pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
)
// MaintenanceIntegration bridges the task system with existing maintenance
@@ -17,6 +26,12 @@ type MaintenanceIntegration struct {
maintenanceQueue *MaintenanceQueue
maintenancePolicy *MaintenancePolicy
+ // Pending operations tracker
+ pendingOperations *PendingOperations
+
+ // Active topology for task detection and target selection
+ activeTopology *topology.ActiveTopology
+
// Type conversion maps
taskTypeMap map[types.TaskType]MaintenanceTaskType
revTaskTypeMap map[MaintenanceTaskType]types.TaskType
@@ -31,8 +46,12 @@ func NewMaintenanceIntegration(queue *MaintenanceQueue, policy *MaintenancePolic
uiRegistry: tasks.GetGlobalUIRegistry(), // Use global UI registry with auto-registered UI providers
maintenanceQueue: queue,
maintenancePolicy: policy,
+ pendingOperations: NewPendingOperations(),
}
+ // Initialize active topology with 10 second recent task window
+ integration.activeTopology = topology.NewActiveTopology(10)
+
// Initialize type conversion maps
integration.initializeTypeMaps()
@@ -96,7 +115,7 @@ func (s *MaintenanceIntegration) registerAllTasks() {
s.buildTaskTypeMappings()
// Configure tasks from policy
- s.configureTasksFromPolicy()
+ s.ConfigureTasksFromPolicy()
registeredTaskTypes := make([]string, 0, len(s.taskTypeMap))
for _, maintenanceTaskType := range s.taskTypeMap {
@@ -105,8 +124,8 @@ func (s *MaintenanceIntegration) registerAllTasks() {
glog.V(1).Infof("Registered tasks: %v", registeredTaskTypes)
}
-// configureTasksFromPolicy dynamically configures all registered tasks based on the maintenance policy
-func (s *MaintenanceIntegration) configureTasksFromPolicy() {
+// ConfigureTasksFromPolicy dynamically configures all registered tasks based on the maintenance policy
+func (s *MaintenanceIntegration) ConfigureTasksFromPolicy() {
if s.maintenancePolicy == nil {
return
}
@@ -143,7 +162,7 @@ func (s *MaintenanceIntegration) configureDetectorFromPolicy(taskType types.Task
// Convert task system type to maintenance task type for policy lookup
maintenanceTaskType, exists := s.taskTypeMap[taskType]
if exists {
- enabled := s.maintenancePolicy.IsTaskEnabled(maintenanceTaskType)
+ enabled := IsTaskEnabled(s.maintenancePolicy, maintenanceTaskType)
basicDetector.SetEnabled(enabled)
glog.V(3).Infof("Set enabled=%v for detector %s", enabled, taskType)
}
@@ -172,14 +191,14 @@ func (s *MaintenanceIntegration) configureSchedulerFromPolicy(taskType types.Tas
// Set enabled status if scheduler supports it
if enableableScheduler, ok := scheduler.(interface{ SetEnabled(bool) }); ok {
- enabled := s.maintenancePolicy.IsTaskEnabled(maintenanceTaskType)
+ enabled := IsTaskEnabled(s.maintenancePolicy, maintenanceTaskType)
enableableScheduler.SetEnabled(enabled)
glog.V(3).Infof("Set enabled=%v for scheduler %s", enabled, taskType)
}
// Set max concurrent if scheduler supports it
if concurrentScheduler, ok := scheduler.(interface{ SetMaxConcurrent(int) }); ok {
- maxConcurrent := s.maintenancePolicy.GetMaxConcurrent(maintenanceTaskType)
+ maxConcurrent := GetMaxConcurrent(s.maintenancePolicy, maintenanceTaskType)
if maxConcurrent > 0 {
concurrentScheduler.SetMaxConcurrent(maxConcurrent)
glog.V(3).Infof("Set max concurrent=%d for scheduler %s", maxConcurrent, taskType)
@@ -193,11 +212,20 @@ func (s *MaintenanceIntegration) configureSchedulerFromPolicy(taskType types.Tas
// ScanWithTaskDetectors performs a scan using the task system
func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.VolumeHealthMetrics) ([]*TaskDetectionResult, error) {
+ // Note: ActiveTopology gets updated from topology info instead of volume metrics
+ glog.V(2).Infof("Processed %d volume metrics for task detection", len(volumeMetrics))
+
+ // Filter out volumes with pending operations to avoid duplicates
+ filteredMetrics := s.pendingOperations.FilterVolumeMetricsExcludingPending(volumeMetrics)
+
+ glog.V(1).Infof("Scanning %d volumes (filtered from %d) excluding pending operations",
+ len(filteredMetrics), len(volumeMetrics))
+
var allResults []*TaskDetectionResult
// Create cluster info
clusterInfo := &types.ClusterInfo{
- TotalVolumes: len(volumeMetrics),
+ TotalVolumes: len(filteredMetrics),
LastUpdated: time.Now(),
}
@@ -209,17 +237,26 @@ func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.Vo
glog.V(2).Infof("Running detection for task type: %s", taskType)
- results, err := detector.ScanForTasks(volumeMetrics, clusterInfo)
+ results, err := detector.ScanForTasks(filteredMetrics, clusterInfo)
if err != nil {
glog.Errorf("Failed to scan for %s tasks: %v", taskType, err)
continue
}
- // Convert results to existing system format
+ // Convert results to existing system format and check for conflicts
for _, result := range results {
existingResult := s.convertToExistingFormat(result)
if existingResult != nil {
- allResults = append(allResults, existingResult)
+ // Double-check for conflicts with pending operations
+ opType := s.mapMaintenanceTaskTypeToPendingOperationType(existingResult.TaskType)
+ if !s.pendingOperations.WouldConflictWithPending(existingResult.VolumeID, opType) {
+ // Plan destination for operations that need it
+ s.planDestinationForTask(existingResult, opType)
+ allResults = append(allResults, existingResult)
+ } else {
+ glog.V(2).Infof("Skipping task %s for volume %d due to conflict with pending operation",
+ existingResult.TaskType, existingResult.VolumeID)
+ }
}
}
@@ -229,6 +266,11 @@ func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.Vo
return allResults, nil
}
+// UpdateTopologyInfo updates the active topology with the latest topology information (including empty servers) for task detection and destination planning
+func (s *MaintenanceIntegration) UpdateTopologyInfo(topologyInfo *master_pb.TopologyInfo) error {
+ return s.activeTopology.UpdateTopology(topologyInfo)
+}
+
// convertToExistingFormat converts task results to existing system format using dynamic mapping
func (s *MaintenanceIntegration) convertToExistingFormat(result *types.TaskDetectionResult) *TaskDetectionResult {
// Convert types using mapping tables
@@ -241,49 +283,62 @@ func (s *MaintenanceIntegration) convertToExistingFormat(result *types.TaskDetec
existingPriority, exists := s.priorityMap[result.Priority]
if !exists {
- glog.Warningf("Unknown priority %d, defaulting to normal", result.Priority)
+ glog.Warningf("Unknown priority %s, defaulting to normal", result.Priority)
existingPriority = PriorityNormal
}
return &TaskDetectionResult{
- TaskType: existingType,
- VolumeID: result.VolumeID,
- Server: result.Server,
- Collection: result.Collection,
- Priority: existingPriority,
- Reason: result.Reason,
- Parameters: result.Parameters,
- ScheduleAt: result.ScheduleAt,
+ TaskType: existingType,
+ VolumeID: result.VolumeID,
+ Server: result.Server,
+ Collection: result.Collection,
+ Priority: existingPriority,
+ Reason: result.Reason,
+ TypedParams: result.TypedParams,
+ ScheduleAt: result.ScheduleAt,
}
}
// CanScheduleWithTaskSchedulers determines if a task can be scheduled using task schedulers with dynamic type conversion
func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *MaintenanceTask, runningTasks []*MaintenanceTask, availableWorkers []*MaintenanceWorker) bool {
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Checking task %s (type: %s)", task.ID, task.Type)
+
// Convert existing types to task types using mapping
taskType, exists := s.revTaskTypeMap[task.Type]
if !exists {
- glog.V(2).Infof("Unknown task type %s for scheduling, falling back to existing logic", task.Type)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Unknown task type %s for scheduling, falling back to existing logic", task.Type)
return false // Fallback to existing logic for unknown types
}
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Mapped task type %s to %s", task.Type, taskType)
+
// Convert task objects
taskObject := s.convertTaskToTaskSystem(task)
if taskObject == nil {
- glog.V(2).Infof("Failed to convert task %s for scheduling", task.ID)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Failed to convert task %s for scheduling", task.ID)
return false
}
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Successfully converted task %s", task.ID)
+
runningTaskObjects := s.convertTasksToTaskSystem(runningTasks)
workerObjects := s.convertWorkersToTaskSystem(availableWorkers)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Converted %d running tasks and %d workers", len(runningTaskObjects), len(workerObjects))
+
// Get the appropriate scheduler
scheduler := s.taskRegistry.GetScheduler(taskType)
if scheduler == nil {
- glog.V(2).Infof("No scheduler found for task type %s", taskType)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: No scheduler found for task type %s", taskType)
return false
}
- return scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Found scheduler for task type %s", taskType)
+
+ canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects)
+ glog.Infof("DEBUG CanScheduleWithTaskSchedulers: Scheduler decision for task %s: %v", task.ID, canSchedule)
+
+ return canSchedule
}
// convertTaskToTaskSystem converts existing task to task system format using dynamic mapping
@@ -304,14 +359,14 @@ func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask)
}
return &types.Task{
- ID: task.ID,
- Type: taskType,
- Priority: priority,
- VolumeID: task.VolumeID,
- Server: task.Server,
- Collection: task.Collection,
- Parameters: task.Parameters,
- CreatedAt: task.CreatedAt,
+ ID: task.ID,
+ Type: taskType,
+ Priority: priority,
+ VolumeID: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TypedParams: task.TypedParams,
+ CreatedAt: task.CreatedAt,
}
}
@@ -407,3 +462,463 @@ func (s *MaintenanceIntegration) GetAllTaskStats() []*types.TaskStats {
return stats
}
+
+// mapMaintenanceTaskTypeToPendingOperationType converts a maintenance task type to a pending operation type
+func (s *MaintenanceIntegration) mapMaintenanceTaskTypeToPendingOperationType(taskType MaintenanceTaskType) PendingOperationType {
+ switch taskType {
+ case MaintenanceTaskType("balance"):
+ return OpTypeVolumeBalance
+ case MaintenanceTaskType("erasure_coding"):
+ return OpTypeErasureCoding
+ case MaintenanceTaskType("vacuum"):
+ return OpTypeVacuum
+ case MaintenanceTaskType("replication"):
+ return OpTypeReplication
+ default:
+ // For other task types, assume they're volume operations
+ return OpTypeVolumeMove
+ }
+}
+
+// GetPendingOperations returns the pending operations tracker
+func (s *MaintenanceIntegration) GetPendingOperations() *PendingOperations {
+ return s.pendingOperations
+}
+
+// GetActiveTopology returns the active topology for task detection
+func (s *MaintenanceIntegration) GetActiveTopology() *topology.ActiveTopology {
+ return s.activeTopology
+}
+
+// planDestinationForTask plans the destination for a task that requires it and creates typed protobuf parameters
+func (s *MaintenanceIntegration) planDestinationForTask(task *TaskDetectionResult, opType PendingOperationType) {
+ // Only plan destinations for operations that move volumes/shards
+ if opType == OpTypeVacuum {
+ // For vacuum tasks, create VacuumTaskParams
+ s.createVacuumTaskParams(task)
+ return
+ }
+
+ glog.V(1).Infof("Planning destination for %s task on volume %d (server: %s)", task.TaskType, task.VolumeID, task.Server)
+
+ // Use ActiveTopology for destination planning
+ destinationPlan, err := s.planDestinationWithActiveTopology(task, opType)
+
+ if err != nil {
+ glog.Warningf("Failed to plan primary destination for %s task volume %d: %v",
+ task.TaskType, task.VolumeID, err)
+ // Don't return here - still try to create task params which might work with multiple destinations
+ }
+
+ // Create typed protobuf parameters based on operation type
+ switch opType {
+ case OpTypeErasureCoding:
+ if destinationPlan == nil {
+ glog.Warningf("Cannot create EC task for volume %d: destination planning failed", task.VolumeID)
+ return
+ }
+ s.createErasureCodingTaskParams(task, destinationPlan)
+ case OpTypeVolumeMove, OpTypeVolumeBalance:
+ if destinationPlan == nil {
+ glog.Warningf("Cannot create balance task for volume %d: destination planning failed", task.VolumeID)
+ return
+ }
+ s.createBalanceTaskParams(task, destinationPlan.(*topology.DestinationPlan))
+ case OpTypeReplication:
+ if destinationPlan == nil {
+ glog.Warningf("Cannot create replication task for volume %d: destination planning failed", task.VolumeID)
+ return
+ }
+ s.createReplicationTaskParams(task, destinationPlan.(*topology.DestinationPlan))
+ default:
+ glog.V(2).Infof("Unknown operation type for task %s: %v", task.TaskType, opType)
+ }
+
+ if destinationPlan != nil {
+ switch plan := destinationPlan.(type) {
+ case *topology.DestinationPlan:
+ glog.V(1).Infof("Completed destination planning for %s task on volume %d: %s -> %s",
+ task.TaskType, task.VolumeID, task.Server, plan.TargetNode)
+ case *topology.MultiDestinationPlan:
+ glog.V(1).Infof("Completed EC destination planning for volume %d: %s -> %d destinations (racks: %d, DCs: %d)",
+ task.VolumeID, task.Server, len(plan.Plans), plan.SuccessfulRack, plan.SuccessfulDCs)
+ }
+ } else {
+ glog.V(1).Infof("Completed destination planning for %s task on volume %d: no destination planned",
+ task.TaskType, task.VolumeID)
+ }
+}
+
+// createVacuumTaskParams creates typed parameters for vacuum tasks
+func (s *MaintenanceIntegration) createVacuumTaskParams(task *TaskDetectionResult) {
+ // Get configuration from policy instead of using hard-coded values
+ vacuumConfig := GetVacuumTaskConfig(s.maintenancePolicy, MaintenanceTaskType("vacuum"))
+
+ // Use configured values or defaults if config is not available
+ garbageThreshold := 0.3 // Default 30%
+ verifyChecksum := true // Default to verify
+ batchSize := int32(1000) // Default batch size
+ workingDir := "/tmp/seaweedfs_vacuum_work" // Default working directory
+
+ if vacuumConfig != nil {
+ garbageThreshold = vacuumConfig.GarbageThreshold
+ // Note: VacuumTaskConfig has GarbageThreshold, MinVolumeAgeHours, MinIntervalSeconds
+ // Other fields like VerifyChecksum, BatchSize, WorkingDir would need to be added
+ // to the protobuf definition if they should be configurable
+ }
+
+ // Create typed protobuf parameters
+ task.TypedParams = &worker_pb.TaskParams{
+ VolumeId: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TaskParams: &worker_pb.TaskParams_VacuumParams{
+ VacuumParams: &worker_pb.VacuumTaskParams{
+ GarbageThreshold: garbageThreshold,
+ ForceVacuum: false,
+ BatchSize: batchSize,
+ WorkingDir: workingDir,
+ VerifyChecksum: verifyChecksum,
+ },
+ },
+ }
+}
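+
+// Sketch (illustrative, not called anywhere; assumes the standard generated
+// oneof getters in worker_pb): how a consumer could read the typed vacuum
+// parameters back out of TaskParams after createVacuumTaskParams has run.
+func exampleReadVacuumParams(p *worker_pb.TaskParams) (float64, bool) {
+	if vp := p.GetVacuumParams(); vp != nil {
+		return vp.GarbageThreshold, true
+	}
+	return 0, false
+}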
+
+// planDestinationWithActiveTopology uses ActiveTopology to plan destinations
+func (s *MaintenanceIntegration) planDestinationWithActiveTopology(task *TaskDetectionResult, opType PendingOperationType) (interface{}, error) {
+ // Get source node information from topology
+ var sourceRack, sourceDC string
+
+ // Extract rack and DC from topology info
+ topologyInfo := s.activeTopology.GetTopologyInfo()
+ if topologyInfo != nil {
+ for _, dc := range topologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, dataNodeInfo := range rack.DataNodeInfos {
+ if dataNodeInfo.Id == task.Server {
+ sourceDC = dc.Id
+ sourceRack = rack.Id
+ break
+ }
+ }
+ if sourceRack != "" {
+ break
+ }
+ }
+ if sourceDC != "" {
+ break
+ }
+ }
+ }
+
+ switch opType {
+ case OpTypeVolumeBalance, OpTypeVolumeMove:
+ // Plan single destination for balance operation
+ return s.activeTopology.PlanBalanceDestination(task.VolumeID, task.Server, sourceRack, sourceDC, 0)
+
+ case OpTypeErasureCoding:
+ // Plan multiple destinations for EC operation using adaptive shard counts
+ // Start with the default configuration, but fall back to smaller configurations if insufficient disks
+ totalShards := s.getOptimalECShardCount()
+ multiPlan, err := s.activeTopology.PlanECDestinations(task.VolumeID, task.Server, sourceRack, sourceDC, totalShards)
+ if err != nil {
+ return nil, err
+ }
+ if multiPlan != nil && len(multiPlan.Plans) > 0 {
+ // Return the multi-destination plan for EC
+ return multiPlan, nil
+ }
+ return nil, fmt.Errorf("no EC destinations found")
+
+ default:
+ return nil, fmt.Errorf("unsupported operation type for destination planning: %v", opType)
+ }
+}
+
+// createErasureCodingTaskParams creates typed parameters for EC tasks
+func (s *MaintenanceIntegration) createErasureCodingTaskParams(task *TaskDetectionResult, destinationPlan interface{}) {
+ // Determine EC shard counts based on the number of planned destinations
+ multiPlan, ok := destinationPlan.(*topology.MultiDestinationPlan)
+ if !ok {
+ glog.Warningf("EC task for volume %d received unexpected destination plan type", task.VolumeID)
+ task.TypedParams = nil
+ return
+ }
+
+ // Use adaptive shard configuration based on actual planned destinations
+ totalShards := len(multiPlan.Plans)
+ dataShards, parityShards := s.getECShardCounts(totalShards)
+
+ // Extract disk-aware destinations from the multi-destination plan
+ var destinations []*worker_pb.ECDestination
+ var allConflicts []string
+
+ for _, plan := range multiPlan.Plans {
+ allConflicts = append(allConflicts, plan.Conflicts...)
+
+ // Create disk-aware destination
+ destinations = append(destinations, &worker_pb.ECDestination{
+ Node: plan.TargetNode,
+ DiskId: plan.TargetDisk,
+ Rack: plan.TargetRack,
+ DataCenter: plan.TargetDC,
+ PlacementScore: plan.PlacementScore,
+ })
+ }
+
+ glog.V(1).Infof("EC destination planning for volume %d: got %d destinations (%d+%d shards) across %d racks and %d DCs",
+ task.VolumeID, len(destinations), dataShards, parityShards, multiPlan.SuccessfulRack, multiPlan.SuccessfulDCs)
+
+ if len(destinations) == 0 {
+ glog.Warningf("No destinations available for EC task volume %d - rejecting task", task.VolumeID)
+ task.TypedParams = nil
+ return
+ }
+
+ // Collect existing EC shard locations for cleanup
+ existingShardLocations := s.collectExistingEcShardLocations(task.VolumeID)
+
+ // Create EC task parameters
+ ecParams := &worker_pb.ErasureCodingTaskParams{
+ Destinations: destinations, // Disk-aware destinations
+ DataShards: dataShards,
+ ParityShards: parityShards,
+ WorkingDir: "/tmp/seaweedfs_ec_work",
+ MasterClient: "localhost:9333",
+ CleanupSource: true,
+ ExistingShardLocations: existingShardLocations, // Pass existing shards for cleanup
+ }
+
+ // Add placement conflicts if any
+ if len(allConflicts) > 0 {
+ // Remove duplicates
+ conflictMap := make(map[string]bool)
+ var uniqueConflicts []string
+ for _, conflict := range allConflicts {
+ if !conflictMap[conflict] {
+ conflictMap[conflict] = true
+ uniqueConflicts = append(uniqueConflicts, conflict)
+ }
+ }
+ ecParams.PlacementConflicts = uniqueConflicts
+ }
+
+ // Wrap in TaskParams
+ task.TypedParams = &worker_pb.TaskParams{
+ VolumeId: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
+ ErasureCodingParams: ecParams,
+ },
+ }
+
+ glog.V(1).Infof("Created EC task params with %d destinations for volume %d",
+ len(destinations), task.VolumeID)
+}
+
+// createBalanceTaskParams creates typed parameters for balance/move tasks
+func (s *MaintenanceIntegration) createBalanceTaskParams(task *TaskDetectionResult, destinationPlan *topology.DestinationPlan) {
+ // balanceConfig could be used for future config options like ImbalanceThreshold, MinServerCount
+
+ // Create balance task parameters
+ balanceParams := &worker_pb.BalanceTaskParams{
+ DestNode: destinationPlan.TargetNode,
+ EstimatedSize: destinationPlan.ExpectedSize,
+ DestRack: destinationPlan.TargetRack,
+ DestDc: destinationPlan.TargetDC,
+ PlacementScore: destinationPlan.PlacementScore,
+ ForceMove: false, // Default to false
+ TimeoutSeconds: 300, // Default 5 minutes
+ }
+
+ // Add placement conflicts if any
+ if len(destinationPlan.Conflicts) > 0 {
+ balanceParams.PlacementConflicts = destinationPlan.Conflicts
+ }
+
+ // Note: balanceConfig would have ImbalanceThreshold, MinServerCount if needed for future enhancements
+
+ // Wrap in TaskParams
+ task.TypedParams = &worker_pb.TaskParams{
+ VolumeId: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TaskParams: &worker_pb.TaskParams_BalanceParams{
+ BalanceParams: balanceParams,
+ },
+ }
+
+ glog.V(1).Infof("Created balance task params for volume %d: %s -> %s (score: %.2f)",
+ task.VolumeID, task.Server, destinationPlan.TargetNode, destinationPlan.PlacementScore)
+}
+
+// createReplicationTaskParams creates typed parameters for replication tasks
+func (s *MaintenanceIntegration) createReplicationTaskParams(task *TaskDetectionResult, destinationPlan *topology.DestinationPlan) {
+ // replicationConfig could be used for future config options like TargetReplicaCount
+
+ // Create replication task parameters
+ replicationParams := &worker_pb.ReplicationTaskParams{
+ DestNode: destinationPlan.TargetNode,
+ DestRack: destinationPlan.TargetRack,
+ DestDc: destinationPlan.TargetDC,
+ PlacementScore: destinationPlan.PlacementScore,
+ }
+
+ // Add placement conflicts if any
+ if len(destinationPlan.Conflicts) > 0 {
+ replicationParams.PlacementConflicts = destinationPlan.Conflicts
+ }
+
+ // Note: replicationConfig would have TargetReplicaCount if needed for future enhancements
+
+ // Wrap in TaskParams
+ task.TypedParams = &worker_pb.TaskParams{
+ VolumeId: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TaskParams: &worker_pb.TaskParams_ReplicationParams{
+ ReplicationParams: replicationParams,
+ },
+ }
+
+ glog.V(1).Infof("Created replication task params for volume %d: %s -> %s",
+ task.VolumeID, task.Server, destinationPlan.TargetNode)
+}
+
+// getOptimalECShardCount returns the optimal number of EC shards based on available disks
+// Uses a simplified approach to avoid blocking during UI access
+func (s *MaintenanceIntegration) getOptimalECShardCount() int {
+ // Try to get available disks quickly, but don't block if topology is busy
+ availableDisks := s.getAvailableDisksQuickly()
+
+ // EC configurations in order of preference: (data+parity=total)
+ // Use smaller configurations for smaller clusters
+ if availableDisks >= 14 {
+ glog.V(1).Infof("Using default EC configuration: 10+4=14 shards for %d available disks", availableDisks)
+ return 14 // Default: 10+4
+ } else if availableDisks >= 6 {
+ glog.V(1).Infof("Using small cluster EC configuration: 4+2=6 shards for %d available disks", availableDisks)
+ return 6 // Small cluster: 4+2
+ } else if availableDisks >= 4 {
+ glog.V(1).Infof("Using minimal EC configuration: 3+1=4 shards for %d available disks", availableDisks)
+ return 4 // Minimal: 3+1
+ } else {
+ glog.V(1).Infof("Using very small cluster EC configuration: 2+1=3 shards for %d available disks", availableDisks)
+ return 3 // Very small: 2+1
+ }
+}
+
+// getAvailableDisksQuickly returns available disk count with a fast path to avoid UI blocking
+func (s *MaintenanceIntegration) getAvailableDisksQuickly() int {
+ // Use ActiveTopology's optimized disk counting if available
+	// Pass the erasure coding task type with an empty node filter for a general availability check
+ allDisks := s.activeTopology.GetAvailableDisks(topology.TaskTypeErasureCoding, "")
+ if len(allDisks) > 0 {
+ return len(allDisks)
+ }
+
+ // Fallback: try to count from topology but don't hold locks for too long
+ topologyInfo := s.activeTopology.GetTopologyInfo()
+ return s.countAvailableDisks(topologyInfo)
+}
+
+// countAvailableDisks counts the total number of available disks in the topology
+func (s *MaintenanceIntegration) countAvailableDisks(topologyInfo *master_pb.TopologyInfo) int {
+ if topologyInfo == nil {
+ return 0
+ }
+
+ diskCount := 0
+ for _, dc := range topologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, node := range rack.DataNodeInfos {
+ diskCount += len(node.DiskInfos)
+ }
+ }
+ }
+
+ return diskCount
+}
+
+// getECShardCounts determines data and parity shard counts for a given total
+func (s *MaintenanceIntegration) getECShardCounts(totalShards int) (int32, int32) {
+ // Map total shards to (data, parity) configurations
+ switch totalShards {
+ case 14:
+ return 10, 4 // Default: 10+4
+ case 9:
+ return 6, 3 // Medium: 6+3
+ case 6:
+ return 4, 2 // Small: 4+2
+ case 4:
+ return 3, 1 // Minimal: 3+1
+ case 3:
+ return 2, 1 // Very small: 2+1
+ default:
+ // For any other total, try to maintain roughly 3:1 or 4:1 ratio
+ if totalShards >= 4 {
+ parityShards := totalShards / 4
+ if parityShards < 1 {
+ parityShards = 1
+ }
+ dataShards := totalShards - parityShards
+ return int32(dataShards), int32(parityShards)
+ }
+ // Fallback for very small clusters
+ return int32(totalShards - 1), 1
+ }
+}
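+
+// Worked example (illustrative only, not called anywhere): for totals the
+// switch above does not list, the roughly 4:1 split gives 8 -> 6+2 and
+// 12 -> 9+3, while the small-cluster fallback gives 2 -> 1+1.
+func exampleECShardSplits(s *MaintenanceIntegration) [][2]int32 {
+	var splits [][2]int32
+	for _, total := range []int{8, 12, 2} {
+		data, parity := s.getECShardCounts(total)
+		splits = append(splits, [2]int32{data, parity})
+	}
+	return splits // [[6 2] [9 3] [1 1]]
+}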
+
+// collectExistingEcShardLocations queries the master for existing EC shard locations during planning
+func (s *MaintenanceIntegration) collectExistingEcShardLocations(volumeId uint32) []*worker_pb.ExistingECShardLocation {
+ var existingShardLocations []*worker_pb.ExistingECShardLocation
+
+ // Use insecure connection for simplicity - in production this might be configurable
+ grpcDialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
+
+ err := operation.WithMasterServerClient(false, pb.ServerAddress("localhost:9333"), grpcDialOption,
+ func(masterClient master_pb.SeaweedClient) error {
+ req := &master_pb.LookupEcVolumeRequest{
+ VolumeId: volumeId,
+ }
+ resp, err := masterClient.LookupEcVolume(context.Background(), req)
+ if err != nil {
+ // If volume doesn't exist as EC volume, that's fine - just no existing shards
+ glog.V(1).Infof("LookupEcVolume for volume %d returned: %v (this is normal if no existing EC shards)", volumeId, err)
+ return nil
+ }
+
+ // Group shard locations by server
+ serverShardMap := make(map[string][]uint32)
+ for _, shardIdLocation := range resp.ShardIdLocations {
+ shardId := uint32(shardIdLocation.ShardId)
+ for _, location := range shardIdLocation.Locations {
+ serverAddr := pb.NewServerAddressFromLocation(location)
+ serverShardMap[string(serverAddr)] = append(serverShardMap[string(serverAddr)], shardId)
+ }
+ }
+
+ // Convert to protobuf format
+ for serverAddr, shardIds := range serverShardMap {
+ existingShardLocations = append(existingShardLocations, &worker_pb.ExistingECShardLocation{
+ Node: serverAddr,
+ ShardIds: shardIds,
+ })
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ glog.Errorf("Failed to lookup existing EC shards from master for volume %d: %v", volumeId, err)
+ // Return empty list - cleanup will be skipped but task can continue
+ return []*worker_pb.ExistingECShardLocation{}
+ }
+
+ if len(existingShardLocations) > 0 {
+ glog.V(1).Infof("Found existing EC shards for volume %d on %d servers during planning", volumeId, len(existingShardLocations))
+ }
+
+ return existingShardLocations
+}
diff --git a/weed/admin/maintenance/maintenance_manager.go b/weed/admin/maintenance/maintenance_manager.go
index 5d87d817e..4aab137e0 100644
--- a/weed/admin/maintenance/maintenance_manager.go
+++ b/weed/admin/maintenance/maintenance_manager.go
@@ -7,8 +7,76 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
)
+// buildPolicyFromTaskConfigs loads task configurations from separate files and builds a MaintenancePolicy
+func buildPolicyFromTaskConfigs() *worker_pb.MaintenancePolicy {
+ policy := &worker_pb.MaintenancePolicy{
+ GlobalMaxConcurrent: 4,
+ DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds
+ TaskPolicies: make(map[string]*worker_pb.TaskPolicy),
+ }
+
+ // Load vacuum task configuration
+ if vacuumConfig := vacuum.LoadConfigFromPersistence(nil); vacuumConfig != nil {
+ policy.TaskPolicies["vacuum"] = &worker_pb.TaskPolicy{
+ Enabled: vacuumConfig.Enabled,
+ MaxConcurrent: int32(vacuumConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: float64(vacuumConfig.GarbageThreshold),
+ MinVolumeAgeHours: int32(vacuumConfig.MinVolumeAgeSeconds / 3600), // Convert seconds to hours
+ MinIntervalSeconds: int32(vacuumConfig.MinIntervalSeconds),
+ },
+ },
+ }
+ }
+
+ // Load erasure coding task configuration
+ if ecConfig := erasure_coding.LoadConfigFromPersistence(nil); ecConfig != nil {
+ policy.TaskPolicies["erasure_coding"] = &worker_pb.TaskPolicy{
+ Enabled: ecConfig.Enabled,
+ MaxConcurrent: int32(ecConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(ecConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(ecConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: float64(ecConfig.FullnessRatio),
+ QuietForSeconds: int32(ecConfig.QuietForSeconds),
+ MinVolumeSizeMb: int32(ecConfig.MinSizeMB),
+ CollectionFilter: ecConfig.CollectionFilter,
+ },
+ },
+ }
+ }
+
+ // Load balance task configuration
+ if balanceConfig := balance.LoadConfigFromPersistence(nil); balanceConfig != nil {
+ policy.TaskPolicies["balance"] = &worker_pb.TaskPolicy{
+ Enabled: balanceConfig.Enabled,
+ MaxConcurrent: int32(balanceConfig.MaxConcurrent),
+ RepeatIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: float64(balanceConfig.ImbalanceThreshold),
+ MinServerCount: int32(balanceConfig.MinServerCount),
+ },
+ },
+ }
+ }
+
+ glog.V(1).Infof("Built maintenance policy from separate task configs - %d task policies loaded", len(policy.TaskPolicies))
+ return policy
+}
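+
+// Sketch (illustrative, not called anywhere): reading a task policy back out of
+// the map built above, e.g. to check whether vacuum is enabled and how many
+// concurrent vacuum tasks are allowed.
+func exampleLookupVacuumPolicy(policy *worker_pb.MaintenancePolicy) (bool, int32) {
+	tp, ok := policy.TaskPolicies["vacuum"]
+	if !ok {
+		return false, 0
+	}
+	return tp.Enabled, tp.MaxConcurrent
+}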
+
// MaintenanceManager coordinates the maintenance system
type MaintenanceManager struct {
config *MaintenanceConfig
@@ -18,11 +86,12 @@ type MaintenanceManager struct {
running bool
stopChan chan struct{}
// Error handling and backoff
- errorCount int
- lastError error
- lastErrorTime time.Time
- backoffDelay time.Duration
- mutex sync.RWMutex
+ errorCount int
+ lastError error
+ lastErrorTime time.Time
+ backoffDelay time.Duration
+ mutex sync.RWMutex
+ scanInProgress bool
}
// NewMaintenanceManager creates a new maintenance manager
@@ -31,8 +100,15 @@ func NewMaintenanceManager(adminClient AdminClient, config *MaintenanceConfig) *
config = DefaultMaintenanceConfig()
}
- queue := NewMaintenanceQueue(config.Policy)
- scanner := NewMaintenanceScanner(adminClient, config.Policy, queue)
+ // Use the policy from the config (which is populated from separate task files in LoadMaintenanceConfig)
+ policy := config.Policy
+ if policy == nil {
+ // Fallback: build policy from separate task configuration files if not already populated
+ policy = buildPolicyFromTaskConfigs()
+ }
+
+ queue := NewMaintenanceQueue(policy)
+ scanner := NewMaintenanceScanner(adminClient, policy, queue)
return &MaintenanceManager{
config: config,
@@ -125,23 +201,14 @@ func (mm *MaintenanceManager) scanLoop() {
return
case <-ticker.C:
glog.V(1).Infof("Performing maintenance scan every %v", scanInterval)
- mm.performScan()
-
- // Adjust ticker interval based on error state
- mm.mutex.RLock()
- currentInterval := scanInterval
- if mm.errorCount > 0 {
- // Use backoff delay when there are errors
- currentInterval = mm.backoffDelay
- if currentInterval > scanInterval {
- // Don't make it longer than the configured interval * 10
- maxInterval := scanInterval * 10
- if currentInterval > maxInterval {
- currentInterval = maxInterval
- }
- }
+
+ // Use the same synchronization as TriggerScan to prevent concurrent scans
+ if err := mm.triggerScanInternal(false); err != nil {
+ glog.V(1).Infof("Scheduled scan skipped: %v", err)
}
- mm.mutex.RUnlock()
+
+ // Adjust ticker interval based on error state (read error state safely)
+ currentInterval := mm.getScanInterval(scanInterval)
// Reset ticker with new interval if needed
if currentInterval != scanInterval {
@@ -152,6 +219,26 @@ func (mm *MaintenanceManager) scanLoop() {
}
}
+// getScanInterval safely reads the current scan interval with error backoff
+func (mm *MaintenanceManager) getScanInterval(baseInterval time.Duration) time.Duration {
+ mm.mutex.RLock()
+ defer mm.mutex.RUnlock()
+
+ if mm.errorCount > 0 {
+ // Use backoff delay when there are errors
+ currentInterval := mm.backoffDelay
+ if currentInterval > baseInterval {
+ // Don't make it longer than the configured interval * 10
+ maxInterval := baseInterval * 10
+ if currentInterval > maxInterval {
+ currentInterval = maxInterval
+ }
+ }
+ return currentInterval
+ }
+ return baseInterval
+}
+
// cleanupLoop periodically cleans up old tasks and stale workers
func (mm *MaintenanceManager) cleanupLoop() {
cleanupInterval := time.Duration(mm.config.CleanupIntervalSeconds) * time.Second
@@ -170,25 +257,54 @@ func (mm *MaintenanceManager) cleanupLoop() {
// performScan executes a maintenance scan with error handling and backoff
func (mm *MaintenanceManager) performScan() {
- mm.mutex.Lock()
- defer mm.mutex.Unlock()
+ defer func() {
+ // Always reset scan in progress flag when done
+ mm.mutex.Lock()
+ mm.scanInProgress = false
+ mm.mutex.Unlock()
+ }()
- glog.V(2).Infof("Starting maintenance scan")
+ glog.Infof("Starting maintenance scan...")
results, err := mm.scanner.ScanForMaintenanceTasks()
if err != nil {
+ // Handle scan error
+ mm.mutex.Lock()
mm.handleScanError(err)
+ mm.mutex.Unlock()
+ glog.Warningf("Maintenance scan failed: %v", err)
return
}
- // Scan succeeded, reset error tracking
+ // Scan succeeded - update state and process results
+ mm.handleScanSuccess(results)
+}
+
+// handleScanSuccess processes successful scan results with proper lock management
+func (mm *MaintenanceManager) handleScanSuccess(results []*TaskDetectionResult) {
+ // Update manager state first
+ mm.mutex.Lock()
mm.resetErrorTracking()
+ taskCount := len(results)
+ mm.mutex.Unlock()
+
+ if taskCount > 0 {
+ // Count tasks by type for logging (outside of lock)
+ taskCounts := make(map[MaintenanceTaskType]int)
+ for _, result := range results {
+ taskCounts[result.TaskType]++
+ }
- if len(results) > 0 {
+ // Add tasks to queue (no manager lock held)
mm.queue.AddTasksFromResults(results)
- glog.V(1).Infof("Maintenance scan completed: added %d tasks", len(results))
+
+ // Log detailed scan results
+ glog.Infof("Maintenance scan completed: found %d tasks", taskCount)
+ for taskType, count := range taskCounts {
+ glog.Infof(" - %s: %d tasks", taskType, count)
+ }
} else {
- glog.V(2).Infof("Maintenance scan completed: no tasks needed")
+ glog.Infof("Maintenance scan completed: no maintenance tasks needed")
}
}
@@ -272,8 +388,19 @@ func (mm *MaintenanceManager) performCleanup() {
removedTasks := mm.queue.CleanupOldTasks(taskRetention)
removedWorkers := mm.queue.RemoveStaleWorkers(workerTimeout)
- if removedTasks > 0 || removedWorkers > 0 {
- glog.V(1).Infof("Cleanup completed: removed %d old tasks and %d stale workers", removedTasks, removedWorkers)
+ // Clean up stale pending operations (operations running for more than 4 hours)
+ staleOperationTimeout := 4 * time.Hour
+ removedOperations := 0
+ if mm.scanner != nil && mm.scanner.integration != nil {
+ pendingOps := mm.scanner.integration.GetPendingOperations()
+ if pendingOps != nil {
+ removedOperations = pendingOps.CleanupStaleOperations(staleOperationTimeout)
+ }
+ }
+
+ if removedTasks > 0 || removedWorkers > 0 || removedOperations > 0 {
+ glog.V(1).Infof("Cleanup completed: removed %d old tasks, %d stale workers, and %d stale operations",
+ removedTasks, removedWorkers, removedOperations)
}
}
@@ -311,6 +438,21 @@ func (mm *MaintenanceManager) GetStats() *MaintenanceStats {
return stats
}
+// ReloadTaskConfigurations reloads task configurations from the current policy
+func (mm *MaintenanceManager) ReloadTaskConfigurations() error {
+ mm.mutex.Lock()
+ defer mm.mutex.Unlock()
+
+ // Trigger configuration reload in the integration layer
+ if mm.scanner != nil && mm.scanner.integration != nil {
+ mm.scanner.integration.ConfigureTasksFromPolicy()
+ glog.V(1).Infof("Task configurations reloaded from policy")
+ return nil
+ }
+
+ return fmt.Errorf("integration not available for configuration reload")
+}
+
// GetErrorState returns the current error state for monitoring
func (mm *MaintenanceManager) GetErrorState() (errorCount int, lastError error, backoffDelay time.Duration) {
mm.mutex.RLock()
@@ -330,10 +472,29 @@ func (mm *MaintenanceManager) GetWorkers() []*MaintenanceWorker {
// TriggerScan manually triggers a maintenance scan
func (mm *MaintenanceManager) TriggerScan() error {
+ return mm.triggerScanInternal(true)
+}
+
+// triggerScanInternal handles both manual and automatic scan triggers
+func (mm *MaintenanceManager) triggerScanInternal(isManual bool) error {
if !mm.running {
return fmt.Errorf("maintenance manager is not running")
}
+ // Prevent multiple concurrent scans
+ mm.mutex.Lock()
+ if mm.scanInProgress {
+ mm.mutex.Unlock()
+ if isManual {
+ glog.V(1).Infof("Manual scan already in progress, ignoring trigger request")
+ } else {
+ glog.V(2).Infof("Automatic scan already in progress, ignoring scheduled scan")
+ }
+ return fmt.Errorf("scan already in progress")
+ }
+ mm.scanInProgress = true
+ mm.mutex.Unlock()
+
go mm.performScan()
return nil
}
diff --git a/weed/admin/maintenance/maintenance_queue.go b/weed/admin/maintenance/maintenance_queue.go
index 580a98718..ca402bd4d 100644
--- a/weed/admin/maintenance/maintenance_queue.go
+++ b/weed/admin/maintenance/maintenance_queue.go
@@ -1,10 +1,13 @@
package maintenance
import (
+ "crypto/rand"
+ "fmt"
"sort"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)
// NewMaintenanceQueue creates a new maintenance queue
@@ -24,11 +27,18 @@ func (mq *MaintenanceQueue) SetIntegration(integration *MaintenanceIntegration)
glog.V(1).Infof("Maintenance queue configured with integration")
}
-// AddTask adds a new maintenance task to the queue
+// AddTask adds a new maintenance task to the queue with deduplication
func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
mq.mutex.Lock()
defer mq.mutex.Unlock()
+ // Check for duplicate tasks (same type + volume + not completed)
+ if mq.hasDuplicateTask(task) {
+ glog.V(1).Infof("Task skipped (duplicate): %s for volume %d on %s (already queued or running)",
+ task.Type, task.VolumeID, task.Server)
+ return
+ }
+
task.ID = generateTaskID()
task.Status = TaskStatusPending
task.CreatedAt = time.Now()
@@ -45,19 +55,48 @@ func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt)
})
- glog.V(2).Infof("Added maintenance task %s: %s for volume %d", task.ID, task.Type, task.VolumeID)
+ scheduleInfo := ""
+ if !task.ScheduledAt.IsZero() && time.Until(task.ScheduledAt) > time.Minute {
+ scheduleInfo = fmt.Sprintf(", scheduled for %v", task.ScheduledAt.Format("15:04:05"))
+ }
+
+ glog.Infof("Task queued: %s (%s) volume %d on %s, priority %d%s, reason: %s",
+ task.ID, task.Type, task.VolumeID, task.Server, task.Priority, scheduleInfo, task.Reason)
+}
+
+// hasDuplicateTask checks if a similar task already exists (same type, volume, and not completed)
+func (mq *MaintenanceQueue) hasDuplicateTask(newTask *MaintenanceTask) bool {
+ for _, existingTask := range mq.tasks {
+ if existingTask.Type == newTask.Type &&
+ existingTask.VolumeID == newTask.VolumeID &&
+ existingTask.Server == newTask.Server &&
+ (existingTask.Status == TaskStatusPending ||
+ existingTask.Status == TaskStatusAssigned ||
+ existingTask.Status == TaskStatusInProgress) {
+ return true
+ }
+ }
+ return false
}
// AddTasksFromResults converts detection results to tasks and adds them to the queue
func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult) {
for _, result := range results {
+ // Validate that task has proper typed parameters
+ if result.TypedParams == nil {
+ glog.Warningf("Rejecting invalid task: %s for volume %d on %s - no typed parameters (insufficient destinations or planning failed)",
+ result.TaskType, result.VolumeID, result.Server)
+ continue
+ }
+
task := &MaintenanceTask{
- Type: result.TaskType,
- Priority: result.Priority,
- VolumeID: result.VolumeID,
- Server: result.Server,
- Collection: result.Collection,
- Parameters: result.Parameters,
+ Type: result.TaskType,
+ Priority: result.Priority,
+ VolumeID: result.VolumeID,
+ Server: result.Server,
+ Collection: result.Collection,
+ // Copy typed protobuf parameters
+ TypedParams: result.TypedParams,
Reason: result.Reason,
ScheduledAt: result.ScheduleAt,
}
@@ -67,57 +106,92 @@ func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult)
// GetNextTask returns the next available task for a worker
func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []MaintenanceTaskType) *MaintenanceTask {
- mq.mutex.Lock()
- defer mq.mutex.Unlock()
+ // Use read lock for initial checks and search
+ mq.mutex.RLock()
worker, exists := mq.workers[workerID]
if !exists {
+ mq.mutex.RUnlock()
+ glog.V(2).Infof("Task assignment failed for worker %s: worker not registered", workerID)
return nil
}
// Check if worker has capacity
if worker.CurrentLoad >= worker.MaxConcurrent {
+ mq.mutex.RUnlock()
+ glog.V(2).Infof("Task assignment failed for worker %s: at capacity (%d/%d)", workerID, worker.CurrentLoad, worker.MaxConcurrent)
return nil
}
now := time.Now()
+ var selectedTask *MaintenanceTask
+ var selectedIndex int = -1
- // Find the next suitable task
+ // Find the next suitable task (using read lock)
for i, task := range mq.pendingTasks {
// Check if it's time to execute the task
if task.ScheduledAt.After(now) {
+ glog.V(3).Infof("Task %s skipped for worker %s: scheduled for future (%v)", task.ID, workerID, task.ScheduledAt)
continue
}
// Check if worker can handle this task type
if !mq.workerCanHandle(task.Type, capabilities) {
+ glog.V(3).Infof("Task %s (%s) skipped for worker %s: capability mismatch (worker has: %v)", task.ID, task.Type, workerID, capabilities)
continue
}
- // Check scheduling logic - use simplified system if available, otherwise fallback
+ // Check if this task type needs a cooldown period
if !mq.canScheduleTaskNow(task) {
+ glog.V(3).Infof("Task %s (%s) skipped for worker %s: scheduling constraints not met", task.ID, task.Type, workerID)
continue
}
- // Assign task to worker
- task.Status = TaskStatusAssigned
- task.WorkerID = workerID
- startTime := now
- task.StartedAt = &startTime
+ // Found a suitable task
+ selectedTask = task
+ selectedIndex = i
+ break
+ }
- // Remove from pending tasks
- mq.pendingTasks = append(mq.pendingTasks[:i], mq.pendingTasks[i+1:]...)
+ // Release read lock
+ mq.mutex.RUnlock()
- // Update worker
- worker.CurrentTask = task
- worker.CurrentLoad++
- worker.Status = "busy"
+ // If no task found, return nil
+ if selectedTask == nil {
+ glog.V(2).Infof("No suitable tasks available for worker %s (checked %d pending tasks)", workerID, len(mq.pendingTasks))
+ return nil
+ }
+
+ // Now acquire write lock to actually assign the task
+ mq.mutex.Lock()
+ defer mq.mutex.Unlock()
+
+ // Re-check that the task is still available (it might have been assigned to another worker)
+ if selectedIndex >= len(mq.pendingTasks) || mq.pendingTasks[selectedIndex].ID != selectedTask.ID {
+ glog.V(2).Infof("Task %s no longer available for worker %s: assigned to another worker", selectedTask.ID, workerID)
+ return nil
+ }
+
+ // Assign the task
+ selectedTask.Status = TaskStatusAssigned
+ selectedTask.WorkerID = workerID
+ selectedTask.StartedAt = &now
+
+ // Remove from pending tasks
+ mq.pendingTasks = append(mq.pendingTasks[:selectedIndex], mq.pendingTasks[selectedIndex+1:]...)
- glog.V(2).Infof("Assigned task %s to worker %s", task.ID, workerID)
- return task
+ // Update worker load
+ if worker, exists := mq.workers[workerID]; exists {
+ worker.CurrentLoad++
}
- return nil
+ // Track pending operation
+ mq.trackPendingOperation(selectedTask)
+
+ glog.Infof("Task assigned: %s (%s) → worker %s (volume %d, server %s)",
+ selectedTask.ID, selectedTask.Type, workerID, selectedTask.VolumeID, selectedTask.Server)
+
+ return selectedTask
}
// CompleteTask marks a task as completed
@@ -127,12 +201,19 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
task, exists := mq.tasks[taskID]
if !exists {
+ glog.Warningf("Attempted to complete non-existent task: %s", taskID)
return
}
completedTime := time.Now()
task.CompletedAt = &completedTime
+ // Calculate task duration
+ var duration time.Duration
+ if task.StartedAt != nil {
+ duration = completedTime.Sub(*task.StartedAt)
+ }
+
if error != "" {
task.Status = TaskStatusFailed
task.Error = error
@@ -148,14 +229,17 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
task.ScheduledAt = time.Now().Add(15 * time.Minute) // Retry delay
mq.pendingTasks = append(mq.pendingTasks, task)
- glog.V(2).Infof("Retrying task %s (attempt %d/%d)", taskID, task.RetryCount, task.MaxRetries)
+ glog.Warningf("Task failed, scheduling retry: %s (%s) attempt %d/%d, worker %s, duration %v, error: %s",
+ taskID, task.Type, task.RetryCount, task.MaxRetries, task.WorkerID, duration, error)
} else {
- glog.Errorf("Task %s failed permanently after %d retries: %s", taskID, task.MaxRetries, error)
+ glog.Errorf("Task failed permanently: %s (%s) worker %s, duration %v, after %d retries: %s",
+ taskID, task.Type, task.WorkerID, duration, task.MaxRetries, error)
}
} else {
task.Status = TaskStatusCompleted
task.Progress = 100
- glog.V(2).Infof("Task %s completed successfully", taskID)
+ glog.Infof("Task completed: %s (%s) worker %s, duration %v, volume %d",
+ taskID, task.Type, task.WorkerID, duration, task.VolumeID)
}
// Update worker
@@ -168,6 +252,11 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
}
}
}
+
+ // Remove pending operation (unless it's being retried)
+ if task.Status != TaskStatusPending {
+ mq.removePendingOperation(taskID)
+ }
}
// UpdateTaskProgress updates the progress of a running task
@@ -176,8 +265,26 @@ func (mq *MaintenanceQueue) UpdateTaskProgress(taskID string, progress float64)
defer mq.mutex.RUnlock()
if task, exists := mq.tasks[taskID]; exists {
+ oldProgress := task.Progress
task.Progress = progress
task.Status = TaskStatusInProgress
+
+ // Update pending operation status
+ mq.updatePendingOperationStatus(taskID, "in_progress")
+
+ // Log progress at significant milestones or changes
+ if progress == 0 {
+ glog.V(1).Infof("Task started: %s (%s) worker %s, volume %d",
+ taskID, task.Type, task.WorkerID, task.VolumeID)
+ } else if progress >= 100 {
+ glog.V(1).Infof("Task progress: %s (%s) worker %s, %.1f%% complete",
+ taskID, task.Type, task.WorkerID, progress)
+ } else if progress-oldProgress >= 25 { // Log every 25% increment
+ glog.V(1).Infof("Task progress: %s (%s) worker %s, %.1f%% complete",
+ taskID, task.Type, task.WorkerID, progress)
+ }
+ } else {
+ glog.V(2).Infof("Progress update for unknown task: %s (%.1f%%)", taskID, progress)
}
}
@@ -186,12 +293,25 @@ func (mq *MaintenanceQueue) RegisterWorker(worker *MaintenanceWorker) {
mq.mutex.Lock()
defer mq.mutex.Unlock()
+ isNewWorker := true
+ if existingWorker, exists := mq.workers[worker.ID]; exists {
+ isNewWorker = false
+ glog.Infof("Worker reconnected: %s at %s (capabilities: %v, max concurrent: %d)",
+ worker.ID, worker.Address, worker.Capabilities, worker.MaxConcurrent)
+
+ // Preserve current load when reconnecting
+ worker.CurrentLoad = existingWorker.CurrentLoad
+ } else {
+ glog.Infof("Worker registered: %s at %s (capabilities: %v, max concurrent: %d)",
+ worker.ID, worker.Address, worker.Capabilities, worker.MaxConcurrent)
+ }
+
worker.LastHeartbeat = time.Now()
worker.Status = "active"
- worker.CurrentLoad = 0
+ if isNewWorker {
+ worker.CurrentLoad = 0
+ }
mq.workers[worker.ID] = worker
-
- glog.V(1).Infof("Registered maintenance worker %s at %s", worker.ID, worker.Address)
}
// UpdateWorkerHeartbeat updates worker heartbeat
@@ -200,7 +320,15 @@ func (mq *MaintenanceQueue) UpdateWorkerHeartbeat(workerID string) {
defer mq.mutex.Unlock()
if worker, exists := mq.workers[workerID]; exists {
+ lastSeen := worker.LastHeartbeat
worker.LastHeartbeat = time.Now()
+
+ // Log if worker was offline for a while
+ if time.Since(lastSeen) > 2*time.Minute {
+ glog.Infof("Worker %s heartbeat resumed after %v", workerID, time.Since(lastSeen))
+ }
+ } else {
+ glog.V(2).Infof("Heartbeat from unknown worker: %s", workerID)
}
}
@@ -255,7 +383,7 @@ func (mq *MaintenanceQueue) getRepeatPreventionInterval(taskType MaintenanceTask
// Fallback to policy configuration if no scheduler available or scheduler doesn't provide default
if mq.policy != nil {
- repeatIntervalHours := mq.policy.GetRepeatInterval(taskType)
+ repeatIntervalHours := GetRepeatInterval(mq.policy, taskType)
if repeatIntervalHours > 0 {
interval := time.Duration(repeatIntervalHours) * time.Hour
glog.V(3).Infof("Using policy configuration repeat interval for %s: %v", taskType, interval)
@@ -311,10 +439,23 @@ func (mq *MaintenanceQueue) GetWorkers() []*MaintenanceWorker {
func generateTaskID() string {
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
b := make([]byte, 8)
+ randBytes := make([]byte, 8)
+
+ // Generate random bytes
+ if _, err := rand.Read(randBytes); err != nil {
+ // Fallback to timestamp-based ID if crypto/rand fails
+ timestamp := time.Now().UnixNano()
+ return fmt.Sprintf("task-%d", timestamp)
+ }
+
+ // Convert random bytes to charset
for i := range b {
- b[i] = charset[i%len(charset)]
+ b[i] = charset[int(randBytes[i])%len(charset)]
}
- return string(b)
+
+ // Add timestamp suffix to ensure uniqueness
+ timestamp := time.Now().Unix() % 10000 // last 4 digits of timestamp
+ return fmt.Sprintf("%s-%04d", string(b), timestamp)
}
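+// Example output (illustrative): "k3x9a2mf-0427" - eight characters drawn from
+// the charset above, followed by the last four digits of the Unix timestamp.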
// CleanupOldTasks removes old completed and failed tasks
@@ -427,19 +568,31 @@ func (mq *MaintenanceQueue) workerCanHandle(taskType MaintenanceTaskType, capabi
// canScheduleTaskNow determines if a task can be scheduled using task schedulers or fallback logic
func (mq *MaintenanceQueue) canScheduleTaskNow(task *MaintenanceTask) bool {
- // Try task scheduling logic first
- if mq.integration != nil {
- // Get all running tasks and available workers
- runningTasks := mq.getRunningTasks()
- availableWorkers := mq.getAvailableWorkers()
+ glog.V(2).Infof("Checking if task %s (type: %s) can be scheduled", task.ID, task.Type)
- canSchedule := mq.integration.CanScheduleWithTaskSchedulers(task, runningTasks, availableWorkers)
- glog.V(3).Infof("Task scheduler decision for task %s (%s): %v", task.ID, task.Type, canSchedule)
- return canSchedule
- }
+ // TEMPORARY FIX: Skip integration task scheduler which is being overly restrictive
+ // Use fallback logic directly for now
+ glog.V(2).Infof("Using fallback logic for task scheduling")
+ canExecute := mq.canExecuteTaskType(task.Type)
+ glog.V(2).Infof("Fallback decision for task %s: %v", task.ID, canExecute)
+ return canExecute
- // Fallback to hardcoded logic
- return mq.canExecuteTaskType(task.Type)
+ // NOTE: Original integration code disabled temporarily
+ // Try task scheduling logic first
+ /*
+ if mq.integration != nil {
+ glog.Infof("DEBUG canScheduleTaskNow: Using integration task scheduler")
+ // Get all running tasks and available workers
+ runningTasks := mq.getRunningTasks()
+ availableWorkers := mq.getAvailableWorkers()
+
+ glog.Infof("DEBUG canScheduleTaskNow: Running tasks: %d, Available workers: %d", len(runningTasks), len(availableWorkers))
+
+ canSchedule := mq.integration.CanScheduleWithTaskSchedulers(task, runningTasks, availableWorkers)
+ glog.Infof("DEBUG canScheduleTaskNow: Task scheduler decision for task %s (%s): %v", task.ID, task.Type, canSchedule)
+ return canSchedule
+ }
+ */
}
// canExecuteTaskType checks if we can execute more tasks of this type (concurrency limits) - fallback logic
@@ -465,7 +618,7 @@ func (mq *MaintenanceQueue) getMaxConcurrentForTaskType(taskType MaintenanceTask
// Fallback to policy configuration if no scheduler available or scheduler doesn't provide default
if mq.policy != nil {
- maxConcurrent := mq.policy.GetMaxConcurrent(taskType)
+ maxConcurrent := GetMaxConcurrent(mq.policy, taskType)
if maxConcurrent > 0 {
glog.V(3).Infof("Using policy configuration max concurrent for %s: %d", taskType, maxConcurrent)
return maxConcurrent
@@ -498,3 +651,108 @@ func (mq *MaintenanceQueue) getAvailableWorkers() []*MaintenanceWorker {
}
return availableWorkers
}
+
+// trackPendingOperation adds a task to the pending operations tracker
+func (mq *MaintenanceQueue) trackPendingOperation(task *MaintenanceTask) {
+ if mq.integration == nil {
+ return
+ }
+
+ pendingOps := mq.integration.GetPendingOperations()
+ if pendingOps == nil {
+ return
+ }
+
+ // Skip tracking for tasks without proper typed parameters
+ if task.TypedParams == nil {
+ glog.V(2).Infof("Skipping pending operation tracking for task %s - no typed parameters", task.ID)
+ return
+ }
+
+ // Map maintenance task type to pending operation type
+ var opType PendingOperationType
+ switch task.Type {
+ case MaintenanceTaskType("balance"):
+ opType = OpTypeVolumeBalance
+ case MaintenanceTaskType("erasure_coding"):
+ opType = OpTypeErasureCoding
+ case MaintenanceTaskType("vacuum"):
+ opType = OpTypeVacuum
+ case MaintenanceTaskType("replication"):
+ opType = OpTypeReplication
+ default:
+ opType = OpTypeVolumeMove
+ }
+
+ // Determine destination node and estimated size from typed parameters
+ destNode := ""
+ estimatedSize := uint64(1024 * 1024 * 1024) // Default 1GB estimate
+
+ switch params := task.TypedParams.TaskParams.(type) {
+ case *worker_pb.TaskParams_ErasureCodingParams:
+ if params.ErasureCodingParams != nil {
+ if len(params.ErasureCodingParams.Destinations) > 0 {
+ destNode = params.ErasureCodingParams.Destinations[0].Node
+ }
+ if params.ErasureCodingParams.EstimatedShardSize > 0 {
+ estimatedSize = params.ErasureCodingParams.EstimatedShardSize
+ }
+ }
+ case *worker_pb.TaskParams_BalanceParams:
+ if params.BalanceParams != nil {
+ destNode = params.BalanceParams.DestNode
+ if params.BalanceParams.EstimatedSize > 0 {
+ estimatedSize = params.BalanceParams.EstimatedSize
+ }
+ }
+ case *worker_pb.TaskParams_ReplicationParams:
+ if params.ReplicationParams != nil {
+ destNode = params.ReplicationParams.DestNode
+ if params.ReplicationParams.EstimatedSize > 0 {
+ estimatedSize = params.ReplicationParams.EstimatedSize
+ }
+ }
+ }
+
+ operation := &PendingOperation{
+ VolumeID: task.VolumeID,
+ OperationType: opType,
+ SourceNode: task.Server,
+ DestNode: destNode,
+ TaskID: task.ID,
+ StartTime: time.Now(),
+ EstimatedSize: estimatedSize,
+ Collection: task.Collection,
+ Status: "assigned",
+ }
+
+ pendingOps.AddOperation(operation)
+}
+
+// removePendingOperation removes a task from the pending operations tracker
+func (mq *MaintenanceQueue) removePendingOperation(taskID string) {
+ if mq.integration == nil {
+ return
+ }
+
+ pendingOps := mq.integration.GetPendingOperations()
+ if pendingOps == nil {
+ return
+ }
+
+ pendingOps.RemoveOperation(taskID)
+}
+
+// updatePendingOperationStatus updates the status of a pending operation
+func (mq *MaintenanceQueue) updatePendingOperationStatus(taskID string, status string) {
+ if mq.integration == nil {
+ return
+ }
+
+ pendingOps := mq.integration.GetPendingOperations()
+ if pendingOps == nil {
+ return
+ }
+
+ pendingOps.UpdateOperationStatus(taskID, status)
+}
diff --git a/weed/admin/maintenance/maintenance_queue_test.go b/weed/admin/maintenance/maintenance_queue_test.go
new file mode 100644
index 000000000..2c38471a0
--- /dev/null
+++ b/weed/admin/maintenance/maintenance_queue_test.go
@@ -0,0 +1,353 @@
+package maintenance
+
+import (
+ "testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+// Test suite for canScheduleTaskNow() function and related scheduling logic
+//
+// This test suite ensures that:
+// 1. The fallback scheduling logic works correctly when no integration is present
+// 2. Task concurrency limits are properly enforced per task type
+// 3. Different task types don't interfere with each other's concurrency limits
+// 4. Custom policies with higher concurrency limits work correctly
+// 5. Edge cases (nil tasks, empty task types) are handled gracefully
+// 6. Helper functions (GetRunningTaskCount, canExecuteTaskType, etc.) work correctly
+//
+// Background: The canScheduleTaskNow() function is critical for task assignment.
+// It was previously failing due to an overly restrictive integration scheduler,
+// so we implemented a temporary fix that bypasses the integration and uses
+// fallback logic based on simple concurrency limits per task type.
+
+func TestCanScheduleTaskNow_FallbackLogic(t *testing.T) {
+ // Test the current implementation which uses fallback logic
+ mq := &MaintenanceQueue{
+ tasks: make(map[string]*MaintenanceTask),
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil, // No policy for default behavior
+ integration: nil, // No integration to force fallback
+ }
+
+ task := &MaintenanceTask{
+ ID: "test-task-1",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusPending,
+ }
+
+ // Should return true with fallback logic (no running tasks, default max concurrent = 1)
+ result := mq.canScheduleTaskNow(task)
+ if !result {
+ t.Errorf("Expected canScheduleTaskNow to return true with fallback logic, got false")
+ }
+}
+
+func TestCanScheduleTaskNow_FallbackWithRunningTasks(t *testing.T) {
+ // Test fallback logic when there are already running tasks
+ mq := &MaintenanceQueue{
+ tasks: map[string]*MaintenanceTask{
+ "running-task": {
+ ID: "running-task",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ },
+ },
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil,
+ integration: nil,
+ }
+
+ task := &MaintenanceTask{
+ ID: "test-task-2",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusPending,
+ }
+
+ // Should return false because max concurrent is 1 and we have 1 running task
+ result := mq.canScheduleTaskNow(task)
+ if result {
+ t.Errorf("Expected canScheduleTaskNow to return false when at capacity, got true")
+ }
+}
+
+func TestCanScheduleTaskNow_DifferentTaskTypes(t *testing.T) {
+ // Test that different task types don't interfere with each other
+ mq := &MaintenanceQueue{
+ tasks: map[string]*MaintenanceTask{
+ "running-ec-task": {
+ ID: "running-ec-task",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ },
+ },
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil,
+ integration: nil,
+ }
+
+ // Test vacuum task when EC task is running
+ vacuumTask := &MaintenanceTask{
+ ID: "vacuum-task",
+ Type: MaintenanceTaskType("vacuum"),
+ Status: TaskStatusPending,
+ }
+
+ // Should return true because vacuum and erasure_coding are different task types
+ result := mq.canScheduleTaskNow(vacuumTask)
+ if !result {
+ t.Errorf("Expected canScheduleTaskNow to return true for different task type, got false")
+ }
+
+ // Test another EC task when one is already running
+ ecTask := &MaintenanceTask{
+ ID: "ec-task",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusPending,
+ }
+
+ // Should return false because max concurrent for EC is 1 and we have 1 running
+ result = mq.canScheduleTaskNow(ecTask)
+ if result {
+ t.Errorf("Expected canScheduleTaskNow to return false for same task type at capacity, got true")
+ }
+}
+
+func TestCanScheduleTaskNow_WithIntegration(t *testing.T) {
+ // Test with a real MaintenanceIntegration (will use fallback logic in current implementation)
+ policy := &MaintenancePolicy{
+ TaskPolicies: make(map[string]*worker_pb.TaskPolicy),
+ GlobalMaxConcurrent: 10,
+ DefaultRepeatIntervalSeconds: 24 * 60 * 60, // 24 hours in seconds
+ DefaultCheckIntervalSeconds: 60 * 60, // 1 hour in seconds
+ }
+ mq := NewMaintenanceQueue(policy)
+
+ // Create a basic integration (this would normally be more complex)
+ integration := NewMaintenanceIntegration(mq, policy)
+ mq.SetIntegration(integration)
+
+ task := &MaintenanceTask{
+ ID: "test-task-3",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusPending,
+ }
+
+ // With our current implementation (fallback logic), this should return true
+ result := mq.canScheduleTaskNow(task)
+ if !result {
+ t.Errorf("Expected canScheduleTaskNow to return true with fallback logic, got false")
+ }
+}
+
+func TestGetRunningTaskCount(t *testing.T) {
+ // Test the helper function used by fallback logic
+ mq := &MaintenanceQueue{
+ tasks: map[string]*MaintenanceTask{
+ "task1": {
+ ID: "task1",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ },
+ "task2": {
+ ID: "task2",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusAssigned,
+ },
+ "task3": {
+ ID: "task3",
+ Type: MaintenanceTaskType("vacuum"),
+ Status: TaskStatusInProgress,
+ },
+ "task4": {
+ ID: "task4",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusCompleted,
+ },
+ },
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ }
+
+ // Should count 2 running EC tasks (in_progress + assigned)
+ ecCount := mq.GetRunningTaskCount(MaintenanceTaskType("erasure_coding"))
+ if ecCount != 2 {
+ t.Errorf("Expected 2 running EC tasks, got %d", ecCount)
+ }
+
+ // Should count 1 running vacuum task
+ vacuumCount := mq.GetRunningTaskCount(MaintenanceTaskType("vacuum"))
+ if vacuumCount != 1 {
+ t.Errorf("Expected 1 running vacuum task, got %d", vacuumCount)
+ }
+
+ // Should count 0 running balance tasks
+ balanceCount := mq.GetRunningTaskCount(MaintenanceTaskType("balance"))
+ if balanceCount != 0 {
+ t.Errorf("Expected 0 running balance tasks, got %d", balanceCount)
+ }
+}
+
+func TestCanExecuteTaskType(t *testing.T) {
+ // Test the fallback logic helper function
+ mq := &MaintenanceQueue{
+ tasks: map[string]*MaintenanceTask{
+ "running-task": {
+ ID: "running-task",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ },
+ },
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil, // Will use default max concurrent = 1
+ integration: nil,
+ }
+
+ // Should return false for EC (1 running, max = 1)
+ result := mq.canExecuteTaskType(MaintenanceTaskType("erasure_coding"))
+ if result {
+ t.Errorf("Expected canExecuteTaskType to return false for EC at capacity, got true")
+ }
+
+ // Should return true for vacuum (0 running, max = 1)
+ result = mq.canExecuteTaskType(MaintenanceTaskType("vacuum"))
+ if !result {
+ t.Errorf("Expected canExecuteTaskType to return true for vacuum, got false")
+ }
+}
+
+func TestGetMaxConcurrentForTaskType_DefaultBehavior(t *testing.T) {
+ // Test the default behavior when no policy or integration is set
+ mq := &MaintenanceQueue{
+ tasks: make(map[string]*MaintenanceTask),
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil,
+ integration: nil,
+ }
+
+ // Should return default value of 1
+ maxConcurrent := mq.getMaxConcurrentForTaskType(MaintenanceTaskType("erasure_coding"))
+ if maxConcurrent != 1 {
+ t.Errorf("Expected default max concurrent to be 1, got %d", maxConcurrent)
+ }
+
+ maxConcurrent = mq.getMaxConcurrentForTaskType(MaintenanceTaskType("vacuum"))
+ if maxConcurrent != 1 {
+ t.Errorf("Expected default max concurrent to be 1, got %d", maxConcurrent)
+ }
+}
+
+// Test edge cases and error conditions
+func TestCanScheduleTaskNow_NilTask(t *testing.T) {
+ mq := &MaintenanceQueue{
+ tasks: make(map[string]*MaintenanceTask),
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil,
+ integration: nil,
+ }
+
+ // This should panic with a nil task, so we expect and catch the panic
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("Expected canScheduleTaskNow to panic with nil task, but it didn't")
+ }
+ }()
+
+ // This should panic
+ mq.canScheduleTaskNow(nil)
+}
+
+func TestCanScheduleTaskNow_EmptyTaskType(t *testing.T) {
+ mq := &MaintenanceQueue{
+ tasks: make(map[string]*MaintenanceTask),
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: nil,
+ integration: nil,
+ }
+
+ task := &MaintenanceTask{
+ ID: "empty-type-task",
+ Type: MaintenanceTaskType(""), // Empty task type
+ Status: TaskStatusPending,
+ }
+
+ // Should handle empty task type gracefully
+ result := mq.canScheduleTaskNow(task)
+ if !result {
+ t.Errorf("Expected canScheduleTaskNow to handle empty task type, got false")
+ }
+}
+
+func TestCanScheduleTaskNow_WithPolicy(t *testing.T) {
+ // Test with a policy that allows higher concurrency
+ policy := &MaintenancePolicy{
+ TaskPolicies: map[string]*worker_pb.TaskPolicy{
+ string(MaintenanceTaskType("erasure_coding")): {
+ Enabled: true,
+ MaxConcurrent: 3,
+ RepeatIntervalSeconds: 60 * 60, // 1 hour
+ CheckIntervalSeconds: 60 * 60, // 1 hour
+ },
+ string(MaintenanceTaskType("vacuum")): {
+ Enabled: true,
+ MaxConcurrent: 2,
+ RepeatIntervalSeconds: 60 * 60, // 1 hour
+ CheckIntervalSeconds: 60 * 60, // 1 hour
+ },
+ },
+ GlobalMaxConcurrent: 10,
+ DefaultRepeatIntervalSeconds: 24 * 60 * 60, // 24 hours in seconds
+ DefaultCheckIntervalSeconds: 60 * 60, // 1 hour in seconds
+ }
+
+ mq := &MaintenanceQueue{
+ tasks: map[string]*MaintenanceTask{
+ "running-task-1": {
+ ID: "running-task-1",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ },
+ "running-task-2": {
+ ID: "running-task-2",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusAssigned,
+ },
+ },
+ pendingTasks: []*MaintenanceTask{},
+ workers: make(map[string]*MaintenanceWorker),
+ policy: policy,
+ integration: nil,
+ }
+
+ task := &MaintenanceTask{
+ ID: "test-task-policy",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusPending,
+ }
+
+ // Should return true because we have 2 running EC tasks but max is 3
+ result := mq.canScheduleTaskNow(task)
+ if !result {
+ t.Errorf("Expected canScheduleTaskNow to return true with policy allowing 3 concurrent, got false")
+ }
+
+ // Add one more running task to reach the limit
+ mq.tasks["running-task-3"] = &MaintenanceTask{
+ ID: "running-task-3",
+ Type: MaintenanceTaskType("erasure_coding"),
+ Status: TaskStatusInProgress,
+ }
+
+ // Should return false because we now have 3 running EC tasks (at limit)
+ result = mq.canScheduleTaskNow(task)
+ if result {
+ t.Errorf("Expected canScheduleTaskNow to return false when at policy limit, got true")
+ }
+}
diff --git a/weed/admin/maintenance/maintenance_scanner.go b/weed/admin/maintenance/maintenance_scanner.go
index 271765ef8..ef41b78ed 100644
--- a/weed/admin/maintenance/maintenance_scanner.go
+++ b/weed/admin/maintenance/maintenance_scanner.go
@@ -43,7 +43,18 @@ func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult,
// Convert metrics to task system format
taskMetrics := ms.convertToTaskMetrics(volumeMetrics)
- // Use task detection system
+ // Update topology information for complete cluster view (including empty servers)
+ // This must happen before task detection to ensure EC placement can consider all servers
+ if ms.lastTopologyInfo != nil {
+ if err := ms.integration.UpdateTopologyInfo(ms.lastTopologyInfo); err != nil {
+ glog.Errorf("Failed to update topology info for empty servers: %v", err)
+ // Don't fail the scan - continue with just volume-bearing servers
+ } else {
+ glog.V(1).Infof("Updated topology info for complete cluster view including empty servers")
+ }
+ }
+
+ // Use task detection system with complete cluster information
results, err := ms.integration.ScanWithTaskDetectors(taskMetrics)
if err != nil {
glog.Errorf("Task scanning failed: %v", err)
@@ -62,25 +73,60 @@ func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult,
// getVolumeHealthMetrics collects health information for all volumes
func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics, error) {
var metrics []*VolumeHealthMetrics
+ var volumeSizeLimitMB uint64
+ glog.V(1).Infof("Collecting volume health metrics from master")
err := ms.adminClient.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ // First, get volume size limit from master configuration
+ configResp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
+ if err != nil {
+ glog.Warningf("Failed to get volume size limit from master: %v", err)
+ volumeSizeLimitMB = 30000 // Default to 30GB if we can't get from master
+ } else {
+ volumeSizeLimitMB = uint64(configResp.VolumeSizeLimitMB)
+ }
+
+ // Now get volume list
resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
}
if resp.TopologyInfo == nil {
+ glog.Warningf("No topology info received from master")
return nil
}
+ volumeSizeLimitBytes := volumeSizeLimitMB * 1024 * 1024 // Convert MB to bytes
+
+ // Track all nodes discovered in topology
+ var allNodesInTopology []string
+ var nodesWithVolumes []string
+ var nodesWithoutVolumes []string
+
for _, dc := range resp.TopologyInfo.DataCenterInfos {
+ glog.V(2).Infof("Processing datacenter: %s", dc.Id)
for _, rack := range dc.RackInfos {
+ glog.V(2).Infof("Processing rack: %s in datacenter: %s", rack.Id, dc.Id)
for _, node := range rack.DataNodeInfos {
- for _, diskInfo := range node.DiskInfos {
+ allNodesInTopology = append(allNodesInTopology, node.Id)
+ glog.V(2).Infof("Found volume server in topology: %s (disks: %d)", node.Id, len(node.DiskInfos))
+
+ hasVolumes := false
+ // Process each disk on this node
+ for diskType, diskInfo := range node.DiskInfos {
+ if len(diskInfo.VolumeInfos) > 0 {
+ hasVolumes = true
+ glog.V(2).Infof("Volume server %s disk %s has %d volumes", node.Id, diskType, len(diskInfo.VolumeInfos))
+ }
+
+ // Process volumes on this specific disk
for _, volInfo := range diskInfo.VolumeInfos {
metric := &VolumeHealthMetrics{
VolumeID: volInfo.Id,
Server: node.Id,
+ DiskType: diskType, // Track which disk this volume is on
+ DiskId: volInfo.DiskId, // Use disk ID from volume info
Collection: volInfo.Collection,
Size: volInfo.Size,
DeletedBytes: volInfo.DeletedByteCount,
@@ -94,31 +140,58 @@ func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics,
// Calculate derived metrics
if metric.Size > 0 {
metric.GarbageRatio = float64(metric.DeletedBytes) / float64(metric.Size)
- // Calculate fullness ratio (would need volume size limit)
- // metric.FullnessRatio = float64(metric.Size) / float64(volumeSizeLimit)
+ // Calculate fullness ratio using actual volume size limit from master
+ metric.FullnessRatio = float64(metric.Size) / float64(volumeSizeLimitBytes)
}
metric.Age = time.Since(metric.LastModified)
+ glog.V(3).Infof("Volume %d on %s:%s (ID %d): size=%d, limit=%d, fullness=%.2f",
+ metric.VolumeID, metric.Server, metric.DiskType, metric.DiskId, metric.Size, volumeSizeLimitBytes, metric.FullnessRatio)
+
metrics = append(metrics, metric)
}
}
+
+ if hasVolumes {
+ nodesWithVolumes = append(nodesWithVolumes, node.Id)
+ } else {
+ nodesWithoutVolumes = append(nodesWithoutVolumes, node.Id)
+ glog.V(1).Infof("Volume server %s found in topology but has no volumes", node.Id)
+ }
}
}
}
+ glog.Infof("Topology discovery complete:")
+ glog.Infof(" - Total volume servers in topology: %d (%v)", len(allNodesInTopology), allNodesInTopology)
+ glog.Infof(" - Volume servers with volumes: %d (%v)", len(nodesWithVolumes), nodesWithVolumes)
+ glog.Infof(" - Volume servers without volumes: %d (%v)", len(nodesWithoutVolumes), nodesWithoutVolumes)
+ glog.Infof("Note: Maintenance system will track empty servers separately from volume metrics.")
+
+ // Store topology info for volume shard tracker
+ ms.lastTopologyInfo = resp.TopologyInfo
+
return nil
})
if err != nil {
+ glog.Errorf("Failed to get volume health metrics: %v", err)
return nil, err
}
+ glog.V(1).Infof("Successfully collected metrics for %d actual volumes with disk ID information", len(metrics))
+
// Count actual replicas and identify EC volumes
ms.enrichVolumeMetrics(metrics)
return metrics, nil
}
+// getTopologyInfo returns the last collected topology information
+func (ms *MaintenanceScanner) getTopologyInfo() *master_pb.TopologyInfo {
+ return ms.lastTopologyInfo
+}
+
// enrichVolumeMetrics adds additional information like replica counts
func (ms *MaintenanceScanner) enrichVolumeMetrics(metrics []*VolumeHealthMetrics) {
// Group volumes by ID to count replicas
@@ -127,13 +200,17 @@ func (ms *MaintenanceScanner) enrichVolumeMetrics(metrics []*VolumeHealthMetrics
volumeGroups[metric.VolumeID] = append(volumeGroups[metric.VolumeID], metric)
}
- // Update replica counts
- for _, group := range volumeGroups {
- actualReplicas := len(group)
- for _, metric := range group {
- metric.ReplicaCount = actualReplicas
+ // Update replica counts for actual volumes
+ for volumeID, replicas := range volumeGroups {
+ replicaCount := len(replicas)
+ for _, replica := range replicas {
+ replica.ReplicaCount = replicaCount
}
+ glog.V(3).Infof("Volume %d has %d replicas", volumeID, replicaCount)
}
+
+ // TODO: Identify EC volumes by checking volume structure
+ // This would require querying volume servers for EC shard information
}
// convertToTaskMetrics converts existing volume metrics to task system format
@@ -144,6 +221,8 @@ func (ms *MaintenanceScanner) convertToTaskMetrics(metrics []*VolumeHealthMetric
simplified = append(simplified, &types.VolumeHealthMetrics{
VolumeID: metric.VolumeID,
Server: metric.Server,
+ DiskType: metric.DiskType,
+ DiskId: metric.DiskId,
Collection: metric.Collection,
Size: metric.Size,
DeletedBytes: metric.DeletedBytes,
@@ -159,5 +238,6 @@ func (ms *MaintenanceScanner) convertToTaskMetrics(metrics []*VolumeHealthMetric
})
}
+ glog.V(2).Infof("Converted %d volume metrics with disk ID information for task detection", len(simplified))
return simplified
}
diff --git a/weed/admin/maintenance/maintenance_types.go b/weed/admin/maintenance/maintenance_types.go
index 6b8c2e9a0..e863b26e6 100644
--- a/weed/admin/maintenance/maintenance_types.go
+++ b/weed/admin/maintenance/maintenance_types.go
@@ -8,6 +8,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -96,7 +97,7 @@ type MaintenanceTask struct {
VolumeID uint32 `json:"volume_id,omitempty"`
Server string `json:"server,omitempty"`
Collection string `json:"collection,omitempty"`
- Parameters map[string]interface{} `json:"parameters,omitempty"`
+ TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"`
Reason string `json:"reason"`
CreatedAt time.Time `json:"created_at"`
ScheduledAt time.Time `json:"scheduled_at"`
@@ -109,90 +110,149 @@ type MaintenanceTask struct {
MaxRetries int `json:"max_retries"`
}
-// TaskPolicy represents configuration for a specific task type
-type TaskPolicy struct {
- Enabled bool `json:"enabled"`
- MaxConcurrent int `json:"max_concurrent"`
- RepeatInterval int `json:"repeat_interval"` // Hours to wait before repeating
- CheckInterval int `json:"check_interval"` // Hours between checks
- Configuration map[string]interface{} `json:"configuration"` // Task-specific config
-}
+// MaintenanceConfig holds configuration for the maintenance system
+// Deprecated: Use worker_pb.MaintenanceConfig instead.
+type MaintenanceConfig = worker_pb.MaintenanceConfig
-// MaintenancePolicy defines policies for maintenance operations using a dynamic structure
-type MaintenancePolicy struct {
- // Task-specific policies mapped by task type
- TaskPolicies map[MaintenanceTaskType]*TaskPolicy `json:"task_policies"`
+// MaintenancePolicy defines policies for maintenance operations
+// Deprecated: Use worker_pb.MaintenancePolicy instead.
+type MaintenancePolicy = worker_pb.MaintenancePolicy
+
+// TaskPolicy represents configuration for a specific task type
+// DEPRECATED: Use worker_pb.TaskPolicy instead
+type TaskPolicy = worker_pb.TaskPolicy
- // Global policy settings
- GlobalMaxConcurrent int `json:"global_max_concurrent"` // Overall limit across all task types
- DefaultRepeatInterval int `json:"default_repeat_interval"` // Default hours if task doesn't specify
- DefaultCheckInterval int `json:"default_check_interval"` // Default hours for periodic checks
+// DefaultMaintenanceConfig returns the default configuration values
+func DefaultMaintenanceConfig() *MaintenanceConfig {
+ return DefaultMaintenanceConfigProto()
}
-// GetTaskPolicy returns the policy for a specific task type, creating generic defaults if needed
-func (mp *MaintenancePolicy) GetTaskPolicy(taskType MaintenanceTaskType) *TaskPolicy {
- if mp.TaskPolicies == nil {
- mp.TaskPolicies = make(map[MaintenanceTaskType]*TaskPolicy)
- }
+// Policy helper functions (since we can't add methods to type aliases)
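+// Call sites therefore change from method form, e.g. mp.IsTaskEnabled(taskType),
+// to free-function form, e.g. IsTaskEnabled(mp, taskType).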
- policy, exists := mp.TaskPolicies[taskType]
- if !exists {
- // Create generic default policy using global settings - no hardcoded fallbacks
- policy = &TaskPolicy{
- Enabled: false, // Conservative default - require explicit enabling
- MaxConcurrent: 1, // Conservative default concurrency
- RepeatInterval: mp.DefaultRepeatInterval, // Use configured default, 0 if not set
- CheckInterval: mp.DefaultCheckInterval, // Use configured default, 0 if not set
- Configuration: make(map[string]interface{}),
- }
- mp.TaskPolicies[taskType] = policy
+// GetTaskPolicy returns the policy for a specific task type
+func GetTaskPolicy(mp *MaintenancePolicy, taskType MaintenanceTaskType) *TaskPolicy {
+ if mp.TaskPolicies == nil {
+ return nil
}
-
- return policy
+ return mp.TaskPolicies[string(taskType)]
}
// SetTaskPolicy sets the policy for a specific task type
-func (mp *MaintenancePolicy) SetTaskPolicy(taskType MaintenanceTaskType, policy *TaskPolicy) {
+func SetTaskPolicy(mp *MaintenancePolicy, taskType MaintenanceTaskType, policy *TaskPolicy) {
if mp.TaskPolicies == nil {
- mp.TaskPolicies = make(map[MaintenanceTaskType]*TaskPolicy)
+ mp.TaskPolicies = make(map[string]*TaskPolicy)
}
- mp.TaskPolicies[taskType] = policy
+ mp.TaskPolicies[string(taskType)] = policy
}
// IsTaskEnabled returns whether a task type is enabled
-func (mp *MaintenancePolicy) IsTaskEnabled(taskType MaintenanceTaskType) bool {
- policy := mp.GetTaskPolicy(taskType)
+func IsTaskEnabled(mp *MaintenancePolicy, taskType MaintenanceTaskType) bool {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return false
+ }
return policy.Enabled
}
// GetMaxConcurrent returns the max concurrent limit for a task type
-func (mp *MaintenancePolicy) GetMaxConcurrent(taskType MaintenanceTaskType) int {
- policy := mp.GetTaskPolicy(taskType)
- return policy.MaxConcurrent
+func GetMaxConcurrent(mp *MaintenancePolicy, taskType MaintenanceTaskType) int {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return 1
+ }
+ return int(policy.MaxConcurrent)
}
// GetRepeatInterval returns the repeat interval for a task type
-func (mp *MaintenancePolicy) GetRepeatInterval(taskType MaintenanceTaskType) int {
- policy := mp.GetTaskPolicy(taskType)
- return policy.RepeatInterval
+func GetRepeatInterval(mp *MaintenancePolicy, taskType MaintenanceTaskType) int {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return int(mp.DefaultRepeatIntervalSeconds)
+ }
+ return int(policy.RepeatIntervalSeconds)
+}
+
+// GetVacuumTaskConfig returns the vacuum task configuration
+func GetVacuumTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.VacuumTaskConfig {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return nil
+ }
+ return policy.GetVacuumConfig()
+}
+
+// GetErasureCodingTaskConfig returns the erasure coding task configuration
+func GetErasureCodingTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.ErasureCodingTaskConfig {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return nil
+ }
+ return policy.GetErasureCodingConfig()
}
-// GetTaskConfig returns a configuration value for a task type
-func (mp *MaintenancePolicy) GetTaskConfig(taskType MaintenanceTaskType, key string) (interface{}, bool) {
- policy := mp.GetTaskPolicy(taskType)
- value, exists := policy.Configuration[key]
- return value, exists
+// GetBalanceTaskConfig returns the balance task configuration
+func GetBalanceTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.BalanceTaskConfig {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return nil
+ }
+ return policy.GetBalanceConfig()
}
-// SetTaskConfig sets a configuration value for a task type
-func (mp *MaintenancePolicy) SetTaskConfig(taskType MaintenanceTaskType, key string, value interface{}) {
- policy := mp.GetTaskPolicy(taskType)
- if policy.Configuration == nil {
- policy.Configuration = make(map[string]interface{})
+// GetReplicationTaskConfig returns the replication task configuration
+func GetReplicationTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.ReplicationTaskConfig {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy == nil {
+ return nil
}
- policy.Configuration[key] = value
+ return policy.GetReplicationConfig()
}
+// Note: GetTaskConfig was removed - use typed getters: GetVacuumTaskConfig, GetErasureCodingTaskConfig, GetBalanceTaskConfig, or GetReplicationTaskConfig
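+// For example, reading the erasure coding settings looks something like:
+//   ecCfg := GetErasureCodingTaskConfig(policy, MaintenanceTaskType("erasure_coding"))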
+
+// SetVacuumTaskConfig sets the vacuum task configuration
+func SetVacuumTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.VacuumTaskConfig) {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy != nil {
+ policy.TaskConfig = &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: config,
+ }
+ }
+}
+
+// SetErasureCodingTaskConfig sets the erasure coding task configuration
+func SetErasureCodingTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.ErasureCodingTaskConfig) {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy != nil {
+ policy.TaskConfig = &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: config,
+ }
+ }
+}
+
+// SetBalanceTaskConfig sets the balance task configuration
+func SetBalanceTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.BalanceTaskConfig) {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy != nil {
+ policy.TaskConfig = &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: config,
+ }
+ }
+}
+
+// SetReplicationTaskConfig sets the replication task configuration
+func SetReplicationTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.ReplicationTaskConfig) {
+ policy := GetTaskPolicy(mp, taskType)
+ if policy != nil {
+ policy.TaskConfig = &worker_pb.TaskPolicy_ReplicationConfig{
+ ReplicationConfig: config,
+ }
+ }
+}
+
+// Note: SetTaskConfig was removed - use the typed setters above: SetVacuumTaskConfig, SetErasureCodingTaskConfig, SetBalanceTaskConfig, or SetReplicationTaskConfig
+
// MaintenanceWorker represents a worker instance
type MaintenanceWorker struct {
ID string `json:"id"`
@@ -217,29 +277,32 @@ type MaintenanceQueue struct {
// MaintenanceScanner analyzes the cluster and generates maintenance tasks
type MaintenanceScanner struct {
- adminClient AdminClient
- policy *MaintenancePolicy
- queue *MaintenanceQueue
- lastScan map[MaintenanceTaskType]time.Time
- integration *MaintenanceIntegration
+ adminClient AdminClient
+ policy *MaintenancePolicy
+ queue *MaintenanceQueue
+ lastScan map[MaintenanceTaskType]time.Time
+ integration *MaintenanceIntegration
+ lastTopologyInfo *master_pb.TopologyInfo
}
// TaskDetectionResult represents the result of scanning for maintenance needs
type TaskDetectionResult struct {
- TaskType MaintenanceTaskType `json:"task_type"`
- VolumeID uint32 `json:"volume_id,omitempty"`
- Server string `json:"server,omitempty"`
- Collection string `json:"collection,omitempty"`
- Priority MaintenanceTaskPriority `json:"priority"`
- Reason string `json:"reason"`
- Parameters map[string]interface{} `json:"parameters,omitempty"`
- ScheduleAt time.Time `json:"schedule_at"`
+ TaskType MaintenanceTaskType `json:"task_type"`
+ VolumeID uint32 `json:"volume_id,omitempty"`
+ Server string `json:"server,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ Priority MaintenanceTaskPriority `json:"priority"`
+ Reason string `json:"reason"`
+ TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"`
+ ScheduleAt time.Time `json:"schedule_at"`
}
-// VolumeHealthMetrics contains health information about a volume
+// VolumeHealthMetrics represents the health metrics for a volume
type VolumeHealthMetrics struct {
VolumeID uint32 `json:"volume_id"`
Server string `json:"server"`
+ DiskType string `json:"disk_type"` // Disk type (e.g., "hdd", "ssd") or disk path (e.g., "/data1")
+ DiskId uint32 `json:"disk_id"` // ID of the disk in Store.Locations array
Collection string `json:"collection"`
Size uint64 `json:"size"`
DeletedBytes uint64 `json:"deleted_bytes"`
@@ -267,38 +330,6 @@ type MaintenanceStats struct {
NextScanTime time.Time `json:"next_scan_time"`
}
-// MaintenanceConfig holds configuration for the maintenance system
-type MaintenanceConfig struct {
- Enabled bool `json:"enabled"`
- ScanIntervalSeconds int `json:"scan_interval_seconds"` // How often to scan for maintenance needs (in seconds)
- WorkerTimeoutSeconds int `json:"worker_timeout_seconds"` // Worker heartbeat timeout (in seconds)
- TaskTimeoutSeconds int `json:"task_timeout_seconds"` // Individual task timeout (in seconds)
- RetryDelaySeconds int `json:"retry_delay_seconds"` // Delay between retries (in seconds)
- MaxRetries int `json:"max_retries"` // Default max retries for tasks
- CleanupIntervalSeconds int `json:"cleanup_interval_seconds"` // How often to clean up old tasks (in seconds)
- TaskRetentionSeconds int `json:"task_retention_seconds"` // How long to keep completed/failed tasks (in seconds)
- Policy *MaintenancePolicy `json:"policy"`
-}
-
-// Default configuration values
-func DefaultMaintenanceConfig() *MaintenanceConfig {
- return &MaintenanceConfig{
- Enabled: false, // Disabled by default for safety
- ScanIntervalSeconds: 30 * 60, // 30 minutes
- WorkerTimeoutSeconds: 5 * 60, // 5 minutes
- TaskTimeoutSeconds: 2 * 60 * 60, // 2 hours
- RetryDelaySeconds: 15 * 60, // 15 minutes
- MaxRetries: 3,
- CleanupIntervalSeconds: 24 * 60 * 60, // 24 hours
- TaskRetentionSeconds: 7 * 24 * 60 * 60, // 7 days
- Policy: &MaintenancePolicy{
- GlobalMaxConcurrent: 4,
- DefaultRepeatInterval: 6,
- DefaultCheckInterval: 12,
- },
- }
-}
-
// MaintenanceQueueData represents data for the queue visualization UI
type MaintenanceQueueData struct {
Tasks []*MaintenanceTask `json:"tasks"`
@@ -380,10 +411,10 @@ type ClusterReplicationTask struct {
// from all registered tasks using their UI providers
func BuildMaintenancePolicyFromTasks() *MaintenancePolicy {
policy := &MaintenancePolicy{
- TaskPolicies: make(map[MaintenanceTaskType]*TaskPolicy),
- GlobalMaxConcurrent: 4,
- DefaultRepeatInterval: 6,
- DefaultCheckInterval: 12,
+ TaskPolicies: make(map[string]*TaskPolicy),
+ GlobalMaxConcurrent: 4,
+ DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds
+ DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds
}
// Get all registered task types from the UI registry
@@ -399,32 +430,23 @@ func BuildMaintenancePolicyFromTasks() *MaintenancePolicy {
// Create task policy from UI configuration
taskPolicy := &TaskPolicy{
- Enabled: true, // Default enabled
- MaxConcurrent: 2, // Default concurrency
- RepeatInterval: policy.DefaultRepeatInterval,
- CheckInterval: policy.DefaultCheckInterval,
- Configuration: make(map[string]interface{}),
+ Enabled: true, // Default enabled
+ MaxConcurrent: 2, // Default concurrency
+ RepeatIntervalSeconds: policy.DefaultRepeatIntervalSeconds,
+ CheckIntervalSeconds: policy.DefaultCheckIntervalSeconds,
}
- // Extract configuration from UI provider's config
- if configMap, ok := defaultConfig.(map[string]interface{}); ok {
- // Copy all configuration values
- for key, value := range configMap {
- taskPolicy.Configuration[key] = value
+ // Extract configuration using TaskConfig interface - no more map conversions!
+ if taskConfig, ok := defaultConfig.(interface{ ToTaskPolicy() *worker_pb.TaskPolicy }); ok {
+ // Use protobuf directly for clean, type-safe config extraction
+ pbTaskPolicy := taskConfig.ToTaskPolicy()
+ taskPolicy.Enabled = pbTaskPolicy.Enabled
+ taskPolicy.MaxConcurrent = pbTaskPolicy.MaxConcurrent
+ if pbTaskPolicy.RepeatIntervalSeconds > 0 {
+ taskPolicy.RepeatIntervalSeconds = pbTaskPolicy.RepeatIntervalSeconds
}
-
- // Extract common fields
- if enabled, exists := configMap["enabled"]; exists {
- if enabledBool, ok := enabled.(bool); ok {
- taskPolicy.Enabled = enabledBool
- }
- }
- if maxConcurrent, exists := configMap["max_concurrent"]; exists {
- if maxConcurrentInt, ok := maxConcurrent.(int); ok {
- taskPolicy.MaxConcurrent = maxConcurrentInt
- } else if maxConcurrentFloat, ok := maxConcurrent.(float64); ok {
- taskPolicy.MaxConcurrent = int(maxConcurrentFloat)
- }
+ if pbTaskPolicy.CheckIntervalSeconds > 0 {
+ taskPolicy.CheckIntervalSeconds = pbTaskPolicy.CheckIntervalSeconds
}
}
@@ -432,24 +454,24 @@ func BuildMaintenancePolicyFromTasks() *MaintenancePolicy {
var scheduler types.TaskScheduler = typesRegistry.GetScheduler(taskType)
if scheduler != nil {
if taskPolicy.MaxConcurrent <= 0 {
- taskPolicy.MaxConcurrent = scheduler.GetMaxConcurrent()
+ taskPolicy.MaxConcurrent = int32(scheduler.GetMaxConcurrent())
}
- // Convert default repeat interval to hours
+ // Convert default repeat interval to seconds
if repeatInterval := scheduler.GetDefaultRepeatInterval(); repeatInterval > 0 {
- taskPolicy.RepeatInterval = int(repeatInterval.Hours())
+ taskPolicy.RepeatIntervalSeconds = int32(repeatInterval.Seconds())
}
}
// Also get defaults from detector if available (using types.TaskDetector explicitly)
var detector types.TaskDetector = typesRegistry.GetDetector(taskType)
if detector != nil {
- // Convert scan interval to check interval (hours)
+ // Convert scan interval to check interval (seconds)
if scanInterval := detector.ScanInterval(); scanInterval > 0 {
- taskPolicy.CheckInterval = int(scanInterval.Hours())
+ taskPolicy.CheckIntervalSeconds = int32(scanInterval.Seconds())
}
}
- policy.TaskPolicies[maintenanceTaskType] = taskPolicy
+ policy.TaskPolicies[string(maintenanceTaskType)] = taskPolicy
glog.V(3).Infof("Built policy for task type %s: enabled=%v, max_concurrent=%d",
maintenanceTaskType, taskPolicy.Enabled, taskPolicy.MaxConcurrent)
}
@@ -558,3 +580,8 @@ func BuildMaintenanceMenuItems() []*MaintenanceMenuItem {
return menuItems
}
+
+// Note: The per-field helpers getVacuumConfigField, getErasureCodingConfigField, getBalanceConfigField,
+// and getReplicationConfigField were removed; they were orphaned after removing GetTaskConfig.
+// Use the typed getters (GetVacuumTaskConfig, GetErasureCodingTaskConfig, GetBalanceTaskConfig, GetReplicationTaskConfig) instead.
diff --git a/weed/admin/maintenance/maintenance_worker.go b/weed/admin/maintenance/maintenance_worker.go
index ab2157f24..96e17f9e9 100644
--- a/weed/admin/maintenance/maintenance_worker.go
+++ b/weed/admin/maintenance/maintenance_worker.go
@@ -7,6 +7,7 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
@@ -145,15 +146,20 @@ func NewMaintenanceWorkerService(workerID, address, adminServer string) *Mainten
func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) error {
glog.V(2).Infof("Executing generic task %s: %s for volume %d", task.ID, task.Type, task.VolumeID)
+ // Validate that task has proper typed parameters
+ if task.TypedParams == nil {
+ return fmt.Errorf("task %s has no typed parameters - task was not properly planned (insufficient destinations)", task.ID)
+ }
+
// Convert MaintenanceTask to types.TaskType
taskType := types.TaskType(string(task.Type))
// Create task parameters
taskParams := types.TaskParams{
- VolumeID: task.VolumeID,
- Server: task.Server,
- Collection: task.Collection,
- Parameters: task.Parameters,
+ VolumeID: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ TypedParams: task.TypedParams,
}
// Create task instance using the registry
@@ -396,10 +402,19 @@ func NewMaintenanceWorkerCommand(workerID, address, adminServer string) *Mainten
// Run starts the maintenance worker as a standalone service
func (mwc *MaintenanceWorkerCommand) Run() error {
- // Generate worker ID if not provided
+ // Generate or load persistent worker ID if not provided
if mwc.workerService.workerID == "" {
- hostname, _ := os.Hostname()
- mwc.workerService.workerID = fmt.Sprintf("worker-%s-%d", hostname, time.Now().Unix())
+ // Get current working directory for worker ID persistence
+ wd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf("failed to get working directory: %w", err)
+ }
+
+ workerID, err := worker.GenerateOrLoadWorkerID(wd)
+ if err != nil {
+ return fmt.Errorf("failed to generate or load worker ID: %w", err)
+ }
+ mwc.workerService.workerID = workerID
}
// Start the worker service
diff --git a/weed/admin/maintenance/pending_operations.go b/weed/admin/maintenance/pending_operations.go
new file mode 100644
index 000000000..16130b4c9
--- /dev/null
+++ b/weed/admin/maintenance/pending_operations.go
@@ -0,0 +1,311 @@
+package maintenance
+
+import (
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// PendingOperationType represents the type of pending operation
+type PendingOperationType string
+
+const (
+ OpTypeVolumeMove PendingOperationType = "volume_move"
+ OpTypeVolumeBalance PendingOperationType = "volume_balance"
+ OpTypeErasureCoding PendingOperationType = "erasure_coding"
+ OpTypeVacuum PendingOperationType = "vacuum"
+ OpTypeReplication PendingOperationType = "replication"
+)
+
+// PendingOperation represents a pending volume/shard operation
+type PendingOperation struct {
+ VolumeID uint32 `json:"volume_id"`
+ OperationType PendingOperationType `json:"operation_type"`
+ SourceNode string `json:"source_node"`
+ DestNode string `json:"dest_node,omitempty"` // Empty for non-movement operations
+ TaskID string `json:"task_id"`
+ StartTime time.Time `json:"start_time"`
+ EstimatedSize uint64 `json:"estimated_size"` // Bytes
+ Collection string `json:"collection"`
+ Status string `json:"status"` // "assigned", "in_progress", "completing"
+}
+
+// PendingOperations tracks all pending volume/shard operations
+type PendingOperations struct {
+ // Operations by volume ID for conflict detection
+ byVolumeID map[uint32]*PendingOperation
+
+ // Operations by task ID for updates
+ byTaskID map[string]*PendingOperation
+
+ // Operations by node for capacity calculations
+ bySourceNode map[string][]*PendingOperation
+ byDestNode map[string][]*PendingOperation
+
+ mutex sync.RWMutex
+}
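+
+// Typical lifecycle, as driven by MaintenanceQueue: AddOperation when a task is assigned,
+// UpdateOperationStatus as the worker reports progress, and RemoveOperation once the task
+// completes or fails. FilterVolumeMetricsExcludingPending and WouldConflictWithPending let
+// callers avoid scheduling overlapping work on volumes that already have an operation in flight.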
+
+// NewPendingOperations creates a new pending operations tracker
+func NewPendingOperations() *PendingOperations {
+ return &PendingOperations{
+ byVolumeID: make(map[uint32]*PendingOperation),
+ byTaskID: make(map[string]*PendingOperation),
+ bySourceNode: make(map[string][]*PendingOperation),
+ byDestNode: make(map[string][]*PendingOperation),
+ }
+}
+
+// AddOperation adds a pending operation
+func (po *PendingOperations) AddOperation(op *PendingOperation) {
+ po.mutex.Lock()
+ defer po.mutex.Unlock()
+
+ // Check for existing operation on this volume
+ if existing, exists := po.byVolumeID[op.VolumeID]; exists {
+ glog.V(1).Infof("Replacing existing pending operation on volume %d: %s -> %s",
+ op.VolumeID, existing.TaskID, op.TaskID)
+ po.removeOperationUnlocked(existing)
+ }
+
+ // Add new operation
+ po.byVolumeID[op.VolumeID] = op
+ po.byTaskID[op.TaskID] = op
+
+ // Add to node indexes
+ po.bySourceNode[op.SourceNode] = append(po.bySourceNode[op.SourceNode], op)
+ if op.DestNode != "" {
+ po.byDestNode[op.DestNode] = append(po.byDestNode[op.DestNode], op)
+ }
+
+ glog.V(2).Infof("Added pending operation: volume %d, type %s, task %s, %s -> %s",
+ op.VolumeID, op.OperationType, op.TaskID, op.SourceNode, op.DestNode)
+}
+
+// RemoveOperation removes a completed operation
+func (po *PendingOperations) RemoveOperation(taskID string) {
+ po.mutex.Lock()
+ defer po.mutex.Unlock()
+
+ if op, exists := po.byTaskID[taskID]; exists {
+ po.removeOperationUnlocked(op)
+ glog.V(2).Infof("Removed completed operation: volume %d, task %s", op.VolumeID, taskID)
+ }
+}
+
+// removeOperationUnlocked removes an operation (must hold lock)
+func (po *PendingOperations) removeOperationUnlocked(op *PendingOperation) {
+ delete(po.byVolumeID, op.VolumeID)
+ delete(po.byTaskID, op.TaskID)
+
+ // Remove from source node list
+ if ops, exists := po.bySourceNode[op.SourceNode]; exists {
+ for i, other := range ops {
+ if other.TaskID == op.TaskID {
+ po.bySourceNode[op.SourceNode] = append(ops[:i], ops[i+1:]...)
+ break
+ }
+ }
+ }
+
+ // Remove from dest node list
+ if op.DestNode != "" {
+ if ops, exists := po.byDestNode[op.DestNode]; exists {
+ for i, other := range ops {
+ if other.TaskID == op.TaskID {
+ po.byDestNode[op.DestNode] = append(ops[:i], ops[i+1:]...)
+ break
+ }
+ }
+ }
+ }
+}
+
+// HasPendingOperationOnVolume checks if a volume has a pending operation
+func (po *PendingOperations) HasPendingOperationOnVolume(volumeID uint32) bool {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ _, exists := po.byVolumeID[volumeID]
+ return exists
+}
+
+// GetPendingOperationOnVolume returns the pending operation on a volume
+func (po *PendingOperations) GetPendingOperationOnVolume(volumeID uint32) *PendingOperation {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ return po.byVolumeID[volumeID]
+}
+
+// WouldConflictWithPending checks if a new operation would conflict with pending ones.
+// Currently any pending operation on the volume counts as a conflict, regardless of opType.
+func (po *PendingOperations) WouldConflictWithPending(volumeID uint32, opType PendingOperationType) bool {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ if existing, exists := po.byVolumeID[volumeID]; exists {
+ // Volume already has a pending operation
+ glog.V(3).Infof("Volume %d conflict: already has %s operation (task %s)",
+ volumeID, existing.OperationType, existing.TaskID)
+ return true
+ }
+
+ return false
+}
+
+// GetPendingCapacityImpactForNode calculates pending capacity changes for a node
+func (po *PendingOperations) GetPendingCapacityImpactForNode(nodeID string) (incoming uint64, outgoing uint64) {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ // Calculate outgoing capacity (volumes leaving this node)
+ if ops, exists := po.bySourceNode[nodeID]; exists {
+ for _, op := range ops {
+ // Only count movement operations
+ if op.DestNode != "" {
+ outgoing += op.EstimatedSize
+ }
+ }
+ }
+
+ // Calculate incoming capacity (volumes coming to this node)
+ if ops, exists := po.byDestNode[nodeID]; exists {
+ for _, op := range ops {
+ incoming += op.EstimatedSize
+ }
+ }
+
+ return incoming, outgoing
+}
+
+// FilterVolumeMetricsExcludingPending filters out volumes with pending operations
+func (po *PendingOperations) FilterVolumeMetricsExcludingPending(metrics []*types.VolumeHealthMetrics) []*types.VolumeHealthMetrics {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ var filtered []*types.VolumeHealthMetrics
+ excludedCount := 0
+
+ for _, metric := range metrics {
+ if _, hasPending := po.byVolumeID[metric.VolumeID]; !hasPending {
+ filtered = append(filtered, metric)
+ } else {
+ excludedCount++
+ glog.V(3).Infof("Excluding volume %d from scan due to pending operation", metric.VolumeID)
+ }
+ }
+
+ if excludedCount > 0 {
+ glog.V(1).Infof("Filtered out %d volumes with pending operations from %d total volumes",
+ excludedCount, len(metrics))
+ }
+
+ return filtered
+}
+
+// GetNodeCapacityProjection calculates projected capacity for a node
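+// For example (see TestPendingOperations_CapacityProjection): with 10GB currently used,
+// 50GB total capacity, 1GB of pending incoming moves and 2GB of pending outgoing moves,
+// the projection is 9GB used and 41GB free. The arithmetic is unsigned, so this assumes
+// pending outgoing size never exceeds current usage plus pending incoming.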
+func (po *PendingOperations) GetNodeCapacityProjection(nodeID string, currentUsed uint64, totalCapacity uint64) NodeCapacityProjection {
+ incoming, outgoing := po.GetPendingCapacityImpactForNode(nodeID)
+
+ projectedUsed := currentUsed + incoming - outgoing
+ projectedFree := totalCapacity - projectedUsed
+
+ return NodeCapacityProjection{
+ NodeID: nodeID,
+ CurrentUsed: currentUsed,
+ TotalCapacity: totalCapacity,
+ PendingIncoming: incoming,
+ PendingOutgoing: outgoing,
+ ProjectedUsed: projectedUsed,
+ ProjectedFree: projectedFree,
+ }
+}
+
+// GetAllPendingOperations returns all pending operations
+func (po *PendingOperations) GetAllPendingOperations() []*PendingOperation {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ var operations []*PendingOperation
+ for _, op := range po.byVolumeID {
+ operations = append(operations, op)
+ }
+
+ return operations
+}
+
+// UpdateOperationStatus updates the status of a pending operation
+func (po *PendingOperations) UpdateOperationStatus(taskID string, status string) {
+ po.mutex.Lock()
+ defer po.mutex.Unlock()
+
+ if op, exists := po.byTaskID[taskID]; exists {
+ op.Status = status
+ glog.V(3).Infof("Updated operation status: task %s, volume %d -> %s", taskID, op.VolumeID, status)
+ }
+}
+
+// CleanupStaleOperations removes operations that have been running too long
+func (po *PendingOperations) CleanupStaleOperations(maxAge time.Duration) int {
+ po.mutex.Lock()
+ defer po.mutex.Unlock()
+
+ cutoff := time.Now().Add(-maxAge)
+ var staleOps []*PendingOperation
+
+ for _, op := range po.byVolumeID {
+ if op.StartTime.Before(cutoff) {
+ staleOps = append(staleOps, op)
+ }
+ }
+
+ for _, op := range staleOps {
+ po.removeOperationUnlocked(op)
+ glog.Warningf("Removed stale pending operation: volume %d, task %s, age %v",
+ op.VolumeID, op.TaskID, time.Since(op.StartTime))
+ }
+
+ return len(staleOps)
+}
+
+// NodeCapacityProjection represents projected capacity for a node
+type NodeCapacityProjection struct {
+ NodeID string `json:"node_id"`
+ CurrentUsed uint64 `json:"current_used"`
+ TotalCapacity uint64 `json:"total_capacity"`
+ PendingIncoming uint64 `json:"pending_incoming"`
+ PendingOutgoing uint64 `json:"pending_outgoing"`
+ ProjectedUsed uint64 `json:"projected_used"`
+ ProjectedFree uint64 `json:"projected_free"`
+}
+
+// GetStats returns statistics about pending operations
+func (po *PendingOperations) GetStats() PendingOperationsStats {
+ po.mutex.RLock()
+ defer po.mutex.RUnlock()
+
+ stats := PendingOperationsStats{
+ TotalOperations: len(po.byVolumeID),
+ ByType: make(map[PendingOperationType]int),
+ ByStatus: make(map[string]int),
+ }
+
+ var totalSize uint64
+ for _, op := range po.byVolumeID {
+ stats.ByType[op.OperationType]++
+ stats.ByStatus[op.Status]++
+ totalSize += op.EstimatedSize
+ }
+
+ stats.TotalEstimatedSize = totalSize
+ return stats
+}
+
+// PendingOperationsStats provides statistics about pending operations
+type PendingOperationsStats struct {
+ TotalOperations int `json:"total_operations"`
+ ByType map[PendingOperationType]int `json:"by_type"`
+ ByStatus map[string]int `json:"by_status"`
+ TotalEstimatedSize uint64 `json:"total_estimated_size"`
+}
diff --git a/weed/admin/maintenance/pending_operations_test.go b/weed/admin/maintenance/pending_operations_test.go
new file mode 100644
index 000000000..64bb591fb
--- /dev/null
+++ b/weed/admin/maintenance/pending_operations_test.go
@@ -0,0 +1,250 @@
+package maintenance
+
+import (
+ "testing"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+func TestPendingOperations_ConflictDetection(t *testing.T) {
+ pendingOps := NewPendingOperations()
+
+ // Add a pending erasure coding operation on volume 123
+ op := &PendingOperation{
+ VolumeID: 123,
+ OperationType: OpTypeErasureCoding,
+ SourceNode: "node1",
+ TaskID: "task-001",
+ StartTime: time.Now(),
+ EstimatedSize: 1024 * 1024 * 1024, // 1GB
+ Collection: "test",
+ Status: "assigned",
+ }
+
+ pendingOps.AddOperation(op)
+
+ // Test conflict detection
+ if !pendingOps.HasPendingOperationOnVolume(123) {
+ t.Errorf("Expected volume 123 to have pending operation")
+ }
+
+ if !pendingOps.WouldConflictWithPending(123, OpTypeVacuum) {
+ t.Errorf("Expected conflict when trying to add vacuum operation on volume 123")
+ }
+
+ if pendingOps.HasPendingOperationOnVolume(124) {
+ t.Errorf("Expected volume 124 to have no pending operation")
+ }
+
+ if pendingOps.WouldConflictWithPending(124, OpTypeVacuum) {
+ t.Errorf("Expected no conflict for volume 124")
+ }
+}
+
+func TestPendingOperations_CapacityProjection(t *testing.T) {
+ pendingOps := NewPendingOperations()
+
+ // Add operation moving volume from node1 to node2
+ op1 := &PendingOperation{
+ VolumeID: 100,
+ OperationType: OpTypeVolumeMove,
+ SourceNode: "node1",
+ DestNode: "node2",
+ TaskID: "task-001",
+ StartTime: time.Now(),
+ EstimatedSize: 2 * 1024 * 1024 * 1024, // 2GB
+ Collection: "test",
+ Status: "in_progress",
+ }
+
+ // Add operation moving volume from node3 to node1
+ op2 := &PendingOperation{
+ VolumeID: 101,
+ OperationType: OpTypeVolumeMove,
+ SourceNode: "node3",
+ DestNode: "node1",
+ TaskID: "task-002",
+ StartTime: time.Now(),
+ EstimatedSize: 1 * 1024 * 1024 * 1024, // 1GB
+ Collection: "test",
+ Status: "assigned",
+ }
+
+ pendingOps.AddOperation(op1)
+ pendingOps.AddOperation(op2)
+
+ // Test capacity impact for node1
+ incoming, outgoing := pendingOps.GetPendingCapacityImpactForNode("node1")
+ expectedIncoming := uint64(1 * 1024 * 1024 * 1024) // 1GB incoming
+ expectedOutgoing := uint64(2 * 1024 * 1024 * 1024) // 2GB outgoing
+
+ if incoming != expectedIncoming {
+ t.Errorf("Expected incoming capacity %d, got %d", expectedIncoming, incoming)
+ }
+
+ if outgoing != expectedOutgoing {
+ t.Errorf("Expected outgoing capacity %d, got %d", expectedOutgoing, outgoing)
+ }
+
+ // Test projection for node1
+ currentUsed := uint64(10 * 1024 * 1024 * 1024) // 10GB current
+ totalCapacity := uint64(50 * 1024 * 1024 * 1024) // 50GB total
+
+ projection := pendingOps.GetNodeCapacityProjection("node1", currentUsed, totalCapacity)
+
+ expectedProjectedUsed := currentUsed + incoming - outgoing // 10 + 1 - 2 = 9GB
+ expectedProjectedFree := totalCapacity - expectedProjectedUsed // 50 - 9 = 41GB
+
+ if projection.ProjectedUsed != expectedProjectedUsed {
+ t.Errorf("Expected projected used %d, got %d", expectedProjectedUsed, projection.ProjectedUsed)
+ }
+
+ if projection.ProjectedFree != expectedProjectedFree {
+ t.Errorf("Expected projected free %d, got %d", expectedProjectedFree, projection.ProjectedFree)
+ }
+}
+
+func TestPendingOperations_VolumeFiltering(t *testing.T) {
+ pendingOps := NewPendingOperations()
+
+ // Create volume metrics
+ metrics := []*types.VolumeHealthMetrics{
+ {VolumeID: 100, Server: "node1"},
+ {VolumeID: 101, Server: "node2"},
+ {VolumeID: 102, Server: "node3"},
+ {VolumeID: 103, Server: "node1"},
+ }
+
+ // Add pending operations on volumes 101 and 103
+ op1 := &PendingOperation{
+ VolumeID: 101,
+ OperationType: OpTypeVacuum,
+ SourceNode: "node2",
+ TaskID: "task-001",
+ StartTime: time.Now(),
+ EstimatedSize: 1024 * 1024 * 1024,
+ Status: "in_progress",
+ }
+
+ op2 := &PendingOperation{
+ VolumeID: 103,
+ OperationType: OpTypeErasureCoding,
+ SourceNode: "node1",
+ TaskID: "task-002",
+ StartTime: time.Now(),
+ EstimatedSize: 2 * 1024 * 1024 * 1024,
+ Status: "assigned",
+ }
+
+ pendingOps.AddOperation(op1)
+ pendingOps.AddOperation(op2)
+
+ // Filter metrics
+ filtered := pendingOps.FilterVolumeMetricsExcludingPending(metrics)
+
+ // Should only have volumes 100 and 102 (101 and 103 are filtered out)
+ if len(filtered) != 2 {
+ t.Errorf("Expected 2 filtered metrics, got %d", len(filtered))
+ }
+
+ // Check that correct volumes remain
+ foundVolumes := make(map[uint32]bool)
+ for _, metric := range filtered {
+ foundVolumes[metric.VolumeID] = true
+ }
+
+ if !foundVolumes[100] || !foundVolumes[102] {
+ t.Errorf("Expected volumes 100 and 102 to remain after filtering")
+ }
+
+ if foundVolumes[101] || foundVolumes[103] {
+ t.Errorf("Expected volumes 101 and 103 to be filtered out")
+ }
+}
+
+func TestPendingOperations_OperationLifecycle(t *testing.T) {
+ pendingOps := NewPendingOperations()
+
+ // Add operation
+ op := &PendingOperation{
+ VolumeID: 200,
+ OperationType: OpTypeVolumeBalance,
+ SourceNode: "node1",
+ DestNode: "node2",
+ TaskID: "task-balance-001",
+ StartTime: time.Now(),
+ EstimatedSize: 1024 * 1024 * 1024,
+ Status: "assigned",
+ }
+
+ pendingOps.AddOperation(op)
+
+ // Check it exists
+ if !pendingOps.HasPendingOperationOnVolume(200) {
+ t.Errorf("Expected volume 200 to have pending operation")
+ }
+
+ // Update status
+ pendingOps.UpdateOperationStatus("task-balance-001", "in_progress")
+
+ retrievedOp := pendingOps.GetPendingOperationOnVolume(200)
+ if retrievedOp == nil {
+ t.Errorf("Expected to retrieve pending operation for volume 200")
+ } else if retrievedOp.Status != "in_progress" {
+ t.Errorf("Expected operation status to be 'in_progress', got '%s'", retrievedOp.Status)
+ }
+
+ // Complete operation
+ pendingOps.RemoveOperation("task-balance-001")
+
+ if pendingOps.HasPendingOperationOnVolume(200) {
+ t.Errorf("Expected volume 200 to have no pending operation after removal")
+ }
+}
+
+func TestPendingOperations_StaleCleanup(t *testing.T) {
+ pendingOps := NewPendingOperations()
+
+ // Add recent operation
+ recentOp := &PendingOperation{
+ VolumeID: 300,
+ OperationType: OpTypeVacuum,
+ SourceNode: "node1",
+ TaskID: "task-recent",
+ StartTime: time.Now(),
+ EstimatedSize: 1024 * 1024 * 1024,
+ Status: "in_progress",
+ }
+
+ // Add stale operation (24 hours ago)
+ staleOp := &PendingOperation{
+ VolumeID: 301,
+ OperationType: OpTypeErasureCoding,
+ SourceNode: "node2",
+ TaskID: "task-stale",
+ StartTime: time.Now().Add(-24 * time.Hour),
+ EstimatedSize: 2 * 1024 * 1024 * 1024,
+ Status: "in_progress",
+ }
+
+ pendingOps.AddOperation(recentOp)
+ pendingOps.AddOperation(staleOp)
+
+ // Clean up operations older than 1 hour
+ removedCount := pendingOps.CleanupStaleOperations(1 * time.Hour)
+
+ if removedCount != 1 {
+ t.Errorf("Expected to remove 1 stale operation, removed %d", removedCount)
+ }
+
+ // Recent operation should still exist
+ if !pendingOps.HasPendingOperationOnVolume(300) {
+ t.Errorf("Expected recent operation on volume 300 to still exist")
+ }
+
+ // Stale operation should be removed
+ if pendingOps.HasPendingOperationOnVolume(301) {
+ t.Errorf("Expected stale operation on volume 301 to be removed")
+ }
+}
diff --git a/weed/admin/static/css/admin.css b/weed/admin/static/css/admin.css
index c69876060..a945d320e 100644
--- a/weed/admin/static/css/admin.css
+++ b/weed/admin/static/css/admin.css
@@ -9,6 +9,7 @@
z-index: 100;
padding: 48px 0 0;
box-shadow: inset -1px 0 0 rgba(0, 0, 0, .1);
+ overflow-y: auto;
}
.sidebar-heading {
diff --git a/weed/admin/topology/active_topology.go b/weed/admin/topology/active_topology.go
new file mode 100644
index 000000000..9ce63bfa7
--- /dev/null
+++ b/weed/admin/topology/active_topology.go
@@ -0,0 +1,741 @@
+package topology
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+)
+
+// TaskType represents different types of maintenance operations
+type TaskType string
+
+// TaskStatus represents the current status of a task
+type TaskStatus string
+
+// Common task type constants
+const (
+ TaskTypeVacuum TaskType = "vacuum"
+ TaskTypeBalance TaskType = "balance"
+ TaskTypeErasureCoding TaskType = "erasure_coding"
+ TaskTypeReplication TaskType = "replication"
+)
+
+// Common task status constants
+const (
+ TaskStatusPending TaskStatus = "pending"
+ TaskStatusInProgress TaskStatus = "in_progress"
+ TaskStatusCompleted TaskStatus = "completed"
+)
+
+// taskState represents the current state of tasks affecting the topology (internal)
+type taskState struct {
+ VolumeID uint32 `json:"volume_id"`
+ TaskType TaskType `json:"task_type"`
+ SourceServer string `json:"source_server"`
+ SourceDisk uint32 `json:"source_disk"`
+ TargetServer string `json:"target_server,omitempty"`
+ TargetDisk uint32 `json:"target_disk,omitempty"`
+ Status TaskStatus `json:"status"`
+ StartedAt time.Time `json:"started_at"`
+ CompletedAt time.Time `json:"completed_at,omitempty"`
+}
+
+// DiskInfo represents a disk with its current state and ongoing tasks (public for external access)
+type DiskInfo struct {
+ NodeID string `json:"node_id"`
+ DiskID uint32 `json:"disk_id"`
+ DiskType string `json:"disk_type"`
+ DataCenter string `json:"data_center"`
+ Rack string `json:"rack"`
+ DiskInfo *master_pb.DiskInfo `json:"disk_info"`
+ LoadCount int `json:"load_count"` // Number of active tasks
+}
+
+// activeDisk represents internal disk state (private)
+type activeDisk struct {
+ *DiskInfo
+ pendingTasks []*taskState
+ assignedTasks []*taskState
+ recentTasks []*taskState // Completed in last N seconds
+}
+
+// activeNode represents a node with its disks (private)
+type activeNode struct {
+ nodeID string
+ dataCenter string
+ rack string
+ nodeInfo *master_pb.DataNodeInfo
+ disks map[uint32]*activeDisk // DiskID -> activeDisk
+}
+
+// ActiveTopology provides a real-time view of cluster state with task awareness
+type ActiveTopology struct {
+ // Core topology from master
+ topologyInfo *master_pb.TopologyInfo
+ lastUpdated time.Time
+
+ // Structured topology for easy access (private)
+ nodes map[string]*activeNode // NodeID -> activeNode
+ disks map[string]*activeDisk // "NodeID:DiskID" -> activeDisk
+
+ // Task states affecting the topology (private)
+ pendingTasks map[string]*taskState
+ assignedTasks map[string]*taskState
+ recentTasks map[string]*taskState
+
+ // Configuration
+ recentTaskWindowSeconds int
+
+ // Synchronization
+ mutex sync.RWMutex
+}
+
+// NewActiveTopology creates a new ActiveTopology instance
+func NewActiveTopology(recentTaskWindowSeconds int) *ActiveTopology {
+ if recentTaskWindowSeconds <= 0 {
+ recentTaskWindowSeconds = 10 // Default 10 seconds
+ }
+
+ return &ActiveTopology{
+ nodes: make(map[string]*activeNode),
+ disks: make(map[string]*activeDisk),
+ pendingTasks: make(map[string]*taskState),
+ assignedTasks: make(map[string]*taskState),
+ recentTasks: make(map[string]*taskState),
+ recentTaskWindowSeconds: recentTaskWindowSeconds,
+ }
+}
+
+// UpdateTopology updates the topology information from master
+func (at *ActiveTopology) UpdateTopology(topologyInfo *master_pb.TopologyInfo) error {
+ at.mutex.Lock()
+ defer at.mutex.Unlock()
+
+ at.topologyInfo = topologyInfo
+ at.lastUpdated = time.Now()
+
+ // Rebuild structured topology
+ at.nodes = make(map[string]*activeNode)
+ at.disks = make(map[string]*activeDisk)
+
+ for _, dc := range topologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, nodeInfo := range rack.DataNodeInfos {
+ node := &activeNode{
+ nodeID: nodeInfo.Id,
+ dataCenter: dc.Id,
+ rack: rack.Id,
+ nodeInfo: nodeInfo,
+ disks: make(map[uint32]*activeDisk),
+ }
+
+ // Add disks for this node
+ for diskType, diskInfo := range nodeInfo.DiskInfos {
+ disk := &activeDisk{
+ DiskInfo: &DiskInfo{
+ NodeID: nodeInfo.Id,
+ DiskID: diskInfo.DiskId,
+ DiskType: diskType,
+ DataCenter: dc.Id,
+ Rack: rack.Id,
+ DiskInfo: diskInfo,
+ },
+ }
+
+ diskKey := fmt.Sprintf("%s:%d", nodeInfo.Id, diskInfo.DiskId)
+ node.disks[diskInfo.DiskId] = disk
+ at.disks[diskKey] = disk
+ }
+
+ at.nodes[nodeInfo.Id] = node
+ }
+ }
+ }
+
+ // Reassign task states to updated topology
+ at.reassignTaskStates()
+
+ glog.V(1).Infof("ActiveTopology updated: %d nodes, %d disks", len(at.nodes), len(at.disks))
+ return nil
+}
+
+// AddPendingTask adds a pending task to the topology
+func (at *ActiveTopology) AddPendingTask(taskID string, taskType TaskType, volumeID uint32,
+ sourceServer string, sourceDisk uint32, targetServer string, targetDisk uint32) {
+ at.mutex.Lock()
+ defer at.mutex.Unlock()
+
+ task := &taskState{
+ VolumeID: volumeID,
+ TaskType: taskType,
+ SourceServer: sourceServer,
+ SourceDisk: sourceDisk,
+ TargetServer: targetServer,
+ TargetDisk: targetDisk,
+ Status: TaskStatusPending,
+ StartedAt: time.Now(),
+ }
+
+ at.pendingTasks[taskID] = task
+ at.assignTaskToDisk(task)
+}
+
+// AssignTask moves a task from pending to assigned
+func (at *ActiveTopology) AssignTask(taskID string) error {
+ at.mutex.Lock()
+ defer at.mutex.Unlock()
+
+ task, exists := at.pendingTasks[taskID]
+ if !exists {
+ return fmt.Errorf("pending task %s not found", taskID)
+ }
+
+ delete(at.pendingTasks, taskID)
+ task.Status = TaskStatusInProgress
+ at.assignedTasks[taskID] = task
+ at.reassignTaskStates()
+
+ return nil
+}
+
+// CompleteTask moves a task from assigned to recent
+func (at *ActiveTopology) CompleteTask(taskID string) error {
+ at.mutex.Lock()
+ defer at.mutex.Unlock()
+
+ task, exists := at.assignedTasks[taskID]
+ if !exists {
+ return fmt.Errorf("assigned task %s not found", taskID)
+ }
+
+ delete(at.assignedTasks, taskID)
+ task.Status = TaskStatusCompleted
+ task.CompletedAt = time.Now()
+ at.recentTasks[taskID] = task
+ at.reassignTaskStates()
+
+ // Clean up old recent tasks
+ at.cleanupRecentTasks()
+
+ return nil
+}
+
+// GetAvailableDisks returns disks that can accept new tasks of the given type
+func (at *ActiveTopology) GetAvailableDisks(taskType TaskType, excludeNodeID string) []*DiskInfo {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ var available []*DiskInfo
+
+ for _, disk := range at.disks {
+ if disk.NodeID == excludeNodeID {
+ continue // Skip excluded node
+ }
+
+ if at.isDiskAvailable(disk, taskType) {
+ // Create a copy with current load count
+ diskCopy := *disk.DiskInfo
+ diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks)
+ available = append(available, &diskCopy)
+ }
+ }
+
+ return available
+}
+
+// GetDiskLoad returns the current load on a disk (number of active tasks)
+func (at *ActiveTopology) GetDiskLoad(nodeID string, diskID uint32) int {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
+ disk, exists := at.disks[diskKey]
+ if !exists {
+ return 0
+ }
+
+ return len(disk.pendingTasks) + len(disk.assignedTasks)
+}
+
+// HasRecentTaskForVolume checks if a volume had a recent task (to avoid immediate re-detection)
+func (at *ActiveTopology) HasRecentTaskForVolume(volumeID uint32, taskType TaskType) bool {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ for _, task := range at.recentTasks {
+ if task.VolumeID == volumeID && task.TaskType == taskType {
+ return true
+ }
+ }
+
+ return false
+}
+
+// GetAllNodes returns information about all nodes (public interface)
+func (at *ActiveTopology) GetAllNodes() map[string]*master_pb.DataNodeInfo {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ result := make(map[string]*master_pb.DataNodeInfo)
+ for nodeID, node := range at.nodes {
+ result[nodeID] = node.nodeInfo
+ }
+ return result
+}
+
+// GetTopologyInfo returns the current topology information (read-only access)
+func (at *ActiveTopology) GetTopologyInfo() *master_pb.TopologyInfo {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+ return at.topologyInfo
+}
+
+// GetNodeDisks returns all disks for a specific node
+func (at *ActiveTopology) GetNodeDisks(nodeID string) []*DiskInfo {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ node, exists := at.nodes[nodeID]
+ if !exists {
+ return nil
+ }
+
+ var disks []*DiskInfo
+ for _, disk := range node.disks {
+ diskCopy := *disk.DiskInfo
+ diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks)
+ disks = append(disks, &diskCopy)
+ }
+
+ return disks
+}
+
+// DestinationPlan represents a planned destination for a volume/shard operation
+type DestinationPlan struct {
+ TargetNode string `json:"target_node"`
+ TargetDisk uint32 `json:"target_disk"`
+ TargetRack string `json:"target_rack"`
+ TargetDC string `json:"target_dc"`
+ ExpectedSize uint64 `json:"expected_size"`
+ PlacementScore float64 `json:"placement_score"`
+ Conflicts []string `json:"conflicts"`
+}
+
+// MultiDestinationPlan represents multiple planned destinations for operations like EC
+type MultiDestinationPlan struct {
+ Plans []*DestinationPlan `json:"plans"`
+ TotalShards int `json:"total_shards"`
+ SuccessfulRack int `json:"successful_racks"`
+ SuccessfulDCs int `json:"successful_dcs"`
+}
+
+// PlanBalanceDestination finds the best destination for a balance operation
+func (at *ActiveTopology) PlanBalanceDestination(volumeID uint32, sourceNode string, sourceRack string, sourceDC string, volumeSize uint64) (*DestinationPlan, error) {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ // Get available disks, excluding the source node
+ availableDisks := at.getAvailableDisksForPlanning(TaskTypeBalance, sourceNode)
+ if len(availableDisks) == 0 {
+ return nil, fmt.Errorf("no available disks for balance operation")
+ }
+
+ // Score each disk for balance placement
+ bestDisk := at.selectBestBalanceDestination(availableDisks, sourceRack, sourceDC, volumeSize)
+ if bestDisk == nil {
+ return nil, fmt.Errorf("no suitable destination found for balance operation")
+ }
+
+ return &DestinationPlan{
+ TargetNode: bestDisk.NodeID,
+ TargetDisk: bestDisk.DiskID,
+ TargetRack: bestDisk.Rack,
+ TargetDC: bestDisk.DataCenter,
+ ExpectedSize: volumeSize,
+ PlacementScore: at.calculatePlacementScore(bestDisk, sourceRack, sourceDC),
+ Conflicts: at.checkPlacementConflicts(bestDisk, TaskTypeBalance),
+ }, nil
+}
+
+// PlanECDestinations finds multiple destinations for EC shard distribution
+func (at *ActiveTopology) PlanECDestinations(volumeID uint32, sourceNode string, sourceRack string, sourceDC string, shardsNeeded int) (*MultiDestinationPlan, error) {
+ at.mutex.RLock()
+ defer at.mutex.RUnlock()
+
+ // Get available disks for EC placement
+ availableDisks := at.getAvailableDisksForPlanning(TaskTypeErasureCoding, "")
+ if len(availableDisks) < shardsNeeded {
+ return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", shardsNeeded, len(availableDisks))
+ }
+
+ // Select best disks for EC placement with rack/DC diversity
+ selectedDisks := at.selectBestECDestinations(availableDisks, sourceRack, sourceDC, shardsNeeded)
+ if len(selectedDisks) < shardsNeeded {
+ return nil, fmt.Errorf("could not find %d suitable destinations for EC placement", shardsNeeded)
+ }
+
+ var plans []*DestinationPlan
+ rackCount := make(map[string]int)
+ dcCount := make(map[string]int)
+
+ for _, disk := range selectedDisks {
+ plan := &DestinationPlan{
+ TargetNode: disk.NodeID,
+ TargetDisk: disk.DiskID,
+ TargetRack: disk.Rack,
+ TargetDC: disk.DataCenter,
+ ExpectedSize: 0, // EC shards don't have a predetermined size
+ PlacementScore: at.calculatePlacementScore(disk, sourceRack, sourceDC),
+ Conflicts: at.checkPlacementConflicts(disk, TaskTypeErasureCoding),
+ }
+ plans = append(plans, plan)
+
+ // Count rack and DC diversity
+ rackKey := fmt.Sprintf("%s:%s", disk.DataCenter, disk.Rack)
+ rackCount[rackKey]++
+ dcCount[disk.DataCenter]++
+ }
+
+ return &MultiDestinationPlan{
+ Plans: plans,
+ TotalShards: len(plans),
+ SuccessfulRack: len(rackCount),
+ SuccessfulDCs: len(dcCount),
+ }, nil
+}
+
+// getAvailableDisksForPlanning returns disks available for destination planning
+func (at *ActiveTopology) getAvailableDisksForPlanning(taskType TaskType, excludeNodeID string) []*activeDisk {
+ var available []*activeDisk
+
+ for _, disk := range at.disks {
+ if excludeNodeID != "" && disk.NodeID == excludeNodeID {
+ continue // Skip excluded node
+ }
+
+ if at.isDiskAvailable(disk, taskType) {
+ available = append(available, disk)
+ }
+ }
+
+ return available
+}
+
+// selectBestBalanceDestination selects the best disk for balance operation
+func (at *ActiveTopology) selectBestBalanceDestination(disks []*activeDisk, sourceRack string, sourceDC string, volumeSize uint64) *activeDisk {
+ if len(disks) == 0 {
+ return nil
+ }
+
+ var bestDisk *activeDisk
+ bestScore := -1.0
+
+ for _, disk := range disks {
+ score := at.calculateBalanceScore(disk, sourceRack, sourceDC, volumeSize)
+ if score > bestScore {
+ bestScore = score
+ bestDisk = disk
+ }
+ }
+
+ return bestDisk
+}
+
+// selectBestECDestinations selects multiple disks for EC shard placement with diversity
+func (at *ActiveTopology) selectBestECDestinations(disks []*activeDisk, sourceRack string, sourceDC string, shardsNeeded int) []*activeDisk {
+ if len(disks) == 0 {
+ return nil
+ }
+
+ // Group disks by rack and DC for diversity
+ rackGroups := make(map[string][]*activeDisk)
+ for _, disk := range disks {
+ rackKey := fmt.Sprintf("%s:%s", disk.DataCenter, disk.Rack)
+ rackGroups[rackKey] = append(rackGroups[rackKey], disk)
+ }
+
+ var selected []*activeDisk
+ usedRacks := make(map[string]bool)
+
+ // First pass: select one disk from each rack for maximum diversity
+ for rackKey, rackDisks := range rackGroups {
+ if len(selected) >= shardsNeeded {
+ break
+ }
+
+ // Select best disk from this rack
+ bestDisk := at.selectBestFromRack(rackDisks, sourceRack, sourceDC)
+ if bestDisk != nil {
+ selected = append(selected, bestDisk)
+ usedRacks[rackKey] = true
+ }
+ }
+
+ // Second pass: if we need more disks, select from racks we've already used
+ if len(selected) < shardsNeeded {
+ for _, disk := range disks {
+ if len(selected) >= shardsNeeded {
+ break
+ }
+
+ // Skip if already selected
+ alreadySelected := false
+ for _, sel := range selected {
+ if sel.NodeID == disk.NodeID && sel.DiskID == disk.DiskID {
+ alreadySelected = true
+ break
+ }
+ }
+
+ if !alreadySelected && at.isDiskAvailable(disk, TaskTypeErasureCoding) {
+ selected = append(selected, disk)
+ }
+ }
+ }
+
+ return selected
+}
+
+// selectBestFromRack selects the best disk from a rack
+func (at *ActiveTopology) selectBestFromRack(disks []*activeDisk, sourceRack string, sourceDC string) *activeDisk {
+ if len(disks) == 0 {
+ return nil
+ }
+
+ var bestDisk *activeDisk
+ bestScore := -1.0
+
+ for _, disk := range disks {
+ if !at.isDiskAvailable(disk, TaskTypeErasureCoding) {
+ continue
+ }
+
+ score := at.calculateECScore(disk, sourceRack, sourceDC)
+ if score > bestScore {
+ bestScore = score
+ bestDisk = disk
+ }
+ }
+
+ return bestDisk
+}
+
+// calculateBalanceScore calculates placement score for balance operations
+func (at *ActiveTopology) calculateBalanceScore(disk *activeDisk, sourceRack string, sourceDC string, volumeSize uint64) float64 {
+ score := 0.0
+
+ // Prefer disks with lower load
+ activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
+ score += (2.0 - float64(activeLoad)) * 40.0 // Max 80 points for load
+
+ // Prefer disks with more free space
+ if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
+ freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
+ score += freeRatio * 20.0 // Max 20 points for free space
+ }
+
+ // Rack diversity bonus (prefer different rack)
+ if disk.Rack != sourceRack {
+ score += 10.0
+ }
+
+ // DC diversity bonus (prefer different DC)
+ if disk.DataCenter != sourceDC {
+ score += 5.0
+ }
+
+ return score
+}
+
+// calculateECScore calculates placement score for EC operations
+func (at *ActiveTopology) calculateECScore(disk *activeDisk, sourceRack string, sourceDC string) float64 {
+ score := 0.0
+
+ // Prefer disks with lower load
+ activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
+ score += (2.0 - float64(activeLoad)) * 30.0 // Max 60 points for load
+
+ // Prefer disks with more free space
+ if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
+ freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
+ score += freeRatio * 20.0 // Max 20 points for free space
+ }
+
+ // Strong rack diversity preference for EC
+ if disk.Rack != sourceRack {
+ score += 20.0
+ }
+
+ // Strong DC diversity preference for EC
+ if disk.DataCenter != sourceDC {
+ score += 15.0
+ }
+
+ return score
+}
+
+// calculatePlacementScore calculates overall placement quality score
+func (at *ActiveTopology) calculatePlacementScore(disk *activeDisk, sourceRack string, sourceDC string) float64 {
+ score := 0.0
+
+ // Load factor
+ activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
+ loadScore := (2.0 - float64(activeLoad)) / 2.0 // Normalize to 0-1
+ score += loadScore * 0.4
+
+ // Capacity factor
+ if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
+ freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
+ score += freeRatio * 0.3
+ }
+
+ // Diversity factor
+ diversityScore := 0.0
+ if disk.Rack != sourceRack {
+ diversityScore += 0.5
+ }
+ if disk.DataCenter != sourceDC {
+ diversityScore += 0.5
+ }
+ score += diversityScore * 0.3
+
+ return score // Score between 0.0 and 1.0
+}
+
+// checkPlacementConflicts checks for placement rule violations
+func (at *ActiveTopology) checkPlacementConflicts(disk *activeDisk, taskType TaskType) []string {
+ var conflicts []string
+
+ // Check load limits
+ activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
+ if activeLoad >= 2 {
+ conflicts = append(conflicts, fmt.Sprintf("disk_load_high_%d", activeLoad))
+ }
+
+ // Check capacity limits
+ if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
+ usageRatio := float64(disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
+ if usageRatio > 0.9 {
+ conflicts = append(conflicts, "disk_capacity_high")
+ }
+ }
+
+ // Check for conflicting task types
+ for _, task := range disk.assignedTasks {
+ if at.areTaskTypesConflicting(task.TaskType, taskType) {
+ conflicts = append(conflicts, fmt.Sprintf("task_conflict_%s", task.TaskType))
+ }
+ }
+
+ return conflicts
+}
+
+// Private methods
+
+// reassignTaskStates assigns tasks to the appropriate disks
+func (at *ActiveTopology) reassignTaskStates() {
+ // Clear existing task assignments
+ for _, disk := range at.disks {
+ disk.pendingTasks = nil
+ disk.assignedTasks = nil
+ disk.recentTasks = nil
+ }
+
+ // Reassign pending tasks
+ for _, task := range at.pendingTasks {
+ at.assignTaskToDisk(task)
+ }
+
+ // Reassign assigned tasks
+ for _, task := range at.assignedTasks {
+ at.assignTaskToDisk(task)
+ }
+
+ // Reassign recent tasks
+ for _, task := range at.recentTasks {
+ at.assignTaskToDisk(task)
+ }
+}
+
+// assignTaskToDisk assigns a task to the appropriate disk(s)
+func (at *ActiveTopology) assignTaskToDisk(task *taskState) {
+ // Assign to source disk
+ sourceKey := fmt.Sprintf("%s:%d", task.SourceServer, task.SourceDisk)
+ if sourceDisk, exists := at.disks[sourceKey]; exists {
+ switch task.Status {
+ case TaskStatusPending:
+ sourceDisk.pendingTasks = append(sourceDisk.pendingTasks, task)
+ case TaskStatusInProgress:
+ sourceDisk.assignedTasks = append(sourceDisk.assignedTasks, task)
+ case TaskStatusCompleted:
+ sourceDisk.recentTasks = append(sourceDisk.recentTasks, task)
+ }
+ }
+
+ // Assign to target disk if it exists and is different from source
+ if task.TargetServer != "" && (task.TargetServer != task.SourceServer || task.TargetDisk != task.SourceDisk) {
+ targetKey := fmt.Sprintf("%s:%d", task.TargetServer, task.TargetDisk)
+ if targetDisk, exists := at.disks[targetKey]; exists {
+ switch task.Status {
+ case TaskStatusPending:
+ targetDisk.pendingTasks = append(targetDisk.pendingTasks, task)
+ case TaskStatusInProgress:
+ targetDisk.assignedTasks = append(targetDisk.assignedTasks, task)
+ case TaskStatusCompleted:
+ targetDisk.recentTasks = append(targetDisk.recentTasks, task)
+ }
+ }
+ }
+}
+
+// isDiskAvailable checks if a disk can accept new tasks
+func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool {
+ // Check if disk has too many active tasks
+ activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
+ if activeLoad >= 2 { // Max 2 concurrent tasks per disk
+ return false
+ }
+
+ // Check for conflicting task types
+ for _, task := range disk.assignedTasks {
+ if at.areTaskTypesConflicting(task.TaskType, taskType) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// areTaskTypesConflicting checks if two task types conflict
+func (at *ActiveTopology) areTaskTypesConflicting(existing, new TaskType) bool {
+ // Examples of conflicting task types
+ conflictMap := map[TaskType][]TaskType{
+ TaskTypeVacuum: {TaskTypeBalance, TaskTypeErasureCoding},
+ TaskTypeBalance: {TaskTypeVacuum, TaskTypeErasureCoding},
+ TaskTypeErasureCoding: {TaskTypeVacuum, TaskTypeBalance},
+ }
+
+ if conflicts, exists := conflictMap[existing]; exists {
+ for _, conflictType := range conflicts {
+ if conflictType == new {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// cleanupRecentTasks removes old recent tasks
+func (at *ActiveTopology) cleanupRecentTasks() {
+ cutoff := time.Now().Add(-time.Duration(at.recentTaskWindowSeconds) * time.Second)
+
+ for taskID, task := range at.recentTasks {
+ if task.CompletedAt.Before(cutoff) {
+ delete(at.recentTasks, taskID)
+ }
+ }
+}
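
A hypothetical caller sketch (not part of this change) showing how a scheduler could drive the lifecycle and planning API defined above: the function name, task ID, and server addresses are made up, the import path is assumed from the package's directory, and topologyInfo is assumed to have been fetched from the master already.

package scheduler

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/admin/topology"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

func planAndTrack(topologyInfo *master_pb.TopologyInfo) error {
	at := topology.NewActiveTopology(10) // remember completed tasks for 10 seconds

	if err := at.UpdateTopology(topologyInfo); err != nil {
		return err
	}

	// Pick a destination for moving volume 1001 off an overloaded node.
	plan, err := at.PlanBalanceDestination(1001, "10.0.0.1:8080", "rack1", "dc1", 1<<20)
	if err != nil {
		return err
	}

	// Register the task so subsequent planning sees the load on both source and target disks.
	at.AddPendingTask("balance-1001", topology.TaskTypeBalance, 1001,
		"10.0.0.1:8080", 0, plan.TargetNode, plan.TargetDisk)
	if err := at.AssignTask("balance-1001"); err != nil {
		return err
	}

	// ... a worker performs the actual move here ...

	if err := at.CompleteTask("balance-1001"); err != nil {
		return err
	}
	fmt.Printf("moved volume 1001 to %s (placement score %.2f)\n", plan.TargetNode, plan.PlacementScore)
	return nil
}

PlanECDestinations follows the same pattern but returns a MultiDestinationPlan containing one DestinationPlan per shard along with the rack and DC diversity counts.
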
diff --git a/weed/admin/topology/active_topology_test.go b/weed/admin/topology/active_topology_test.go
new file mode 100644
index 000000000..9f2f09c29
--- /dev/null
+++ b/weed/admin/topology/active_topology_test.go
@@ -0,0 +1,654 @@
+package topology
+
+import (
+ "testing"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestActiveTopologyBasicOperations tests basic topology management
+func TestActiveTopologyBasicOperations(t *testing.T) {
+ topology := NewActiveTopology(10)
+ assert.NotNil(t, topology)
+ assert.Equal(t, 10, topology.recentTaskWindowSeconds)
+
+ // Test empty topology
+ assert.Equal(t, 0, len(topology.nodes))
+ assert.Equal(t, 0, len(topology.disks))
+ assert.Equal(t, 0, len(topology.pendingTasks))
+}
+
+// TestActiveTopologyUpdate tests topology updates from master
+func TestActiveTopologyUpdate(t *testing.T) {
+ topology := NewActiveTopology(10)
+
+ // Create sample topology info
+ topologyInfo := createSampleTopology()
+
+ err := topology.UpdateTopology(topologyInfo)
+ require.NoError(t, err)
+
+ // Verify topology structure
+ assert.Equal(t, 2, len(topology.nodes)) // 2 nodes
+ assert.Equal(t, 4, len(topology.disks)) // 4 disks total (2 per node)
+
+ // Verify node structure
+ node1, exists := topology.nodes["10.0.0.1:8080"]
+ require.True(t, exists)
+ assert.Equal(t, "dc1", node1.dataCenter)
+ assert.Equal(t, "rack1", node1.rack)
+ assert.Equal(t, 2, len(node1.disks))
+
+ // Verify disk structure
+ disk1, exists := topology.disks["10.0.0.1:8080:0"]
+ require.True(t, exists)
+ assert.Equal(t, uint32(0), disk1.DiskID)
+ assert.Equal(t, "hdd", disk1.DiskType)
+ assert.Equal(t, "dc1", disk1.DataCenter)
+}
+
+// TestTaskLifecycle tests the complete task lifecycle
+func TestTaskLifecycle(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ taskID := "balance-001"
+
+ // 1. Add pending task
+ topology.AddPendingTask(taskID, TaskTypeBalance, 1001,
+ "10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+
+ // Verify pending state
+ assert.Equal(t, 1, len(topology.pendingTasks))
+ assert.Equal(t, 0, len(topology.assignedTasks))
+ assert.Equal(t, 0, len(topology.recentTasks))
+
+ task := topology.pendingTasks[taskID]
+ assert.Equal(t, TaskStatusPending, task.Status)
+ assert.Equal(t, uint32(1001), task.VolumeID)
+
+ // Verify task assigned to disks
+ sourceDisk := topology.disks["10.0.0.1:8080:0"]
+ targetDisk := topology.disks["10.0.0.2:8080:1"]
+ assert.Equal(t, 1, len(sourceDisk.pendingTasks))
+ assert.Equal(t, 1, len(targetDisk.pendingTasks))
+
+ // 2. Assign task
+ err := topology.AssignTask(taskID)
+ require.NoError(t, err)
+
+ // Verify assigned state
+ assert.Equal(t, 0, len(topology.pendingTasks))
+ assert.Equal(t, 1, len(topology.assignedTasks))
+ assert.Equal(t, 0, len(topology.recentTasks))
+
+ task = topology.assignedTasks[taskID]
+ assert.Equal(t, TaskStatusInProgress, task.Status)
+
+ // Verify task moved to assigned on disks
+ assert.Equal(t, 0, len(sourceDisk.pendingTasks))
+ assert.Equal(t, 1, len(sourceDisk.assignedTasks))
+ assert.Equal(t, 0, len(targetDisk.pendingTasks))
+ assert.Equal(t, 1, len(targetDisk.assignedTasks))
+
+ // 3. Complete task
+ err = topology.CompleteTask(taskID)
+ require.NoError(t, err)
+
+ // Verify completed state
+ assert.Equal(t, 0, len(topology.pendingTasks))
+ assert.Equal(t, 0, len(topology.assignedTasks))
+ assert.Equal(t, 1, len(topology.recentTasks))
+
+ task = topology.recentTasks[taskID]
+ assert.Equal(t, TaskStatusCompleted, task.Status)
+ assert.False(t, task.CompletedAt.IsZero())
+}
+
+// TestTaskDetectionScenarios tests various task detection scenarios
+func TestTaskDetectionScenarios(t *testing.T) {
+ tests := []struct {
+ name string
+ scenario func() *ActiveTopology
+ expectedTasks map[string]bool // taskType -> shouldDetect
+ }{
+ {
+ name: "Empty cluster - no tasks needed",
+ scenario: func() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createEmptyTopology())
+ return topology
+ },
+ expectedTasks: map[string]bool{
+ "balance": false,
+ "vacuum": false,
+ "ec": false,
+ },
+ },
+ {
+ name: "Unbalanced cluster - balance task needed",
+ scenario: func() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createUnbalancedTopology())
+ return topology
+ },
+ expectedTasks: map[string]bool{
+ "balance": true,
+ "vacuum": false,
+ "ec": false,
+ },
+ },
+ {
+ name: "High garbage ratio - vacuum task needed",
+ scenario: func() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createHighGarbageTopology())
+ return topology
+ },
+ expectedTasks: map[string]bool{
+ "balance": false,
+ "vacuum": true,
+ "ec": false,
+ },
+ },
+ {
+ name: "Large volumes - EC task needed",
+ scenario: func() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createLargeVolumeTopology())
+ return topology
+ },
+ expectedTasks: map[string]bool{
+ "balance": false,
+ "vacuum": false,
+ "ec": true,
+ },
+ },
+ {
+ name: "Recent tasks - no immediate re-detection",
+ scenario: func() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createUnbalancedTopology())
+ // Add recent balance task
+ topology.recentTasks["recent-balance"] = &taskState{
+ VolumeID: 1001,
+ TaskType: TaskTypeBalance,
+ Status: TaskStatusCompleted,
+ CompletedAt: time.Now().Add(-5 * time.Second), // 5 seconds ago
+ }
+ return topology
+ },
+ expectedTasks: map[string]bool{
+ "balance": false, // Should not detect due to recent task
+ "vacuum": false,
+ "ec": false,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ topology := tt.scenario()
+
+ // Test balance task detection
+ shouldDetectBalance := tt.expectedTasks["balance"]
+ actualDetectBalance := !topology.HasRecentTaskForVolume(1001, TaskTypeBalance)
+ if shouldDetectBalance {
+ assert.True(t, actualDetectBalance, "Should detect balance task")
+ } else {
+ // Note: In a real implementation, task detection would be more sophisticated.
+ // This is a simplified test of the recent-task prevention mechanism.
+ }
+
+ // Test that recent tasks prevent re-detection
+ if len(topology.recentTasks) > 0 {
+ for _, task := range topology.recentTasks {
+ hasRecent := topology.HasRecentTaskForVolume(task.VolumeID, task.TaskType)
+ assert.True(t, hasRecent, "Should find recent task for volume %d", task.VolumeID)
+ }
+ }
+ })
+ }
+}
+
+// TestTargetSelectionScenarios tests target selection for different task types
+func TestTargetSelectionScenarios(t *testing.T) {
+ tests := []struct {
+ name string
+ topology *ActiveTopology
+ taskType TaskType
+ excludeNode string
+ expectedTargets int
+ expectedBestTarget string
+ }{
+ {
+ name: "Balance task - find least loaded disk",
+ topology: createTopologyWithLoad(),
+ taskType: TaskTypeBalance,
+ excludeNode: "10.0.0.1:8080", // Exclude source node
+ expectedTargets: 2, // 2 disks on other node
+ },
+ {
+ name: "EC task - find multiple available disks",
+ topology: createTopologyForEC(),
+ taskType: TaskTypeErasureCoding,
+ excludeNode: "", // Don't exclude any nodes
+ expectedTargets: 4, // All 4 disks available
+ },
+ {
+ name: "Vacuum task - avoid conflicting disks",
+ topology: createTopologyWithConflicts(),
+ taskType: TaskTypeVacuum,
+ excludeNode: "",
+ expectedTargets: 1, // Only 1 disk (node2/disk1) has no conflicting tasks
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ availableDisks := tt.topology.GetAvailableDisks(tt.taskType, tt.excludeNode)
+ assert.Equal(t, tt.expectedTargets, len(availableDisks),
+ "Expected %d available disks, got %d", tt.expectedTargets, len(availableDisks))
+
+ // Verify disks are actually available
+ for _, disk := range availableDisks {
+ assert.NotEqual(t, tt.excludeNode, disk.NodeID,
+ "Available disk should not be on excluded node")
+
+ load := tt.topology.GetDiskLoad(disk.NodeID, disk.DiskID)
+ assert.Less(t, load, 2, "Disk load should be less than 2")
+ }
+ })
+ }
+}
+
+// TestDiskLoadCalculation tests disk load calculation
+func TestDiskLoadCalculation(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Initially no load
+ load := topology.GetDiskLoad("10.0.0.1:8080", 0)
+ assert.Equal(t, 0, load)
+
+ // Add pending task
+ topology.AddPendingTask("task1", TaskTypeBalance, 1001,
+ "10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+
+ // Check load increased
+ load = topology.GetDiskLoad("10.0.0.1:8080", 0)
+ assert.Equal(t, 1, load)
+
+ // Add another task to same disk
+ topology.AddPendingTask("task2", TaskTypeVacuum, 1002,
+ "10.0.0.1:8080", 0, "", 0)
+
+ load = topology.GetDiskLoad("10.0.0.1:8080", 0)
+ assert.Equal(t, 2, load)
+
+ // Move one task to assigned
+ topology.AssignTask("task1")
+
+ // Load should still be 2 (1 pending + 1 assigned)
+ load = topology.GetDiskLoad("10.0.0.1:8080", 0)
+ assert.Equal(t, 2, load)
+
+ // Complete one task
+ topology.CompleteTask("task1")
+
+ // Load should decrease to 1
+ load = topology.GetDiskLoad("10.0.0.1:8080", 0)
+ assert.Equal(t, 1, load)
+}
+
+// TestTaskConflictDetection tests task conflict detection
+func TestTaskConflictDetection(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Add a balance task
+ topology.AddPendingTask("balance1", TaskTypeBalance, 1001,
+ "10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+ topology.AssignTask("balance1")
+
+ // Try to get available disks for vacuum (conflicts with balance)
+ availableDisks := topology.GetAvailableDisks(TaskTypeVacuum, "")
+
+ // Source disk should not be available due to conflict
+ sourceDiskAvailable := false
+ for _, disk := range availableDisks {
+ if disk.NodeID == "10.0.0.1:8080" && disk.DiskID == 0 {
+ sourceDiskAvailable = true
+ break
+ }
+ }
+ assert.False(t, sourceDiskAvailable, "Source disk should not be available due to task conflict")
+}
+
+// TestPublicInterfaces tests the public interface methods
+func TestPublicInterfaces(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Test GetAllNodes
+ nodes := topology.GetAllNodes()
+ assert.Equal(t, 2, len(nodes))
+ assert.Contains(t, nodes, "10.0.0.1:8080")
+ assert.Contains(t, nodes, "10.0.0.2:8080")
+
+ // Test GetNodeDisks
+ disks := topology.GetNodeDisks("10.0.0.1:8080")
+ assert.Equal(t, 2, len(disks))
+
+ // Test with non-existent node
+ disks = topology.GetNodeDisks("non-existent")
+ assert.Nil(t, disks)
+}
+
+// Helper functions to create test topologies
+
+func createSampleTopology() *master_pb.TopologyInfo {
+ return &master_pb.TopologyInfo{
+ DataCenterInfos: []*master_pb.DataCenterInfo{
+ {
+ Id: "dc1",
+ RackInfos: []*master_pb.RackInfo{
+ {
+ Id: "rack1",
+ DataNodeInfos: []*master_pb.DataNodeInfo{
+ {
+ Id: "10.0.0.1:8080",
+ DiskInfos: map[string]*master_pb.DiskInfo{
+ "hdd": {DiskId: 0, VolumeCount: 10, MaxVolumeCount: 100},
+ "ssd": {DiskId: 1, VolumeCount: 5, MaxVolumeCount: 50},
+ },
+ },
+ {
+ Id: "10.0.0.2:8080",
+ DiskInfos: map[string]*master_pb.DiskInfo{
+ "hdd": {DiskId: 0, VolumeCount: 8, MaxVolumeCount: 100},
+ "ssd": {DiskId: 1, VolumeCount: 3, MaxVolumeCount: 50},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func createEmptyTopology() *master_pb.TopologyInfo {
+ return &master_pb.TopologyInfo{
+ DataCenterInfos: []*master_pb.DataCenterInfo{
+ {
+ Id: "dc1",
+ RackInfos: []*master_pb.RackInfo{
+ {
+ Id: "rack1",
+ DataNodeInfos: []*master_pb.DataNodeInfo{
+ {
+ Id: "10.0.0.1:8080",
+ DiskInfos: map[string]*master_pb.DiskInfo{
+ "hdd": {DiskId: 0, VolumeCount: 0, MaxVolumeCount: 100},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func createUnbalancedTopology() *master_pb.TopologyInfo {
+ return &master_pb.TopologyInfo{
+ DataCenterInfos: []*master_pb.DataCenterInfo{
+ {
+ Id: "dc1",
+ RackInfos: []*master_pb.RackInfo{
+ {
+ Id: "rack1",
+ DataNodeInfos: []*master_pb.DataNodeInfo{
+ {
+ Id: "10.0.0.1:8080",
+ DiskInfos: map[string]*master_pb.DiskInfo{
+ "hdd": {DiskId: 0, VolumeCount: 90, MaxVolumeCount: 100}, // Very loaded
+ },
+ },
+ {
+ Id: "10.0.0.2:8080",
+ DiskInfos: map[string]*master_pb.DiskInfo{
+ "hdd": {DiskId: 0, VolumeCount: 10, MaxVolumeCount: 100}, // Lightly loaded
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func createHighGarbageTopology() *master_pb.TopologyInfo {
+ // In a real implementation, this would include volume-level garbage metrics
+ return createSampleTopology()
+}
+
+func createLargeVolumeTopology() *master_pb.TopologyInfo {
+ // In a real implementation, this would include volume-level size metrics
+ return createSampleTopology()
+}
+
+func createTopologyWithLoad() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Add some existing tasks to create load
+ topology.AddPendingTask("existing1", TaskTypeVacuum, 2001,
+ "10.0.0.1:8080", 0, "", 0)
+ topology.AssignTask("existing1")
+
+ return topology
+}
+
+func createTopologyForEC() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+ return topology
+}
+
+func createTopologyWithConflicts() *ActiveTopology {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Add conflicting tasks
+ topology.AddPendingTask("balance1", TaskTypeBalance, 3001,
+ "10.0.0.1:8080", 0, "10.0.0.2:8080", 0)
+ topology.AssignTask("balance1")
+
+ topology.AddPendingTask("ec1", TaskTypeErasureCoding, 3002,
+ "10.0.0.1:8080", 1, "", 0)
+ topology.AssignTask("ec1")
+
+ return topology
+}
+
+// TestDestinationPlanning tests destination planning functionality
+func TestDestinationPlanning(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ // Test balance destination planning
+ t.Run("Balance destination planning", func(t *testing.T) {
+ plan, err := topology.PlanBalanceDestination(1001, "10.0.0.1:8080", "rack1", "dc1", 1024*1024) // 1MB
+ require.NoError(t, err)
+ require.NotNil(t, plan)
+
+ // Should not target the source node
+ assert.NotEqual(t, "10.0.0.1:8080", plan.TargetNode)
+ assert.Equal(t, "10.0.0.2:8080", plan.TargetNode)
+ assert.NotEmpty(t, plan.TargetRack)
+ assert.NotEmpty(t, plan.TargetDC)
+ assert.Greater(t, plan.PlacementScore, 0.0)
+ })
+
+ // Test EC destination planning
+ t.Run("EC destination planning", func(t *testing.T) {
+ multiPlan, err := topology.PlanECDestinations(1002, "10.0.0.1:8080", "rack1", "dc1", 3) // Ask for 3 shards - source node can be included
+ require.NoError(t, err)
+ require.NotNil(t, multiPlan)
+ assert.Greater(t, len(multiPlan.Plans), 0)
+ assert.LessOrEqual(t, len(multiPlan.Plans), 3) // Should get at most 3 shards
+ assert.Equal(t, len(multiPlan.Plans), multiPlan.TotalShards)
+
+ // Check that all plans have valid target nodes
+ for _, plan := range multiPlan.Plans {
+ assert.NotEmpty(t, plan.TargetNode)
+ assert.NotEmpty(t, plan.TargetRack)
+ assert.NotEmpty(t, plan.TargetDC)
+ assert.GreaterOrEqual(t, plan.PlacementScore, 0.0)
+ }
+
+ // Check diversity metrics
+ assert.GreaterOrEqual(t, multiPlan.SuccessfulRack, 1)
+ assert.GreaterOrEqual(t, multiPlan.SuccessfulDCs, 1)
+ })
+
+ // Test destination planning with load
+ t.Run("Destination planning considers load", func(t *testing.T) {
+ // Add load to one disk
+ topology.AddPendingTask("task1", TaskTypeBalance, 2001,
+ "10.0.0.2:8080", 0, "", 0)
+
+ plan, err := topology.PlanBalanceDestination(1003, "10.0.0.1:8080", "rack1", "dc1", 1024*1024)
+ require.NoError(t, err)
+ require.NotNil(t, plan)
+
+ // Should prefer less loaded disk (disk 1 over disk 0 on node2)
+ assert.Equal(t, "10.0.0.2:8080", plan.TargetNode)
+ assert.Equal(t, uint32(1), plan.TargetDisk) // Should prefer SSD (disk 1) which has no load
+ })
+
+ // Test insufficient destinations
+ t.Run("Handle insufficient destinations", func(t *testing.T) {
+ // Try to plan for more EC shards than available disks
+ multiPlan, err := topology.PlanECDestinations(1004, "10.0.0.1:8080", "rack1", "dc1", 100)
+
+ // Should get an error for insufficient disks
+ assert.Error(t, err)
+ assert.Nil(t, multiPlan)
+ })
+}
+
+// TestDestinationPlanningWithActiveTopology tests the integration between task detection and destination planning
+func TestDestinationPlanningWithActiveTopology(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createUnbalancedTopology())
+
+ // Test that tasks are created with destinations
+ t.Run("Balance task with destination", func(t *testing.T) {
+ // Simulate what the balance detector would create
+ sourceNode := "10.0.0.1:8080" // Overloaded node
+ volumeID := uint32(1001)
+
+ plan, err := topology.PlanBalanceDestination(volumeID, sourceNode, "rack1", "dc1", 1024*1024)
+ require.NoError(t, err)
+ require.NotNil(t, plan)
+
+ // Verify the destination is different from source
+ assert.NotEqual(t, sourceNode, plan.TargetNode)
+ assert.Equal(t, "10.0.0.2:8080", plan.TargetNode) // Should be the lightly loaded node
+
+ // Verify placement quality
+ assert.Greater(t, plan.PlacementScore, 0.0)
+ assert.LessOrEqual(t, plan.PlacementScore, 1.0)
+ })
+
+ // Test task state integration
+ t.Run("Task state affects future planning", func(t *testing.T) {
+ volumeID := uint32(1002)
+ sourceNode := "10.0.0.1:8080"
+ targetNode := "10.0.0.2:8080"
+
+ // Plan first destination
+ plan1, err := topology.PlanBalanceDestination(volumeID, sourceNode, "rack1", "dc1", 1024*1024)
+ require.NoError(t, err)
+ require.NotNil(t, plan1)
+
+ // Add a pending task to the target
+ topology.AddPendingTask("task1", TaskTypeBalance, volumeID, sourceNode, 0, targetNode, 0)
+
+ // Plan another destination - should consider the pending task load
+ plan2, err := topology.PlanBalanceDestination(1003, sourceNode, "rack1", "dc1", 1024*1024)
+ require.NoError(t, err)
+ require.NotNil(t, plan2)
+
+ // The placement score should reflect the increased load
+ // (This test might need adjustment based on the actual scoring algorithm)
+ glog.V(1).Infof("Plan1 score: %.3f, Plan2 score: %.3f", plan1.PlacementScore, plan2.PlacementScore)
+ })
+}
+
+// TestECDestinationPlanningDetailed tests the EC destination planning with multiple shards
+func TestECDestinationPlanningDetailed(t *testing.T) {
+ topology := NewActiveTopology(10)
+ topology.UpdateTopology(createSampleTopology())
+
+ t.Run("EC multiple destinations", func(t *testing.T) {
+ // Plan for 3 EC shards (now including source node, we have 4 disks total)
+ multiPlan, err := topology.PlanECDestinations(1005, "10.0.0.1:8080", "rack1", "dc1", 3)
+ require.NoError(t, err)
+ require.NotNil(t, multiPlan)
+
+ // Should get 3 destinations (can include source node's disks)
+ assert.Equal(t, 3, len(multiPlan.Plans))
+ assert.Equal(t, 3, multiPlan.TotalShards)
+
+ // Count node distribution - source node can now be included
+ nodeCount := make(map[string]int)
+ for _, plan := range multiPlan.Plans {
+ nodeCount[plan.TargetNode]++
+ }
+
+ // Should distribute across available nodes (both nodes can be used)
+ assert.GreaterOrEqual(t, len(nodeCount), 1, "Should use at least 1 node")
+ assert.LessOrEqual(t, len(nodeCount), 2, "Should use at most 2 nodes")
+ glog.V(1).Infof("EC destinations node distribution: %v", nodeCount)
+
+ glog.V(1).Infof("EC destinations: %d plans across %d racks, %d DCs",
+ multiPlan.TotalShards, multiPlan.SuccessfulRack, multiPlan.SuccessfulDCs)
+ })
+
+ t.Run("EC destination planning with task conflicts", func(t *testing.T) {
+ // Create a fresh topology for this test to avoid conflicts from previous test
+ freshTopology := NewActiveTopology(10)
+ freshTopology.UpdateTopology(createSampleTopology())
+
+ // Add tasks to create conflicts on some disks
+ freshTopology.AddPendingTask("conflict1", TaskTypeVacuum, 2001, "10.0.0.2:8080", 0, "", 0)
+ freshTopology.AddPendingTask("conflict2", TaskTypeBalance, 2002, "10.0.0.1:8080", 0, "", 0)
+ freshTopology.AssignTask("conflict1")
+ freshTopology.AssignTask("conflict2")
+
+ // Plan EC destinations - should still succeed using available disks
+ multiPlan, err := freshTopology.PlanECDestinations(1006, "10.0.0.1:8080", "rack1", "dc1", 2)
+ require.NoError(t, err)
+ require.NotNil(t, multiPlan)
+
+ // Should get destinations (using disks that don't have conflicts)
+ assert.GreaterOrEqual(t, len(multiPlan.Plans), 1)
+ assert.LessOrEqual(t, len(multiPlan.Plans), 2)
+
+ // Available disks should be: node1/disk1 and node2/disk1 (since disk0 on both nodes has conflicts)
+ for _, plan := range multiPlan.Plans {
+ assert.Equal(t, uint32(1), plan.TargetDisk, "Should prefer disk 1 which has no conflicts")
+ }
+
+ glog.V(1).Infof("EC destination planning with conflicts: found %d destinations", len(multiPlan.Plans))
+ })
+}
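
To make the balance scoring concrete, a small standalone illustration that mirrors calculateBalanceScore from the new file, using numbers from the sample test topology: with one active task on node2's hdd disk and none on its ssd disk, the ssd disk wins 98.8 to 58.4, which is exactly the preference asserted in the "Destination planning considers load" test. This is illustrative arithmetic only, not code taken from the change.

package main

import "fmt"

// balanceScore mirrors the formula in calculateBalanceScore:
// (2 - activeLoad)*40 + freeRatio*20, plus +10 for a different rack and +5 for a different DC.
func balanceScore(activeLoad int, volumeCount, maxVolumeCount int64, diffRack, diffDC bool) float64 {
	score := (2.0 - float64(activeLoad)) * 40.0
	if maxVolumeCount > 0 {
		score += float64(maxVolumeCount-volumeCount) / float64(maxVolumeCount) * 20.0
	}
	if diffRack {
		score += 10.0
	}
	if diffDC {
		score += 5.0
	}
	return score
}

func main() {
	// node2's disks from the sample topology, same rack/DC as the source (no diversity bonus).
	fmt.Printf("%.1f\n", balanceScore(1, 8, 100, false, false)) // hdd with one pending task: 58.4
	fmt.Printf("%.1f\n", balanceScore(0, 3, 50, false, false))  // idle ssd: 98.8 -> preferred
}
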
diff --git a/weed/admin/view/app/cluster_collections.templ b/weed/admin/view/app/cluster_collections.templ
index 9099fe112..d4765ea86 100644
--- a/weed/admin/view/app/cluster_collections.templ
+++ b/weed/admin/view/app/cluster_collections.templ
@@ -22,7 +22,7 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
<div id="collections-content">
<!-- Summary Cards -->
<div class="row mb-4">
- <div class="col-xl-3 col-md-6 mb-4">
+ <div class="col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4">
<div class="card border-left-primary shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
@@ -42,13 +42,13 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
</div>
</div>
- <div class="col-xl-3 col-md-6 mb-4">
+ <div class="col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4">
<div class="card border-left-info shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-info text-uppercase mb-1">
- Total Volumes
+ Regular Volumes
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{fmt.Sprintf("%d", data.TotalVolumes)}
@@ -62,7 +62,27 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
</div>
</div>
- <div class="col-xl-3 col-md-6 mb-4">
+ <div class="col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4">
+ <div class="card border-left-success shadow h-100 py-2">
+ <div class="card-body">
+ <div class="row no-gutters align-items-center">
+ <div class="col mr-2">
+ <div class="text-xs font-weight-bold text-success text-uppercase mb-1">
+ EC Volumes
+ </div>
+ <div class="h5 mb-0 font-weight-bold text-gray-800">
+ {fmt.Sprintf("%d", data.TotalEcVolumes)}
+ </div>
+ </div>
+ <div class="col-auto">
+ <i class="fas fa-th-large fa-2x text-gray-300"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4">
<div class="card border-left-warning shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
@@ -76,19 +96,19 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
</div>
<div class="col-auto">
<i class="fas fa-file fa-2x text-gray-300"></i>
+ </div>
</div>
</div>
</div>
</div>
- </div>
- <div class="col-xl-3 col-md-6 mb-4">
+ <div class="col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4">
<div class="card border-left-secondary shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-secondary text-uppercase mb-1">
- Total Storage Size
+ Total Storage Size (Logical)
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{formatBytes(data.TotalSize)}
@@ -117,9 +137,10 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
<thead>
<tr>
<th>Collection Name</th>
- <th>Volumes</th>
+ <th>Regular Volumes</th>
+ <th>EC Volumes</th>
<th>Files</th>
- <th>Size</th>
+ <th>Size (Logical)</th>
<th>Disk Types</th>
<th>Actions</th>
</tr>
@@ -128,7 +149,7 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
for _, collection := range data.Collections {
<tr>
<td>
- <a href={templ.SafeURL(fmt.Sprintf("/cluster/volumes?collection=%s", collection.Name))} class="text-decoration-none">
+ <a href={templ.SafeURL(fmt.Sprintf("/cluster/collections/%s", collection.Name))} class="text-decoration-none">
<strong>{collection.Name}</strong>
</a>
</td>
@@ -136,7 +157,23 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
<a href={templ.SafeURL(fmt.Sprintf("/cluster/volumes?collection=%s", collection.Name))} class="text-decoration-none">
<div class="d-flex align-items-center">
<i class="fas fa-database me-2 text-muted"></i>
- {fmt.Sprintf("%d", collection.VolumeCount)}
+ if collection.VolumeCount > 0 {
+ {fmt.Sprintf("%d", collection.VolumeCount)}
+ } else {
+ <span class="text-muted">0</span>
+ }
+ </div>
+ </a>
+ </td>
+ <td>
+ <a href={templ.SafeURL(fmt.Sprintf("/cluster/ec-shards?collection=%s", collection.Name))} class="text-decoration-none">
+ <div class="d-flex align-items-center">
+ <i class="fas fa-th-large me-2 text-muted"></i>
+ if collection.EcVolumeCount > 0 {
+ {fmt.Sprintf("%d", collection.EcVolumeCount)}
+ } else {
+ <span class="text-muted">0</span>
+ }
</div>
</a>
</td>
@@ -171,6 +208,7 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
data-name={collection.Name}
data-datacenter={collection.DataCenter}
data-volume-count={fmt.Sprintf("%d", collection.VolumeCount)}
+ data-ec-volume-count={fmt.Sprintf("%d", collection.EcVolumeCount)}
data-file-count={fmt.Sprintf("%d", collection.FileCount)}
data-total-size={fmt.Sprintf("%d", collection.TotalSize)}
data-disk-types={formatDiskTypes(collection.DiskTypes)}>
@@ -223,6 +261,7 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
name: button.getAttribute('data-name'),
datacenter: button.getAttribute('data-datacenter'),
volumeCount: parseInt(button.getAttribute('data-volume-count')),
+ ecVolumeCount: parseInt(button.getAttribute('data-ec-volume-count')),
fileCount: parseInt(button.getAttribute('data-file-count')),
totalSize: parseInt(button.getAttribute('data-total-size')),
diskTypes: button.getAttribute('data-disk-types')
@@ -260,19 +299,25 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
'<div class="col-md-6">' +
'<h6 class="text-primary"><i class="fas fa-chart-bar me-1"></i>Storage Statistics</h6>' +
'<table class="table table-sm">' +
- '<tr><td><strong>Total Volumes:</strong></td><td>' +
+ '<tr><td><strong>Regular Volumes:</strong></td><td>' +
'<div class="d-flex align-items-center">' +
'<i class="fas fa-database me-2 text-muted"></i>' +
'<span>' + collection.volumeCount.toLocaleString() + '</span>' +
'</div>' +
'</td></tr>' +
+ '<tr><td><strong>EC Volumes:</strong></td><td>' +
+ '<div class="d-flex align-items-center">' +
+ '<i class="fas fa-th-large me-2 text-muted"></i>' +
+ '<span>' + collection.ecVolumeCount.toLocaleString() + '</span>' +
+ '</div>' +
+ '</td></tr>' +
'<tr><td><strong>Total Files:</strong></td><td>' +
'<div class="d-flex align-items-center">' +
'<i class="fas fa-file me-2 text-muted"></i>' +
'<span>' + collection.fileCount.toLocaleString() + '</span>' +
'</div>' +
'</td></tr>' +
- '<tr><td><strong>Total Size:</strong></td><td>' +
+ '<tr><td><strong>Total Size (Logical):</strong></td><td>' +
'<div class="d-flex align-items-center">' +
'<i class="fas fa-hdd me-2 text-muted"></i>' +
'<span>' + formatBytes(collection.totalSize) + '</span>' +
@@ -288,6 +333,9 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
'<a href="/cluster/volumes?collection=' + encodeURIComponent(collection.name) + '" class="btn btn-outline-primary">' +
'<i class="fas fa-database me-1"></i>View Volumes' +
'</a>' +
+ '<a href="/cluster/ec-shards?collection=' + encodeURIComponent(collection.name) + '" class="btn btn-outline-secondary">' +
+ '<i class="fas fa-th-large me-1"></i>View EC Volumes' +
+ '</a>' +
'<a href="/files?collection=' + encodeURIComponent(collection.name) + '" class="btn btn-outline-info">' +
'<i class="fas fa-folder me-1"></i>Browse Files' +
'</a>' +
@@ -295,6 +343,7 @@ templ ClusterCollections(data dash.ClusterCollectionsData) {
'</div>' +
'</div>' +
'</div>' +
+ '</div>' +
'<div class="modal-footer">' +
'<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>' +
'</div>' +
diff --git a/weed/admin/view/app/cluster_collections_templ.go b/weed/admin/view/app/cluster_collections_templ.go
index 58384c462..9f1d1e5f1 100644
--- a/weed/admin/view/app/cluster_collections_templ.go
+++ b/weed/admin/view/app/cluster_collections_templ.go
@@ -34,7 +34,7 @@ func ClusterCollections(data dash.ClusterCollectionsData) templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><h1 class=\"h2\"><i class=\"fas fa-layer-group me-2\"></i>Cluster Collections</h1><div class=\"btn-toolbar mb-2 mb-md-0\"><div class=\"btn-group me-2\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"exportCollections()\"><i class=\"fas fa-download me-1\"></i>Export</button></div></div></div><div id=\"collections-content\"><!-- Summary Cards --><div class=\"row mb-4\"><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-primary shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-primary text-uppercase mb-1\">Total Collections</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><h1 class=\"h2\"><i class=\"fas fa-layer-group me-2\"></i>Cluster Collections</h1><div class=\"btn-toolbar mb-2 mb-md-0\"><div class=\"btn-group me-2\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"exportCollections()\"><i class=\"fas fa-download me-1\"></i>Export</button></div></div></div><div id=\"collections-content\"><!-- Summary Cards --><div class=\"row mb-4\"><div class=\"col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4\"><div class=\"card border-left-primary shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-primary text-uppercase mb-1\">Total Collections</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -47,7 +47,7 @@ func ClusterCollections(data dash.ClusterCollectionsData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</div></div><div class=\"col-auto\"><i class=\"fas fa-layer-group fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-info shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-info text-uppercase mb-1\">Total Volumes</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</div></div><div class=\"col-auto\"><i class=\"fas fa-layer-group fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4\"><div class=\"card border-left-info shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-info text-uppercase mb-1\">Regular Volumes</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -60,284 +60,350 @@ func ClusterCollections(data dash.ClusterCollectionsData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</div></div><div class=\"col-auto\"><i class=\"fas fa-database fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-warning shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-warning text-uppercase mb-1\">Total Files</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</div></div><div class=\"col-auto\"><i class=\"fas fa-database fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4\"><div class=\"card border-left-success shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-success text-uppercase mb-1\">EC Volumes</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
- templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles))
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 74, Col: 71}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 74, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</div></div><div class=\"col-auto\"><i class=\"fas fa-file fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-secondary shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-secondary text-uppercase mb-1\">Total Storage Size</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</div></div><div class=\"col-auto\"><i class=\"fas fa-th-large fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4\"><div class=\"card border-left-warning shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-warning text-uppercase mb-1\">Total Files</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
- templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize))
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 94, Col: 64}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 94, Col: 71}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</div></div><div class=\"col-auto\"><i class=\"fas fa-hdd fa-2x text-gray-300\"></i></div></div></div></div></div></div><!-- Collections Table --><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-layer-group me-2\"></i>Collection Details</h6></div><div class=\"card-body\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</div></div><div class=\"col-auto\"><i class=\"fas fa-file fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-2 col-lg-3 col-md-4 col-sm-6 mb-4\"><div class=\"card border-left-secondary shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-secondary text-uppercase mb-1\">Total Storage Size (Logical)</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 114, Col: 64}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</div></div><div class=\"col-auto\"><i class=\"fas fa-hdd fa-2x text-gray-300\"></i></div></div></div></div></div></div><!-- Collections Table --><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-layer-group me-2\"></i>Collection Details</h6></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if len(data.Collections) > 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"table-responsive\"><table class=\"table table-hover\" id=\"collectionsTable\"><thead><tr><th>Collection Name</th><th>Volumes</th><th>Files</th><th>Size</th><th>Disk Types</th><th>Actions</th></tr></thead> <tbody>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<div class=\"table-responsive\"><table class=\"table table-hover\" id=\"collectionsTable\"><thead><tr><th>Collection Name</th><th>Regular Volumes</th><th>EC Volumes</th><th>Files</th><th>Size (Logical)</th><th>Disk Types</th><th>Actions</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, collection := range data.Collections {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<tr><td><a href=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<tr><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var6 templ.SafeURL
- templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("/cluster/volumes?collection=%s", collection.Name)))
+ var templ_7745c5c3_Var7 templ.SafeURL
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("/cluster/collections/%s", collection.Name)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 131, Col: 130}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 152, Col: 123}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\" class=\"text-decoration-none\"><strong>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" class=\"text-decoration-none\"><strong>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var7 string
- templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name)
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 132, Col: 68}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 153, Col: 68}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</strong></a></td><td><a href=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</strong></a></td><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var8 templ.SafeURL
- templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("/cluster/volumes?collection=%s", collection.Name)))
+ var templ_7745c5c3_Var9 templ.SafeURL
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("/cluster/volumes?collection=%s", collection.Name)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 136, Col: 130}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 157, Col: 130}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\" class=\"text-decoration-none\"><div class=\"d-flex align-items-center\"><i class=\"fas fa-database me-2 text-muted\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" class=\"text-decoration-none\"><div class=\"d-flex align-items-center\"><i class=\"fas fa-database me-2 text-muted\"></i> ")
+ if collection.VolumeCount > 0 {
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 161, Col: 94}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "<span class=\"text-muted\">0</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</div></a></td><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var9 string
- templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount))
+ var templ_7745c5c3_Var11 templ.SafeURL
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("/cluster/ec-shards?collection=%s", collection.Name)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 139, Col: 90}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 169, Col: 132}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\" class=\"text-decoration-none\"><div class=\"d-flex align-items-center\"><i class=\"fas fa-th-large me-2 text-muted\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</div></a></td><td><div class=\"d-flex align-items-center\"><i class=\"fas fa-file me-2 text-muted\"></i> ")
+ if collection.EcVolumeCount > 0 {
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.EcVolumeCount))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 173, Col: 96}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<span class=\"text-muted\">0</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</div></a></td><td><div class=\"d-flex align-items-center\"><i class=\"fas fa-file me-2 text-muted\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var10 string
- templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount))
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 146, Col: 88}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 183, Col: 88}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</div></td><td><div class=\"d-flex align-items-center\"><i class=\"fas fa-hdd me-2 text-muted\"></i> ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</div></td><td><div class=\"d-flex align-items-center\"><i class=\"fas fa-hdd me-2 text-muted\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var11 string
- templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(collection.TotalSize))
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(collection.TotalSize))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 152, Col: 82}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 189, Col: 82}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</div></td><td>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</div></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, diskType := range collection.DiskTypes {
if i > 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<span class=\"me-1\"></span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<span class=\"me-1\"></span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var12 = []any{fmt.Sprintf("badge bg-%s me-1", getDiskTypeColor(diskType))}
- templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var12...)
+ var templ_7745c5c3_Var15 = []any{fmt.Sprintf("badge bg-%s me-1", getDiskTypeColor(diskType))}
+ templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<span class=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<span class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var13 string
- templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var12).String())
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var15).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 1, Col: 0}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var14 string
- templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(diskType)
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(diskType)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 160, Col: 131}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 197, Col: 131}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</span> ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</span> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
if len(collection.DiskTypes) == 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<span class=\"text-muted\">Unknown</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<span class=\"text-muted\">Unknown</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</td><td><button type=\"button\" class=\"btn btn-outline-primary btn-sm\" title=\"View Details\" data-action=\"view-details\" data-name=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</td><td><button type=\"button\" class=\"btn btn-outline-primary btn-sm\" title=\"View Details\" data-action=\"view-details\" data-name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 208, Col: 78}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\" data-datacenter=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var15 string
- templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name)
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(collection.DataCenter)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 171, Col: 78}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 209, Col: 90}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "\" data-datacenter=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" data-volume-count=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var16 string
- templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(collection.DataCenter)
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 172, Col: 90}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 210, Col: 112}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" data-volume-count=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\" data-ec-volume-count=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var17 string
- templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount))
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.EcVolumeCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 173, Col: 112}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 211, Col: 117}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "\" data-file-count=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "\" data-file-count=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var18 string
- templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount))
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 174, Col: 108}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 212, Col: 108}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\" data-total-size=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" data-total-size=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var19 string
- templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.TotalSize))
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.TotalSize))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 175, Col: 108}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 213, Col: 108}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "\" data-disk-types=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "\" data-disk-types=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var20 string
- templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(formatDiskTypes(collection.DiskTypes))
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(formatDiskTypes(collection.DiskTypes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 176, Col: 106}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 214, Col: 106}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\"><i class=\"fas fa-eye\"></i></button></td></tr>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\"><i class=\"fas fa-eye\"></i></button></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</tbody></table></div>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<div class=\"text-center py-5\"><i class=\"fas fa-layer-group fa-3x text-muted mb-3\"></i><h5 class=\"text-muted\">No Collections Found</h5><p class=\"text-muted\">No collections are currently configured in the cluster.</p></div>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<div class=\"text-center py-5\"><i class=\"fas fa-layer-group fa-3x text-muted mb-3\"></i><h5 class=\"text-muted\">No Collections Found</h5><p class=\"text-muted\">No collections are currently configured in the cluster.</p></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</div></div><!-- Last Updated --><div class=\"row\"><div class=\"col-12\"><small class=\"text-muted\"><i class=\"fas fa-clock me-1\"></i> Last updated: ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div></div><!-- Last Updated --><div class=\"row\"><div class=\"col-12\"><small class=\"text-muted\"><i class=\"fas fa-clock me-1\"></i> Last updated: ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var21 string
- templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 200, Col: 81}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 238, Col: 81}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</small></div></div></div><!-- JavaScript for cluster collections functionality --><script>\n document.addEventListener('DOMContentLoaded', function() {\n // Handle collection action buttons\n document.addEventListener('click', function(e) {\n const button = e.target.closest('[data-action]');\n if (!button) return;\n \n const action = button.getAttribute('data-action');\n \n switch(action) {\n case 'view-details':\n const collectionData = {\n name: button.getAttribute('data-name'),\n datacenter: button.getAttribute('data-datacenter'),\n volumeCount: parseInt(button.getAttribute('data-volume-count')),\n fileCount: parseInt(button.getAttribute('data-file-count')),\n totalSize: parseInt(button.getAttribute('data-total-size')),\n diskTypes: button.getAttribute('data-disk-types')\n };\n showCollectionDetails(collectionData);\n break;\n }\n });\n });\n \n function showCollectionDetails(collection) {\n const modalHtml = '<div class=\"modal fade\" id=\"collectionDetailsModal\" tabindex=\"-1\">' +\n '<div class=\"modal-dialog modal-lg\">' +\n '<div class=\"modal-content\">' +\n '<div class=\"modal-header\">' +\n '<h5 class=\"modal-title\"><i class=\"fas fa-layer-group me-2\"></i>Collection Details: ' + collection.name + '</h5>' +\n '<button type=\"button\" class=\"btn-close\" data-bs-dismiss=\"modal\"></button>' +\n '</div>' +\n '<div class=\"modal-body\">' +\n '<div class=\"row\">' +\n '<div class=\"col-md-6\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-info-circle me-1\"></i>Basic Information</h6>' +\n '<table class=\"table table-sm\">' +\n '<tr><td><strong>Collection Name:</strong></td><td><code>' + collection.name + '</code></td></tr>' +\n '<tr><td><strong>Data Center:</strong></td><td>' +\n (collection.datacenter ? '<span class=\"badge bg-light text-dark\">' + collection.datacenter + '</span>' : '<span class=\"text-muted\">N/A</span>') +\n '</td></tr>' +\n '<tr><td><strong>Disk Types:</strong></td><td>' +\n (collection.diskTypes ? 
collection.diskTypes.split(', ').map(type => \n '<span class=\"badge bg-' + getDiskTypeBadgeColor(type) + ' me-1\">' + type + '</span>'\n ).join('') : '<span class=\"text-muted\">Unknown</span>') +\n '</td></tr>' +\n '</table>' +\n '</div>' +\n '<div class=\"col-md-6\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-chart-bar me-1\"></i>Storage Statistics</h6>' +\n '<table class=\"table table-sm\">' +\n '<tr><td><strong>Total Volumes:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-database me-2 text-muted\"></i>' +\n '<span>' + collection.volumeCount.toLocaleString() + '</span>' +\n '</div>' +\n '</td></tr>' +\n '<tr><td><strong>Total Files:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-file me-2 text-muted\"></i>' +\n '<span>' + collection.fileCount.toLocaleString() + '</span>' +\n '</div>' +\n '</td></tr>' +\n '<tr><td><strong>Total Size:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-hdd me-2 text-muted\"></i>' +\n '<span>' + formatBytes(collection.totalSize) + '</span>' +\n '</div>' +\n '</td></tr>' +\n '</table>' +\n '</div>' +\n '</div>' +\n '<div class=\"row mt-3\">' +\n '<div class=\"col-12\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-link me-1\"></i>Quick Actions</h6>' +\n '<div class=\"d-grid gap-2 d-md-flex\">' +\n '<a href=\"/cluster/volumes?collection=' + encodeURIComponent(collection.name) + '\" class=\"btn btn-outline-primary\">' +\n '<i class=\"fas fa-database me-1\"></i>View Volumes' +\n '</a>' +\n '<a href=\"/files?collection=' + encodeURIComponent(collection.name) + '\" class=\"btn btn-outline-info\">' +\n '<i class=\"fas fa-folder me-1\"></i>Browse Files' +\n '</a>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '<div class=\"modal-footer\">' +\n '<button type=\"button\" class=\"btn btn-secondary\" data-bs-dismiss=\"modal\">Close</button>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '</div>';\n \n // Remove existing modal if present\n const existingModal = document.getElementById('collectionDetailsModal');\n if (existingModal) {\n existingModal.remove();\n }\n \n // Add modal to body and show\n document.body.insertAdjacentHTML('beforeend', modalHtml);\n const modal = new bootstrap.Modal(document.getElementById('collectionDetailsModal'));\n modal.show();\n \n // Remove modal when hidden\n document.getElementById('collectionDetailsModal').addEventListener('hidden.bs.modal', function() {\n this.remove();\n });\n }\n \n function getDiskTypeBadgeColor(diskType) {\n switch(diskType.toLowerCase()) {\n case 'ssd':\n return 'primary';\n case 'hdd':\n case '':\n return 'secondary';\n default:\n return 'info';\n }\n }\n \n function formatBytes(bytes) {\n if (bytes === 0) return '0 Bytes';\n const k = 1024;\n const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'];\n const i = Math.floor(Math.log(bytes) / Math.log(k));\n return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];\n }\n \n function exportCollections() {\n // Simple CSV export of collections list\n const rows = Array.from(document.querySelectorAll('#collectionsTable tbody tr')).map(row => {\n const cells = row.querySelectorAll('td');\n if (cells.length > 1) {\n return {\n name: cells[0].textContent.trim(),\n volumes: cells[1].textContent.trim(),\n files: cells[2].textContent.trim(),\n size: cells[3].textContent.trim(),\n diskTypes: cells[4].textContent.trim()\n };\n }\n return null;\n }).filter(row => row !== null);\n \n const csvContent = 
\"data:text/csv;charset=utf-8,\" + \n \"Collection Name,Volumes,Files,Size,Disk Types\\n\" +\n rows.map(r => '\"' + r.name + '\",\"' + r.volumes + '\",\"' + r.files + '\",\"' + r.size + '\",\"' + r.diskTypes + '\"').join(\"\\n\");\n \n const encodedUri = encodeURI(csvContent);\n const link = document.createElement(\"a\");\n link.setAttribute(\"href\", encodedUri);\n link.setAttribute(\"download\", \"collections.csv\");\n document.body.appendChild(link);\n link.click();\n document.body.removeChild(link);\n }\n </script>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</small></div></div></div><!-- JavaScript for cluster collections functionality --><script>\n document.addEventListener('DOMContentLoaded', function() {\n // Handle collection action buttons\n document.addEventListener('click', function(e) {\n const button = e.target.closest('[data-action]');\n if (!button) return;\n \n const action = button.getAttribute('data-action');\n \n switch(action) {\n case 'view-details':\n const collectionData = {\n name: button.getAttribute('data-name'),\n datacenter: button.getAttribute('data-datacenter'),\n volumeCount: parseInt(button.getAttribute('data-volume-count')),\n ecVolumeCount: parseInt(button.getAttribute('data-ec-volume-count')),\n fileCount: parseInt(button.getAttribute('data-file-count')),\n totalSize: parseInt(button.getAttribute('data-total-size')),\n diskTypes: button.getAttribute('data-disk-types')\n };\n showCollectionDetails(collectionData);\n break;\n }\n });\n });\n \n function showCollectionDetails(collection) {\n const modalHtml = '<div class=\"modal fade\" id=\"collectionDetailsModal\" tabindex=\"-1\">' +\n '<div class=\"modal-dialog modal-lg\">' +\n '<div class=\"modal-content\">' +\n '<div class=\"modal-header\">' +\n '<h5 class=\"modal-title\"><i class=\"fas fa-layer-group me-2\"></i>Collection Details: ' + collection.name + '</h5>' +\n '<button type=\"button\" class=\"btn-close\" data-bs-dismiss=\"modal\"></button>' +\n '</div>' +\n '<div class=\"modal-body\">' +\n '<div class=\"row\">' +\n '<div class=\"col-md-6\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-info-circle me-1\"></i>Basic Information</h6>' +\n '<table class=\"table table-sm\">' +\n '<tr><td><strong>Collection Name:</strong></td><td><code>' + collection.name + '</code></td></tr>' +\n '<tr><td><strong>Data Center:</strong></td><td>' +\n (collection.datacenter ? '<span class=\"badge bg-light text-dark\">' + collection.datacenter + '</span>' : '<span class=\"text-muted\">N/A</span>') +\n '</td></tr>' +\n '<tr><td><strong>Disk Types:</strong></td><td>' +\n (collection.diskTypes ? 
collection.diskTypes.split(', ').map(type => \n '<span class=\"badge bg-' + getDiskTypeBadgeColor(type) + ' me-1\">' + type + '</span>'\n ).join('') : '<span class=\"text-muted\">Unknown</span>') +\n '</td></tr>' +\n '</table>' +\n '</div>' +\n '<div class=\"col-md-6\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-chart-bar me-1\"></i>Storage Statistics</h6>' +\n '<table class=\"table table-sm\">' +\n '<tr><td><strong>Regular Volumes:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-database me-2 text-muted\"></i>' +\n '<span>' + collection.volumeCount.toLocaleString() + '</span>' +\n '</div>' +\n '</td></tr>' +\n '<tr><td><strong>EC Volumes:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-th-large me-2 text-muted\"></i>' +\n '<span>' + collection.ecVolumeCount.toLocaleString() + '</span>' +\n '</div>' +\n '</td></tr>' +\n '<tr><td><strong>Total Files:</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-file me-2 text-muted\"></i>' +\n '<span>' + collection.fileCount.toLocaleString() + '</span>' +\n '</div>' +\n '</td></tr>' +\n '<tr><td><strong>Total Size (Logical):</strong></td><td>' +\n '<div class=\"d-flex align-items-center\">' +\n '<i class=\"fas fa-hdd me-2 text-muted\"></i>' +\n '<span>' + formatBytes(collection.totalSize) + '</span>' +\n '</div>' +\n '</td></tr>' +\n '</table>' +\n '</div>' +\n '</div>' +\n '<div class=\"row mt-3\">' +\n '<div class=\"col-12\">' +\n '<h6 class=\"text-primary\"><i class=\"fas fa-link me-1\"></i>Quick Actions</h6>' +\n '<div class=\"d-grid gap-2 d-md-flex\">' +\n '<a href=\"/cluster/volumes?collection=' + encodeURIComponent(collection.name) + '\" class=\"btn btn-outline-primary\">' +\n '<i class=\"fas fa-database me-1\"></i>View Volumes' +\n '</a>' +\n '<a href=\"/cluster/ec-shards?collection=' + encodeURIComponent(collection.name) + '\" class=\"btn btn-outline-secondary\">' +\n '<i class=\"fas fa-th-large me-1\"></i>View EC Volumes' +\n '</a>' +\n '<a href=\"/files?collection=' + encodeURIComponent(collection.name) + '\" class=\"btn btn-outline-info\">' +\n '<i class=\"fas fa-folder me-1\"></i>Browse Files' +\n '</a>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '<div class=\"modal-footer\">' +\n '<button type=\"button\" class=\"btn btn-secondary\" data-bs-dismiss=\"modal\">Close</button>' +\n '</div>' +\n '</div>' +\n '</div>' +\n '</div>';\n \n // Remove existing modal if present\n const existingModal = document.getElementById('collectionDetailsModal');\n if (existingModal) {\n existingModal.remove();\n }\n \n // Add modal to body and show\n document.body.insertAdjacentHTML('beforeend', modalHtml);\n const modal = new bootstrap.Modal(document.getElementById('collectionDetailsModal'));\n modal.show();\n \n // Remove modal when hidden\n document.getElementById('collectionDetailsModal').addEventListener('hidden.bs.modal', function() {\n this.remove();\n });\n }\n \n function getDiskTypeBadgeColor(diskType) {\n switch(diskType.toLowerCase()) {\n case 'ssd':\n return 'primary';\n case 'hdd':\n case '':\n return 'secondary';\n default:\n return 'info';\n }\n }\n \n function formatBytes(bytes) {\n if (bytes === 0) return '0 Bytes';\n const k = 1024;\n const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'];\n const i = Math.floor(Math.log(bytes) / Math.log(k));\n return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];\n }\n \n function exportCollections() {\n // Simple CSV export of collections 
list\n const rows = Array.from(document.querySelectorAll('#collectionsTable tbody tr')).map(row => {\n const cells = row.querySelectorAll('td');\n if (cells.length > 1) {\n return {\n name: cells[0].textContent.trim(),\n volumes: cells[1].textContent.trim(),\n files: cells[2].textContent.trim(),\n size: cells[3].textContent.trim(),\n diskTypes: cells[4].textContent.trim()\n };\n }\n return null;\n }).filter(row => row !== null);\n \n const csvContent = \"data:text/csv;charset=utf-8,\" + \n \"Collection Name,Volumes,Files,Size,Disk Types\\n\" +\n rows.map(r => '\"' + r.name + '\",\"' + r.volumes + '\",\"' + r.files + '\",\"' + r.size + '\",\"' + r.diskTypes + '\"').join(\"\\n\");\n \n const encodedUri = encodeURI(csvContent);\n const link = document.createElement(\"a\");\n link.setAttribute(\"href\", encodedUri);\n link.setAttribute(\"download\", \"collections.csv\");\n document.body.appendChild(link);\n link.click();\n document.body.removeChild(link);\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
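Aside (not part of the patch): the regenerated collections view above now renders a separate "EC Volumes" column and labels sizes as logical. The actual Go types live in weed/admin/dash and are not shown in this hunk; the following is only a sketch, inferred from the fields the template references (data.TotalFiles, data.TotalSize, collection.VolumeCount, collection.EcVolumeCount, collection.FileCount, collection.TotalSize, collection.DiskTypes, collection.DataCenter), of what the per-collection record plausibly looks like. Field names and integer widths are assumptions, not the committed definitions.

    // Hypothetical shape, reconstructed from template usage only.
    type CollectionInfo struct {
        Name          string   // collection.Name, linked to /cluster/collections/{name}
        DataCenter    string   // shown in the details modal
        VolumeCount   int      // "Regular Volumes" column
        EcVolumeCount int      // new "EC Volumes" column
        FileCount     int64    // "Files" column
        TotalSize     int64    // "Size (Logical)" column, formatted with formatBytes
        DiskTypes     []string // rendered as badges
    }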
diff --git a/weed/admin/view/app/cluster_ec_shards.templ b/weed/admin/view/app/cluster_ec_shards.templ
new file mode 100644
index 000000000..a3e8fc0ec
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_shards.templ
@@ -0,0 +1,455 @@
+package app
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+templ ClusterEcShards(data dash.ClusterEcShardsData) {
+ <div class="d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom">
+ <div>
+ <h1 class="h2">
+ <i class="fas fa-th-large me-2"></i>EC Shards
+ </h1>
+ if data.FilterCollection != "" {
+ <div class="d-flex align-items-center mt-2">
+ if data.FilterCollection == "default" {
+ <span class="badge bg-secondary text-white me-2">
+ <i class="fas fa-filter me-1"></i>Collection: default
+ </span>
+ } else {
+ <span class="badge bg-info text-white me-2">
+ <i class="fas fa-filter me-1"></i>Collection: {data.FilterCollection}
+ </span>
+ }
+ <a href="/cluster/ec-shards" class="btn btn-sm btn-outline-secondary">
+ <i class="fas fa-times me-1"></i>Clear Filter
+ </a>
+ </div>
+ }
+ </div>
+ <div class="btn-toolbar mb-2 mb-md-0">
+ <div class="btn-group me-2">
+ <select class="form-select form-select-sm me-2" id="pageSizeSelect" onchange="changePageSize()" style="width: auto;">
+ <option value="50" if data.PageSize == 50 { selected="selected" }>50 per page</option>
+ <option value="100" if data.PageSize == 100 { selected="selected" }>100 per page</option>
+ <option value="200" if data.PageSize == 200 { selected="selected" }>200 per page</option>
+ <option value="500" if data.PageSize == 500 { selected="selected" }>500 per page</option>
+ </select>
+ <button type="button" class="btn btn-sm btn-outline-primary" onclick="exportEcShards()">
+ <i class="fas fa-download me-1"></i>Export
+ </button>
+ </div>
+ </div>
+ </div>
+
+ <!-- Statistics Cards -->
+ <div class="row mb-4">
+ <div class="col-md-3">
+ <div class="card text-bg-primary">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Total Shards</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalShards)}</h4>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-puzzle-piece fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-info">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">EC Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalVolumes)}</h4>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-database fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-success">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Healthy Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.VolumesWithAllShards)}</h4>
+ <small>Complete (14/14 shards)</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-check-circle fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-warning">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Degraded Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.VolumesWithMissingShards)}</h4>
+ <small>Incomplete/Critical</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-exclamation-triangle fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Shards Table -->
+ <div class="table-responsive">
+ <table class="table table-striped table-hover" id="ecShardsTable">
+ <thead>
+ <tr>
+ <th>
+ <a href="#" onclick="sortBy('volume_id')" class="text-dark text-decoration-none">
+ Volume ID
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+
+ if data.ShowCollectionColumn {
+ <th>
+ <a href="#" onclick="sortBy('collection')" class="text-dark text-decoration-none">
+ Collection
+ if data.SortBy == "collection" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ }
+ <th>
+ <a href="#" onclick="sortBy('server')" class="text-dark text-decoration-none">
+ Server
+ if data.SortBy == "server" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ if data.ShowDataCenterColumn {
+ <th>
+ <a href="#" onclick="sortBy('datacenter')" class="text-dark text-decoration-none">
+ Data Center
+ if data.SortBy == "datacenter" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ }
+ if data.ShowRackColumn {
+ <th>
+ <a href="#" onclick="sortBy('rack')" class="text-dark text-decoration-none">
+ Rack
+ if data.SortBy == "rack" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ }
+ <th class="text-dark">Distribution</th>
+ <th class="text-dark">Status</th>
+ <th class="text-dark">Actions</th>
+ </tr>
+ </thead>
+ <tbody>
+ for _, shard := range data.EcShards {
+ <tr>
+ <td>
+ <span class="fw-bold">{fmt.Sprintf("%d", shard.VolumeID)}</span>
+ </td>
+ if data.ShowCollectionColumn {
+ <td>
+ if shard.Collection != "" {
+ <a href="/cluster/ec-shards?collection={shard.Collection}" class="text-decoration-none">
+ <span class="badge bg-info text-white">{shard.Collection}</span>
+ </a>
+ } else {
+ <a href="/cluster/ec-shards?collection=default" class="text-decoration-none">
+ <span class="badge bg-secondary text-white">default</span>
+ </a>
+ }
+ </td>
+ }
+ <td>
+ <code class="small">{shard.Server}</code>
+ </td>
+ if data.ShowDataCenterColumn {
+ <td>
+ <span class="badge bg-outline-primary">{shard.DataCenter}</span>
+ </td>
+ }
+ if data.ShowRackColumn {
+ <td>
+ <span class="badge bg-outline-secondary">{shard.Rack}</span>
+ </td>
+ }
+ <td>
+ @displayShardDistribution(shard, data.EcShards)
+ </td>
+ <td>
+ @displayVolumeStatus(shard)
+ </td>
+ <td>
+ <div class="btn-group" role="group">
+ <button type="button" class="btn btn-sm btn-outline-primary"
+ onclick="showShardDetails(event)"
+ data-volume-id={ fmt.Sprintf("%d", shard.VolumeID) }
+ title="View EC volume details">
+ <i class="fas fa-info-circle"></i>
+ </button>
+ if !shard.IsComplete {
+ <button type="button" class="btn btn-sm btn-outline-warning"
+ onclick="repairVolume(event)"
+ data-volume-id={ fmt.Sprintf("%d", shard.VolumeID) }
+ title="Repair missing shards">
+ <i class="fas fa-wrench"></i>
+ </button>
+ }
+ </div>
+ </td>
+ </tr>
+ }
+ </tbody>
+ </table>
+ </div>
+
+ <!-- Pagination -->
+ if data.TotalPages > 1 {
+ <nav aria-label="EC Shards pagination">
+ <ul class="pagination justify-content-center">
+ if data.CurrentPage > 1 {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.CurrentPage-1) }>
+ <i class="fas fa-chevron-left"></i>
+ </a>
+ </li>
+ }
+
+ <!-- First page -->
+ if data.CurrentPage > 3 {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(1)">1</a>
+ </li>
+ if data.CurrentPage > 4 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ }
+ }
+
+ <!-- Current page and neighbors -->
+ if data.CurrentPage > 1 && data.CurrentPage-1 >= 1 {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.CurrentPage-1) }>{fmt.Sprintf("%d", data.CurrentPage-1)}</a>
+ </li>
+ }
+
+ <li class="page-item active">
+ <span class="page-link">{fmt.Sprintf("%d", data.CurrentPage)}</span>
+ </li>
+
+ if data.CurrentPage < data.TotalPages && data.CurrentPage+1 <= data.TotalPages {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.CurrentPage+1) }>{fmt.Sprintf("%d", data.CurrentPage+1)}</a>
+ </li>
+ }
+
+ <!-- Last page -->
+ if data.CurrentPage < data.TotalPages-2 {
+ if data.CurrentPage < data.TotalPages-3 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ }
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.TotalPages) }>{fmt.Sprintf("%d", data.TotalPages)}</a>
+ </li>
+ }
+
+ if data.CurrentPage < data.TotalPages {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.CurrentPage+1) }>
+ <i class="fas fa-chevron-right"></i>
+ </a>
+ </li>
+ }
+ </ul>
+ </nav>
+ }
+
+
+ <!-- JavaScript -->
+ <script>
+ function sortBy(field) {
+ const currentSort = "{data.SortBy}";
+ const currentOrder = "{data.SortOrder}";
+ let newOrder = 'asc';
+
+ if (currentSort === field && currentOrder === 'asc') {
+ newOrder = 'desc';
+ }
+
+ updateUrl({
+ sortBy: field,
+ sortOrder: newOrder,
+ page: 1
+ });
+ }
+
+ function goToPage(event) {
+ // Get data from the link element (not any child elements)
+ const link = event.target.closest('a');
+ const page = link.getAttribute('data-page');
+ updateUrl({ page: page });
+ }
+
+ function changePageSize() {
+ const pageSize = document.getElementById('pageSizeSelect').value;
+ updateUrl({ pageSize: pageSize, page: 1 });
+ }
+
+ function updateUrl(params) {
+ const url = new URL(window.location);
+ Object.keys(params).forEach(key => {
+ if (params[key]) {
+ url.searchParams.set(key, params[key]);
+ } else {
+ url.searchParams.delete(key);
+ }
+ });
+ window.location.href = url.toString();
+ }
+
+ function exportEcShards() {
+ const url = new URL('/api/cluster/ec-shards/export', window.location.origin);
+ const params = new URLSearchParams(window.location.search);
+ params.forEach((value, key) => {
+ url.searchParams.set(key, value);
+ });
+ window.open(url.toString(), '_blank');
+ }
+
+ function showShardDetails(event) {
+ // Get data from the button element (not the icon inside it)
+ const button = event.target.closest('button');
+ const volumeId = button.getAttribute('data-volume-id');
+
+ // Navigate to the EC volume details page
+ window.location.href = `/cluster/ec-volumes/${volumeId}`;
+ }
+
+ function repairVolume(event) {
+ // Get data from the button element (not the icon inside it)
+ const button = event.target.closest('button');
+ const volumeId = button.getAttribute('data-volume-id');
+ if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {
+ fetch(`/api/cluster/volumes/${volumeId}/repair`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ }
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ alert('Repair initiated successfully');
+ location.reload();
+ } else {
+ alert('Failed to initiate repair: ' + data.error);
+ }
+ })
+ .catch(error => {
+ alert('Error: ' + error.message);
+ });
+ }
+ }
+ </script>
+}
+
+// displayShardDistribution shows the distribution summary for a volume's shards
+templ displayShardDistribution(shard dash.EcShardWithInfo, allShards []dash.EcShardWithInfo) {
+ <div class="small">
+ <i class="fas fa-sitemap me-1"></i>
+ { calculateDistributionSummary(shard.VolumeID, allShards) }
+ </div>
+}
+
+// displayVolumeStatus shows an improved status display
+templ displayVolumeStatus(shard dash.EcShardWithInfo) {
+ if shard.IsComplete {
+ <span class="badge bg-success"><i class="fas fa-check me-1"></i>Complete</span>
+ } else {
+ if len(shard.MissingShards) > 10 {
+ <span class="badge bg-danger"><i class="fas fa-skull me-1"></i>Critical ({fmt.Sprintf("%d", len(shard.MissingShards))} missing)</span>
+ } else if len(shard.MissingShards) > 6 {
+ <span class="badge bg-warning"><i class="fas fa-exclamation-triangle me-1"></i>Degraded ({fmt.Sprintf("%d", len(shard.MissingShards))} missing)</span>
+ } else if len(shard.MissingShards) > 2 {
+ <span class="badge bg-warning"><i class="fas fa-info-circle me-1"></i>Incomplete ({fmt.Sprintf("%d", len(shard.MissingShards))} missing)</span>
+ } else {
+ <span class="badge bg-info"><i class="fas fa-info-circle me-1"></i>Minor Issues ({fmt.Sprintf("%d", len(shard.MissingShards))} missing)</span>
+ }
+ }
+}
+
+// calculateDistributionSummary calculates and formats the distribution summary
+func calculateDistributionSummary(volumeID uint32, allShards []dash.EcShardWithInfo) string {
+ dataCenters := make(map[string]bool)
+ racks := make(map[string]bool)
+ servers := make(map[string]bool)
+
+ for _, s := range allShards {
+ if s.VolumeID == volumeID {
+ dataCenters[s.DataCenter] = true
+ racks[s.Rack] = true
+ servers[s.Server] = true
+ }
+ }
+
+ return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), len(racks), len(servers))
+}
+
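Aside (not part of the patch): the summary cards in this template ("Healthy Volumes" vs "Degraded Volumes") count distinct EC volumes, while the table lists one row per shard entry. The server-side aggregation lives in weed/admin/dash and is not included in this hunk; below is only a minimal sketch of how those two counters could be derived from the rows the template receives, using just the fields the template itself references (VolumeID, IsComplete). It is an illustration under those assumptions, not the committed implementation.

    // Sketch only; dash.EcShardWithInfo is assumed to carry at least
    // VolumeID and IsComplete, as used by the template above.
    func summarizeEcVolumes(shards []dash.EcShardWithInfo) (healthy, degraded int) {
        seen := make(map[uint32]bool)
        for _, s := range shards {
            if seen[s.VolumeID] {
                continue // count each EC volume once
            }
            seen[s.VolumeID] = true
            if s.IsComplete { // all 14 shards (10 data + 4 parity) present
                healthy++
            } else {
                degraded++
            }
        }
        return healthy, degraded
    }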
diff --git a/weed/admin/view/app/cluster_ec_shards_templ.go b/weed/admin/view/app/cluster_ec_shards_templ.go
new file mode 100644
index 000000000..3c883a93c
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_shards_templ.go
@@ -0,0 +1,840 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><div><h1 class=\"h2\"><i class=\"fas fa-th-large me-2\"></i>EC Shards</h1>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.FilterCollection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<div class=\"d-flex align-items-center mt-2\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.FilterCollection == "default" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "<span class=\"badge bg-secondary text-white me-2\"><i class=\"fas fa-filter me-1\"></i>Collection: default</span> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "<span class=\"badge bg-info text-white me-2\"><i class=\"fas fa-filter me-1\"></i>Collection: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.FilterCollection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 22, Col: 96}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</span> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<a href=\"/cluster/ec-shards\" class=\"btn btn-sm btn-outline-secondary\"><i class=\"fas fa-times me-1\"></i>Clear Filter</a></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</div><div class=\"btn-toolbar mb-2 mb-md-0\"><div class=\"btn-group me-2\"><select class=\"form-select form-select-sm me-2\" id=\"pageSizeSelect\" onchange=\"changePageSize()\" style=\"width: auto;\"><option value=\"50\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 50 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " selected=\"selected\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, ">50 per page</option> <option value=\"100\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 100 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " selected=\"selected\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, ">100 per page</option> <option value=\"200\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 200 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " selected=\"selected\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, ">200 per page</option> <option value=\"500\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 500 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " selected=\"selected\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, ">500 per page</option></select> <button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"exportEcShards()\"><i class=\"fas fa-download me-1\"></i>Export</button></div></div></div><!-- Statistics Cards --><div class=\"row mb-4\"><div class=\"col-md-3\"><div class=\"card text-bg-primary\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Shards</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 54, Col: 81}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</h4></div><div class=\"align-self-center\"><i class=\"fas fa-puzzle-piece fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-info\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">EC Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 69, Col: 82}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</h4></div><div class=\"align-self-center\"><i class=\"fas fa-database fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-success\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Healthy Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumesWithAllShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 84, Col: 90}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</h4><small>Complete (14/14 shards)</small></div><div class=\"align-self-center\"><i class=\"fas fa-check-circle fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-warning\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Degraded Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumesWithMissingShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 100, Col: 94}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "</h4><small>Incomplete/Critical</small></div><div class=\"align-self-center\"><i class=\"fas fa-exclamation-triangle fa-2x\"></i></div></div></div></div></div></div><!-- Shards Table --><div class=\"table-responsive\"><table class=\"table table-striped table-hover\" id=\"ecShardsTable\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('volume_id')\" class=\"text-dark text-decoration-none\">Volume ID ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<th><a href=\"#\" onclick=\"sortBy('collection')\" class=\"text-dark text-decoration-none\">Collection ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "collection" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<th><a href=\"#\" onclick=\"sortBy('server')\" class=\"text-dark text-decoration-none\">Server ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "server" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<th><a href=\"#\" onclick=\"sortBy('datacenter')\" class=\"text-dark text-decoration-none\">Data Center ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "datacenter" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if data.ShowRackColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<th><a href=\"#\" onclick=\"sortBy('rack')\" class=\"text-dark text-decoration-none\">Rack ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "rack" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<th class=\"text-dark\">Distribution</th><th class=\"text-dark\">Status</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, shard := range data.EcShards {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<tr><td><span class=\"fw-bold\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 203, Col: 84}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</span></td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if shard.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<a href=\"/cluster/ec-shards?collection={shard.Collection}\" class=\"text-decoration-none\"><span class=\"badge bg-info text-white\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 209, Col: 96}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</span></a>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<a href=\"/cluster/ec-shards?collection=default\" class=\"text-decoration-none\"><span class=\"badge bg-secondary text-white\">default</span></a>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "<td><code class=\"small\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 219, Col: 61}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</code></td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "<td><span class=\"badge bg-outline-primary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 223, Col: 88}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</span></td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if data.ShowRackColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<td><span class=\"badge bg-outline-secondary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 228, Col: 84}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</span></td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayShardDistribution(shard, data.EcShards).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayVolumeStatus(shard).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</td><td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showShardDetails(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 241, Col: 90}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !shard.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairVolume(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 248, Col: 94}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</div></td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "</tbody></table></div><!-- Pagination -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.TotalPages > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "<nav aria-label=\"EC Shards pagination\"><ul class=\"pagination justify-content-center\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 267, Col: 129}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\"><i class=\"fas fa-chevron-left\"></i></a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "<!-- First page -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage > 3 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(1)\">1</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage > 4 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<!-- Current page and neighbors -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage > 1 && data.CurrentPage-1 >= 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 288, Col: 129}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 288, Col: 170}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "<li class=\"page-item active\"><span class=\"page-link\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 293, Col: 80}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage < data.TotalPages && data.CurrentPage+1 <= data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 298, Col: 129}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 298, Col: 170}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "<!-- Last page -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.CurrentPage < data.TotalPages-2 {
+ if data.CurrentPage < data.TotalPages-3 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, " <li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 310, Col: 126}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 310, Col: 164}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if data.CurrentPage < data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 316, Col: 129}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "\"><i class=\"fas fa-chevron-right\"></i></a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "</ul></nav>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "<!-- JavaScript --><script>\n function sortBy(field) {\n const currentSort = \"{data.SortBy}\";\n const currentOrder = \"{data.SortOrder}\";\n let newOrder = 'asc';\n \n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n updateUrl({\n sortBy: field,\n sortOrder: newOrder,\n page: 1\n });\n }\n\n function goToPage(event) {\n // Get data from the link element (not any child elements)\n const link = event.target.closest('a');\n const page = link.getAttribute('data-page');\n updateUrl({ page: page });\n }\n\n function changePageSize() {\n const pageSize = document.getElementById('pageSizeSelect').value;\n updateUrl({ pageSize: pageSize, page: 1 });\n }\n\n function updateUrl(params) {\n const url = new URL(window.location);\n Object.keys(params).forEach(key => {\n if (params[key]) {\n url.searchParams.set(key, params[key]);\n } else {\n url.searchParams.delete(key);\n }\n });\n window.location.href = url.toString();\n }\n\n function exportEcShards() {\n const url = new URL('/api/cluster/ec-shards/export', window.location.origin);\n const params = new URLSearchParams(window.location.search);\n params.forEach((value, key) => {\n url.searchParams.set(key, value);\n });\n window.open(url.toString(), '_blank');\n }\n\n function showShardDetails(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n \n // Navigate to the EC volume details page\n window.location.href = `/cluster/ec-volumes/${volumeId}`;\n }\n\n function repairVolume(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {\n fetch(`/api/cluster/volumes/${volumeId}/repair`, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Repair initiated successfully');\n location.reload();\n } else {\n alert('Failed to initiate repair: ' + data.error);\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n }\n </script>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// displayShardDistribution shows the distribution summary for a volume's shards
+func displayShardDistribution(shard dash.EcShardWithInfo, allShards []dash.EcShardWithInfo) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var23 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var23 == nil {
+ templ_7745c5c3_Var23 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "<div class=\"small\"><i class=\"fas fa-sitemap me-1\"></i> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(calculateDistributionSummary(shard.VolumeID, allShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 418, Col: 65}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// displayVolumeStatus shows an improved status display
+func displayVolumeStatus(shard dash.EcShardWithInfo) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var25 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var25 == nil {
+ templ_7745c5c3_Var25 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if shard.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ if len(shard.MissingShards) > 10 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "<span class=\"badge bg-danger\"><i class=\"fas fa-skull me-1\"></i>Critical (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var26 string
+ templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 428, Col: 129}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if len(shard.MissingShards) > 6 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i>Degraded (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 430, Col: 145}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if len(shard.MissingShards) > 2 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "<span class=\"badge bg-warning\"><i class=\"fas fa-info-circle me-1\"></i>Incomplete (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var28 string
+ templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 432, Col: 138}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "<span class=\"badge bg-info\"><i class=\"fas fa-info-circle me-1\"></i>Minor Issues (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var29 string
+ templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 434, Col: 137}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ return nil
+ })
+}
+
+// calculateDistributionSummary calculates and formats the distribution summary
+func calculateDistributionSummary(volumeID uint32, allShards []dash.EcShardWithInfo) string {
+ dataCenters := make(map[string]bool)
+ racks := make(map[string]bool)
+ servers := make(map[string]bool)
+
+ for _, s := range allShards {
+ if s.VolumeID == volumeID {
+ dataCenters[s.DataCenter] = true
+ racks[s.Rack] = true
+ servers[s.Server] = true
+ }
+ }
+
+ return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), len(racks), len(servers))
+}
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/cluster_ec_volumes.templ b/weed/admin/view/app/cluster_ec_volumes.templ
new file mode 100644
index 000000000..aafa621aa
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_volumes.templ
@@ -0,0 +1,775 @@
+package app
+
+import (
+ "fmt"
+ "strings"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <title>EC Volumes - SeaweedFS</title>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
+ <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
+</head>
+<body>
+ <div class="container-fluid">
+ <div class="row">
+ <div class="col-12">
+ <h2 class="mb-4">
+ <i class="fas fa-database me-2"></i>EC Volumes
+ <small class="text-muted">({fmt.Sprintf("%d", data.TotalVolumes)} volumes)</small>
+ </h2>
+ </div>
+ </div>
+
+ <!-- Statistics Cards -->
+ <div class="row mb-4">
+ <div class="col-md-3">
+ <div class="card text-bg-primary">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Total Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalVolumes)}</h4>
+ <small>EC encoded volumes</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-cubes fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-info">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Total Shards</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalShards)}</h4>
+ <small>Distributed shards</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-puzzle-piece fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-success">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Complete Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.CompleteVolumes)}</h4>
+ <small>All shards present</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-check-circle fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-warning">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Incomplete Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.IncompleteVolumes)}</h4>
+ <small>Missing shards</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-exclamation-triangle fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- EC Storage Information Note -->
+ <div class="alert alert-info mb-4" role="alert">
+ <i class="fas fa-info-circle me-2"></i>
+ <strong>EC Storage Note:</strong>
+ EC volumes use Reed-Solomon erasure coding (10 data + 4 parity), so each volume is stored as 14 shards with built-in redundancy.
+ Physical storage is roughly 1.4x the logical data size because of the 4 parity shards.
+ </div>
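+ <!-- The 1.4x figure above is just the shard arithmetic: 14 total shards / 10 data shards = 1.4x physical-to-logical overhead. -->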
+
+ <!-- Volumes Table -->
+ <div class="d-flex justify-content-between align-items-center mb-3">
+ <div class="d-flex align-items-center">
+ <span class="me-3">
+ Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ if end > data.TotalVolumes {
+ return data.TotalVolumes
+ }
+ return end
+ }())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes
+ </span>
+
+ <div class="d-flex align-items-center">
+ <label for="pageSize" class="form-label me-2 mb-0">Show:</label>
+ <select id="pageSize" class="form-select form-select-sm" style="width: auto;" onchange="changePageSize(this.value)">
+ <option value="5" if data.PageSize == 5 { selected }>5</option>
+ <option value="10" if data.PageSize == 10 { selected }>10</option>
+ <option value="25" if data.PageSize == 25 { selected }>25</option>
+ <option value="50" if data.PageSize == 50 { selected }>50</option>
+ <option value="100" if data.PageSize == 100 { selected }>100</option>
+ </select>
+ <span class="ms-2">per page</span>
+ </div>
+ </div>
+
+ if data.Collection != "" {
+ <div>
+ if data.Collection == "default" {
+ <span class="badge bg-secondary text-white">Collection: default</span>
+ } else {
+ <span class="badge bg-info text-white">Collection: {data.Collection}</span>
+ }
+ <a href="/cluster/ec-shards" class="btn btn-sm btn-outline-secondary ms-2">Clear Filter</a>
+ </div>
+ }
+ </div>
+
+ <div class="table-responsive">
+ <table class="table table-striped table-hover" id="ecVolumesTable">
+ <thead>
+ <tr>
+ <th>
+ <a href="#" onclick="sortBy('volume_id')" class="text-dark text-decoration-none">
+ Volume ID
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ if data.ShowCollectionColumn {
+ <th>
+ <a href="#" onclick="sortBy('collection')" class="text-dark text-decoration-none">
+ Collection
+ if data.SortBy == "collection" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ }
+ <th>
+ <a href="#" onclick="sortBy('total_shards')" class="text-dark text-decoration-none">
+ Shard Count
+ if data.SortBy == "total_shards" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th class="text-dark">Shard Size</th>
+ <th class="text-dark">Shard Locations</th>
+ <th>
+ <a href="#" onclick="sortBy('completeness')" class="text-dark text-decoration-none">
+ Status
+ if data.SortBy == "completeness" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ if data.ShowDataCenterColumn {
+ <th class="text-dark">Data Centers</th>
+ }
+ <th class="text-dark">Actions</th>
+ </tr>
+ </thead>
+ <tbody>
+ for _, volume := range data.EcVolumes {
+ <tr>
+ <td>
+ <strong>{fmt.Sprintf("%d", volume.VolumeID)}</strong>
+ </td>
+ if data.ShowCollectionColumn {
+ <td>
+ if volume.Collection != "" {
+ <a href="/cluster/ec-shards?collection={volume.Collection}" class="text-decoration-none">
+ <span class="badge bg-info text-white">{volume.Collection}</span>
+ </a>
+ } else {
+ <a href="/cluster/ec-shards?collection=default" class="text-decoration-none">
+ <span class="badge bg-secondary text-white">default</span>
+ </a>
+ }
+ </td>
+ }
+ <td>
+ <span class="badge bg-primary">{fmt.Sprintf("%d/14", volume.TotalShards)}</span>
+ </td>
+ <td>
+ @displayShardSizes(volume.ShardSizes)
+ </td>
+ <td>
+ @displayVolumeDistribution(volume)
+ </td>
+ <td>
+ @displayEcVolumeStatus(volume)
+ </td>
+ if data.ShowDataCenterColumn {
+ <td>
+ for i, dc := range volume.DataCenters {
+ if i > 0 {
+ <span>, </span>
+ }
+ <span class="badge bg-primary text-white">{dc}</span>
+ }
+ </td>
+ }
+ <td>
+ <div class="btn-group" role="group">
+ <button type="button" class="btn btn-sm btn-outline-primary"
+ onclick="showVolumeDetails(event)"
+ data-volume-id={ fmt.Sprintf("%d", volume.VolumeID) }
+ title="View EC volume details">
+ <i class="fas fa-info-circle"></i>
+ </button>
+ if !volume.IsComplete {
+ <button type="button" class="btn btn-sm btn-outline-warning"
+ onclick="repairVolume(event)"
+ data-volume-id={ fmt.Sprintf("%d", volume.VolumeID) }
+ title="Repair missing shards">
+ <i class="fas fa-wrench"></i>
+ </button>
+ }
+ </div>
+ </td>
+ </tr>
+ }
+ </tbody>
+ </table>
+ </div>
+
+ <!-- Pagination -->
+ if data.TotalPages > 1 {
+ <nav aria-label="EC Volumes pagination">
+ <ul class="pagination justify-content-center">
+ if data.Page > 1 {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page="1">First</a>
+ </li>
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page-1) }>Previous</a>
+ </li>
+ }
+
+ for i := 1; i <= data.TotalPages; i++ {
+ if i == data.Page {
+ <li class="page-item active">
+ <span class="page-link">{fmt.Sprintf("%d", i)}</span>
+ </li>
+ } else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", i) }>{fmt.Sprintf("%d", i)}</a>
+ </li>
+ } else if i == 4 && data.Page > 6 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ } else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ }
+ }
+
+ if data.Page < data.TotalPages {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page+1) }>Next</a>
+ </li>
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.TotalPages) }>Last</a>
+ </li>
+ }
+ </ul>
+ </nav>
+ }
+ </div>
+
+ <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/js/bootstrap.bundle.min.js"></script>
+ <script>
+ // Sorting functionality
+ function sortBy(field) {
+ const currentSort = new URLSearchParams(window.location.search).get('sort_by');
+ const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';
+
+ let newOrder = 'asc';
+ if (currentSort === field && currentOrder === 'asc') {
+ newOrder = 'desc';
+ }
+
+ const url = new URL(window.location);
+ url.searchParams.set('sort_by', field);
+ url.searchParams.set('sort_order', newOrder);
+ url.searchParams.set('page', '1'); // Reset to first page
+ window.location.href = url.toString();
+ }
+
+ // Pagination functionality
+ function goToPage(event) {
+ event.preventDefault();
+ const page = event.target.closest('a').getAttribute('data-page');
+ const url = new URL(window.location);
+ url.searchParams.set('page', page);
+ window.location.href = url.toString();
+ }
+
+ // Page size functionality
+ function changePageSize(newPageSize) {
+ const url = new URL(window.location);
+ url.searchParams.set('page_size', newPageSize);
+ url.searchParams.set('page', '1'); // Reset to first page when changing page size
+ window.location.href = url.toString();
+ }
+
+ // Volume details
+ function showVolumeDetails(event) {
+ const volumeId = event.target.closest('button').getAttribute('data-volume-id');
+ window.location.href = `/cluster/ec-volumes/${volumeId}`;
+ }
+
+ // Repair volume
+ function repairVolume(event) {
+ const volumeId = event.target.closest('button').getAttribute('data-volume-id');
+ if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {
+ // TODO: Implement repair functionality
+ alert('Repair functionality will be implemented soon.');
+ }
+ }
+ </script>
+</body>
+</html>
+}
+
+// displayShardLocationsHTML renders shard locations as proper HTML
+templ displayShardLocationsHTML(shardLocations map[int]string) {
+ if len(shardLocations) == 0 {
+ <span class="text-muted">No shards</span>
+ } else {
+ for i, serverInfo := range groupShardsByServer(shardLocations) {
+ if i > 0 {
+ <br/>
+ }
+ <strong>
+ <a href={ templ.URL("/cluster/volume-servers/" + serverInfo.Server) } class="text-primary text-decoration-none">
+ { serverInfo.Server }
+ </a>:
+ </strong> { serverInfo.ShardRanges }
+ }
+ }
+}
+
+// displayShardSizes renders shard sizes in a compact format
+templ displayShardSizes(shardSizes map[int]int64) {
+ if len(shardSizes) == 0 {
+ <span class="text-muted">-</span>
+ } else {
+ @renderShardSizesContent(shardSizes)
+ }
+}
+
+// renderShardSizesContent renders the content of shard sizes
+templ renderShardSizesContent(shardSizes map[int]int64) {
+ if areAllShardSizesSame(shardSizes) {
+ // All shards have the same size, show just the common size
+ <span class="text-success">{getCommonShardSize(shardSizes)}</span>
+ } else {
+ // Shards have different sizes, show individual sizes
+ <div class="shard-sizes" style="max-width: 300px;">
+ { formatIndividualShardSizes(shardSizes) }
+ </div>
+ }
+}
+
+// ServerShardInfo represents server and its shard ranges with sizes
+type ServerShardInfo struct {
+ Server string
+ ShardRanges string
+}
+
+// groupShardsByServer groups shards by server and formats ranges
+func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly
+ shardRanges := formatShardRanges(shards)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
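+
+// Example (illustrative server names, not from a real cluster):
+//   groupShardsByServer(map[int]string{0: "srvA", 1: "srvA", 7: "srvB"})
+//   returns [{Server: "srvA", ShardRanges: "0-1"}, {Server: "srvB", ShardRanges: "7"}].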
+
+// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes
+func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly with sizes
+ shardRanges := formatShardRangesWithSizes(shards, shardSizes)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
+
+// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
+func formatShardRanges(shards []int) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ } else {
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ start = shards[i]
+ end = shards[i]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+
+ return strings.Join(ranges, ",")
+}
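+
+// For example, formatShardRanges([]int{0, 1, 2, 3, 7, 9, 10, 11}) returns "0-3,7,9-11";
+// callers are expected to pass the shard IDs already sorted in ascending order.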
+
+// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)")
+func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+ var totalSize int64
+
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ totalSize += shardSizes[shards[i]]
+ } else {
+ // Add current range with size
+ if start == end {
+ size := shardSizes[start]
+ if size > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ }
+ } else {
+ // Calculate total size for the range
+ rangeSize := shardSizes[start]
+ for j := start + 1; j <= end; j++ {
+ rangeSize += shardSizes[j]
+ }
+ if rangeSize > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ }
+ start = shards[i]
+ end = shards[i]
+ totalSize = shardSizes[shards[i]]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ size := shardSizes[start]
+ if size > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ }
+ } else {
+ // Calculate total size for the range
+ rangeSize := shardSizes[start]
+ for j := start + 1; j <= end; j++ {
+ rangeSize += shardSizes[j]
+ }
+ if rangeSize > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ }
+
+ return strings.Join(ranges, ",")
+}
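+
+// For example, shards []int{0, 1, 2} with sizes {0: 1 MiB, 1: 1 MiB, 2: 1 MiB} formats as "0-2(3.0MB)";
+// shards without a recorded size fall back to the bare ID or range.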
+
+// Helper function to convert bytes to human readable format
+func bytesToHumanReadable(bytes int64) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%dB", bytes)
+ }
+ div, exp := int64(unit), 0
+ for n := bytes / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+ return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
+}
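+
+// Sample outputs (1024-based units):
+//   bytesToHumanReadable(512)        -> "512B"
+//   bytesToHumanReadable(1536)       -> "1.5KB"
+//   bytesToHumanReadable(1073741824) -> "1.0GB"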
+
+// Helper function to format missing shards
+func formatMissingShards(missingShards []int) string {
+ if len(missingShards) == 0 {
+ return ""
+ }
+
+ var shardStrs []string
+ for _, shard := range missingShards {
+ shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
+ }
+
+ return strings.Join(shardStrs, ", ")
+}
+
+// Helper function to check if all shard sizes are the same
+func areAllShardSizesSame(shardSizes map[int]int64) bool {
+ if len(shardSizes) <= 1 {
+ return true
+ }
+
+ var firstSize int64 = -1
+ for _, size := range shardSizes {
+ if firstSize == -1 {
+ firstSize = size
+ } else if size != firstSize {
+ return false
+ }
+ }
+ return true
+}
+
+// Helper function to get the common shard size (when all shards are the same size)
+func getCommonShardSize(shardSizes map[int]int64) string {
+ for _, size := range shardSizes {
+ return bytesToHumanReadable(size)
+ }
+ return "-"
+}
+
+// Helper function to format individual shard sizes
+func formatIndividualShardSizes(shardSizes map[int]int64) string {
+ if len(shardSizes) == 0 {
+ return ""
+ }
+
+ // Group shards by size for more compact display
+ sizeGroups := make(map[int64][]int)
+ for shardId, size := range shardSizes {
+ sizeGroups[size] = append(sizeGroups[size], shardId)
+ }
+
+ // If there are only a few distinct sizes (three or fewer), show them grouped
+ if len(sizeGroups) <= 3 {
+ var groupStrs []string
+ for size, shardIds := range sizeGroups {
+ // Sort shard IDs
+ for i := 0; i < len(shardIds); i++ {
+ for j := i + 1; j < len(shardIds); j++ {
+ if shardIds[i] > shardIds[j] {
+ shardIds[i], shardIds[j] = shardIds[j], shardIds[i]
+ }
+ }
+ }
+
+ var idRanges []string
+ if len(shardIds) <= 4 {
+ // Show individual IDs if few shards
+ for _, id := range shardIds {
+ idRanges = append(idRanges, fmt.Sprintf("%d", id))
+ }
+ } else {
+ // Show count if many shards
+ idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds)))
+ }
+ groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size)))
+ }
+ return strings.Join(groupStrs, " | ")
+ }
+
+ // If too many different sizes, show summary
+ return fmt.Sprintf("%d different sizes", len(sizeGroups))
+}
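+
+// For example, {0: 1 MiB, 1: 1 MiB, 5: 2 MiB} renders as "0,1: 1.0MB | 5: 2.0MB"
+// (group order follows Go map iteration and is therefore not deterministic).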
+
+// displayVolumeDistribution shows the distribution summary for a volume
+templ displayVolumeDistribution(volume dash.EcVolumeWithShards) {
+ <div class="small">
+ <i class="fas fa-sitemap me-1"></i>
+ { calculateVolumeDistributionSummary(volume) }
+ </div>
+}
+
+// displayEcVolumeStatus shows an improved status display for EC volumes
+templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) {
+ if volume.IsComplete {
+ <span class="badge bg-success"><i class="fas fa-check me-1"></i>Complete</span>
+ } else {
+ if len(volume.MissingShards) > 10 {
+ <span class="badge bg-danger"><i class="fas fa-skull me-1"></i>Critical ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)</span>
+ } else if len(volume.MissingShards) > 6 {
+ <span class="badge bg-warning"><i class="fas fa-exclamation-triangle me-1"></i>Degraded ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)</span>
+ } else if len(volume.MissingShards) > 2 {
+ <span class="badge bg-warning"><i class="fas fa-info-circle me-1"></i>Incomplete ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)</span>
+ } else {
+ <span class="badge bg-info"><i class="fas fa-info-circle me-1"></i>Minor Issues ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)</span>
+ }
+ }
+}
+
+// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume
+func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string {
+ dataCenters := make(map[string]bool)
+ racks := make(map[string]bool)
+ servers := make(map[string]bool)
+
+ // Count unique servers from shard locations
+ for _, server := range volume.ShardLocations {
+ servers[server] = true
+ }
+
+ // Use the DataCenters field if available
+ for _, dc := range volume.DataCenters {
+ dataCenters[dc] = true
+ }
+
+ // Use the Servers field if available
+ for _, server := range volume.Servers {
+ servers[server] = true
+ }
+
+ // Use the Racks field if available
+ for _, rack := range volume.Racks {
+ racks[rack] = true
+ }
+
+ // If we don't have rack information, estimate it from servers as fallback
+ rackCount := len(racks)
+ if rackCount == 0 {
+ // Fallback estimation - assume each server might be in a different rack
+ rackCount = len(servers)
+ if len(dataCenters) > 0 {
+ // More conservative estimate if we have DC info
+ rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters)
+ if rackCount == 0 {
+ rackCount = 1
+ }
+ }
+ }
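+ // Fallback example: 6 servers spread over 2 DCs with no rack info estimates ceil(6/2) = 3 racks;
+ // with no DC info at all it simply assumes one rack per server.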
+
+ return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers))
+}
\ No newline at end of file
diff --git a/weed/admin/view/app/cluster_ec_volumes_templ.go b/weed/admin/view/app/cluster_ec_volumes_templ.go
new file mode 100644
index 000000000..419739e7c
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_volumes_templ.go
@@ -0,0 +1,1313 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+ "strings"
+)
+
+func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<!doctype html><html lang=\"en\"><head><title>EC Volumes - SeaweedFS</title><meta charset=\"utf-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css\" rel=\"stylesheet\"><link href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css\" rel=\"stylesheet\"></head><body><div class=\"container-fluid\"><div class=\"row\"><div class=\"col-12\"><h2 class=\"mb-4\"><i class=\"fas fa-database me-2\"></i>EC Volumes <small class=\"text-muted\">(")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 25, Col: 84}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " volumes)</small></h2></div></div><!-- Statistics Cards --><div class=\"row mb-4\"><div class=\"col-md-3\"><div class=\"card text-bg-primary\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 38, Col: 86}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h4><small>EC encoded volumes</small></div><div class=\"align-self-center\"><i class=\"fas fa-cubes fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-info\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Shards</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 54, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</h4><small>Distributed shards</small></div><div class=\"align-self-center\"><i class=\"fas fa-puzzle-piece fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-success\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Complete Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CompleteVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 70, Col: 89}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><small>All shards present</small></div><div class=\"align-self-center\"><i class=\"fas fa-check-circle fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-warning\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Incomplete Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 86, Col: 91}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</h4><small>Missing shards</small></div><div class=\"align-self-center\"><i class=\"fas fa-exclamation-triangle fa-2x\"></i></div></div></div></div></div></div><!-- EC Storage Information Note --><div class=\"alert alert-info mb-4\" role=\"alert\"><i class=\"fas fa-info-circle me-2\"></i> <strong>EC Storage Note:</strong> EC volumes use erasure coding (10+4) which stores data across 14 shards with redundancy. Physical storage is approximately 1.4x the original logical data size due to 4 parity shards.</div><!-- Volumes Table --><div class=\"d-flex justify-content-between align-items-center mb-3\"><div class=\"d-flex align-items-center\"><span class=\"me-3\">Showing ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 110, Col: 79}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " to ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ if end > data.TotalVolumes {
+ return data.TotalVolumes
+ }
+ return end
+ }()))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 116, Col: 24}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " of ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 116, Col: 66}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " volumes</span><div class=\"d-flex align-items-center\"><label for=\"pageSize\" class=\"form-label me-2 mb-0\">Show:</label> <select id=\"pageSize\" class=\"form-select form-select-sm\" style=\"width: auto;\" onchange=\"changePageSize(this.value)\"><option value=\"5\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 5 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, ">5</option> <option value=\"10\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 10 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, ">10</option> <option value=\"25\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 25 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, ">25</option> <option value=\"50\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 50 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, ">50</option> <option value=\"100\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 100 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, ">100</option></select> <span class=\"ms-2\">per page</span></div></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "<div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Collection == "default" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<span class=\"badge bg-secondary text-white\">Collection: default</span> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<span class=\"badge bg-info text-white\">Collection: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 137, Col: 91}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</span> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<a href=\"/cluster/ec-shards\" class=\"btn btn-sm btn-outline-secondary ms-2\">Clear Filter</a></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</div><div class=\"table-responsive\"><table class=\"table table-striped table-hover\" id=\"ecVolumesTable\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('volume_id')\" class=\"text-dark text-decoration-none\">Volume ID ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<th><a href=\"#\" onclick=\"sortBy('collection')\" class=\"text-dark text-decoration-none\">Collection ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "collection" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<th><a href=\"#\" onclick=\"sortBy('total_shards')\" class=\"text-dark text-decoration-none\">Shard Count ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "total_shards" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</a></th><th class=\"text-dark\">Shard Size</th><th class=\"text-dark\">Shard Locations</th><th><a href=\"#\" onclick=\"sortBy('completeness')\" class=\"text-dark text-decoration-none\">Status ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "completeness" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</a></th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<th class=\"text-dark\">Data Centers</th>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, volume := range data.EcVolumes {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<tr><td><strong>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 218, Col: 75}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</strong></td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if volume.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<a href=\"/cluster/ec-shards?collection={volume.Collection}\" class=\"text-decoration-none\"><span class=\"badge bg-info text-white\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 224, Col: 101}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</span></a>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "<a href=\"/cluster/ec-shards?collection=default\" class=\"text-decoration-none\"><span class=\"badge bg-secondary text-white\">default</span></a>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<td><span class=\"badge bg-primary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 234, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</span></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayShardSizes(volume.ShardSizes).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayVolumeDistribution(volume).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayEcVolumeStatus(volume).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for i, dc := range volume.DataCenters {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "<span>, </span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, " <span class=\"badge bg-primary text-white\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 251, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "<td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showVolumeDetails(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 259, Col: 95}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !volume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairVolume(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 266, Col: 99}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "</div></td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "</tbody></table></div><!-- Pagination -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.TotalPages > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "<nav aria-label=\"EC Volumes pagination\"><ul class=\"pagination justify-content-center\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Page > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"1\">First</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page-1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 288, Col: 126}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "\">Previous</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ for i := 1; i <= data.TotalPages; i++ {
+ if i == data.Page {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<li class=\"page-item active\"><span class=\"page-link\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 295, Col: 77}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 299, Col: 120}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 299, Col: 144}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i == 4 && data.Page > 6 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ if data.Page < data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 314, Col: 126}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "\">Next</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 317, Col: 130}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "\">Last</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "</ul></nav>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "</div><script src=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/js/bootstrap.bundle.min.js\"></script><script>\n // Sorting functionality\n function sortBy(field) {\n const currentSort = new URLSearchParams(window.location.search).get('sort_by');\n const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n \n let newOrder = 'asc';\n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n const url = new URL(window.location);\n url.searchParams.set('sort_by', field);\n url.searchParams.set('sort_order', newOrder);\n url.searchParams.set('page', '1'); // Reset to first page\n window.location.href = url.toString();\n }\n\n // Pagination functionality\n function goToPage(event) {\n event.preventDefault();\n const page = event.target.closest('a').getAttribute('data-page');\n const url = new URL(window.location);\n url.searchParams.set('page', page);\n window.location.href = url.toString();\n }\n\n // Page size functionality\n function changePageSize(newPageSize) {\n const url = new URL(window.location);\n url.searchParams.set('page_size', newPageSize);\n url.searchParams.set('page', '1'); // Reset to first page when changing page size\n window.location.href = url.toString();\n }\n\n // Volume details\n function showVolumeDetails(event) {\n const volumeId = event.target.closest('button').getAttribute('data-volume-id');\n window.location.href = `/cluster/ec-volumes/${volumeId}`;\n }\n\n // Repair volume\n function repairVolume(event) {\n const volumeId = event.target.closest('button').getAttribute('data-volume-id');\n if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {\n // TODO: Implement repair functionality\n alert('Repair functionality will be implemented soon.');\n }\n }\n </script></body></html>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// displayShardLocationsHTML renders shard locations as proper HTML
+func displayShardLocationsHTML(shardLocations map[int]string) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var23 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var23 == nil {
+ templ_7745c5c3_Var23 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if len(shardLocations) == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "<span class=\"text-muted\">No shards</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ for i, serverInfo := range groupShardsByServer(shardLocations) {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "<br>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, " <strong><a href=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var24 templ.SafeURL
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinURLErrs(templ.URL("/cluster/volume-servers/" + serverInfo.Server))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 390, Col: 71}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "\" class=\"text-primary text-decoration-none\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 391, Col: 24}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "</a>:</strong> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var26 string
+ templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.ShardRanges)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 393, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ return nil
+ })
+}
+
+// displayShardSizes renders shard sizes in a compact format
+func displayShardSizes(shardSizes map[int]int64) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var27 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var27 == nil {
+ templ_7745c5c3_Var27 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if len(shardSizes) == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = renderShardSizesContent(shardSizes).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ return nil
+ })
+}
+
+// renderShardSizesContent renders the content of shard sizes
+func renderShardSizesContent(shardSizes map[int]int64) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var28 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var28 == nil {
+ templ_7745c5c3_Var28 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if areAllShardSizesSame(shardSizes) {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, " <span class=\"text-success\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var29 string
+ templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(getCommonShardSize(shardSizes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 411, Col: 60}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, " <div class=\"shard-sizes\" style=\"max-width: 300px;\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var30 string
+ templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(formatIndividualShardSizes(shardSizes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 415, Col: 43}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ return nil
+ })
+}
+
+// ServerShardInfo represents a server and its formatted shard ranges (annotated with sizes when available)
+type ServerShardInfo struct {
+ Server string
+ ShardRanges string
+}
+
+// groupShardsByServer groups shards by server and formats ranges
+func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly
+ shardRanges := formatShardRanges(shards)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
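+
+// exampleGroupShardsByServer is an illustrative sketch (not emitted by templ;
+// the server addresses are hypothetical) of how shard locations are grouped
+// and rendered per server by the helper above.
+func exampleGroupShardsByServer() {
+ locations := map[int]string{
+ 0: "10.0.0.1:8080", 1: "10.0.0.1:8080", 2: "10.0.0.1:8080",
+ 7: "10.0.0.2:8080", 9: "10.0.0.2:8080", 10: "10.0.0.2:8080",
+ }
+ for _, info := range groupShardsByServer(locations) {
+ // Prints "10.0.0.1:8080: 0-2" followed by "10.0.0.2:8080: 7,9-10"
+ fmt.Printf("%s: %s\n", info.Server, info.ShardRanges)
+ }
+}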
+
+// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes
+func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly with sizes
+ shardRanges := formatShardRangesWithSizes(shards, shardSizes)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
+
+// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
+func formatShardRanges(shards []int) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ } else {
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ start = shards[i]
+ end = shards[i]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+
+ return strings.Join(ranges, ",")
+}
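+
+// exampleFormatShardRanges is an illustrative sketch (not emitted by templ) of
+// the compact range notation produced by formatShardRanges for a sorted slice.
+func exampleFormatShardRanges() {
+ fmt.Println(formatShardRanges([]int{0, 1, 2, 3, 7, 9, 10, 11})) // "0-3,7,9-11"
+ fmt.Println(formatShardRanges([]int{5}))                        // "5"
+}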
+
+// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)")
+func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+ var totalSize int64
+
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ totalSize += shardSizes[shards[i]]
+ } else {
+ // Add current range with size
+ if start == end {
+ size := shardSizes[start]
+ if size > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ }
+ } else {
+ // Calculate total size for the range
+ rangeSize := shardSizes[start]
+ for j := start + 1; j <= end; j++ {
+ rangeSize += shardSizes[j]
+ }
+ if rangeSize > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ }
+ start = shards[i]
+ end = shards[i]
+ totalSize = shardSizes[shards[i]]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ size := shardSizes[start]
+ if size > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ }
+ } else {
+ // Calculate total size for the range
+ rangeSize := shardSizes[start]
+ for j := start + 1; j <= end; j++ {
+ rangeSize += shardSizes[j]
+ }
+ if rangeSize > 0 {
+ ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ }
+
+ return strings.Join(ranges, ",")
+}
+
+// Helper function to convert bytes to human readable format
+func bytesToHumanReadable(bytes int64) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%dB", bytes)
+ }
+ div, exp := int64(unit), 0
+ for n := bytes / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+ return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
+}
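+
+// exampleBytesToHumanReadable is an illustrative sketch (not emitted by templ)
+// of the 1024-based units used by bytesToHumanReadable.
+func exampleBytesToHumanReadable() {
+ fmt.Println(bytesToHumanReadable(512))     // "512B"
+ fmt.Println(bytesToHumanReadable(1536))    // "1.5KB"
+ fmt.Println(bytesToHumanReadable(1048576)) // "1.0MB"
+}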
+
+// Helper function to format missing shards
+func formatMissingShards(missingShards []int) string {
+ if len(missingShards) == 0 {
+ return ""
+ }
+
+ var shardStrs []string
+ for _, shard := range missingShards {
+ shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
+ }
+
+ return strings.Join(shardStrs, ", ")
+}
+
+// Helper function to check if all shard sizes are the same
+func areAllShardSizesSame(shardSizes map[int]int64) bool {
+ if len(shardSizes) <= 1 {
+ return true
+ }
+
+ var firstSize int64 = -1
+ for _, size := range shardSizes {
+ if firstSize == -1 {
+ firstSize = size
+ } else if size != firstSize {
+ return false
+ }
+ }
+ return true
+}
+
+// Helper function to get the common shard size (when all shards are the same size)
+func getCommonShardSize(shardSizes map[int]int64) string {
+ for _, size := range shardSizes {
+ return bytesToHumanReadable(size)
+ }
+ return "-"
+}
+
+// Helper function to format individual shard sizes
+func formatIndividualShardSizes(shardSizes map[int]int64) string {
+ if len(shardSizes) == 0 {
+ return ""
+ }
+
+ // Group shards by size for more compact display
+ sizeGroups := make(map[int64][]int)
+ for shardId, size := range shardSizes {
+ sizeGroups[size] = append(sizeGroups[size], shardId)
+ }
+
+ // If there are at most three distinct sizes, show them grouped
+ if len(sizeGroups) <= 3 {
+ var groupStrs []string
+ for size, shardIds := range sizeGroups {
+ // Sort shard IDs
+ for i := 0; i < len(shardIds); i++ {
+ for j := i + 1; j < len(shardIds); j++ {
+ if shardIds[i] > shardIds[j] {
+ shardIds[i], shardIds[j] = shardIds[j], shardIds[i]
+ }
+ }
+ }
+
+ var idRanges []string
+ if len(shardIds) <= 4 {
+ // Show individual IDs if few shards
+ for _, id := range shardIds {
+ idRanges = append(idRanges, fmt.Sprintf("%d", id))
+ }
+ } else {
+ // Show count if many shards
+ idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds)))
+ }
+ groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size)))
+ }
+ return strings.Join(groupStrs, " | ")
+ }
+
+ // If too many different sizes, show summary
+ return fmt.Sprintf("%d different sizes", len(sizeGroups))
+}
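+
+// exampleFormatIndividualShardSizes is an illustrative sketch (not emitted by
+// templ) of the grouped-by-size rendering above; map iteration makes the
+// group order nondeterministic.
+func exampleFormatIndividualShardSizes() {
+ sizes := map[int]int64{0: 1048576, 1: 1048576, 2: 1048576, 3: 2097152}
+ // Two size groups, e.g. "0,1,2: 1.0MB | 3: 2.0MB" (group order may vary).
+ fmt.Println(formatIndividualShardSizes(sizes))
+}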
+
+// displayVolumeDistribution shows the distribution summary for a volume
+func displayVolumeDistribution(volume dash.EcVolumeWithShards) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var31 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var31 == nil {
+ templ_7745c5c3_Var31 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "<div class=\"small\"><i class=\"fas fa-sitemap me-1\"></i> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var32 string
+ templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(calculateVolumeDistributionSummary(volume))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 713, Col: 52}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// displayEcVolumeStatus shows an improved status display for EC volumes
+func displayEcVolumeStatus(volume dash.EcVolumeWithShards) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var33 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var33 == nil {
+ templ_7745c5c3_Var33 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if volume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ if len(volume.MissingShards) > 10 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "<span class=\"badge bg-danger\"><i class=\"fas fa-skull me-1\"></i>Critical (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var34 string
+ templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 723, Col: 130}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if len(volume.MissingShards) > 6 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i>Degraded (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var35 string
+ templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 725, Col: 146}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if len(volume.MissingShards) > 2 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "<span class=\"badge bg-warning\"><i class=\"fas fa-info-circle me-1\"></i>Incomplete (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var36 string
+ templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 727, Col: 139}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "<span class=\"badge bg-info\"><i class=\"fas fa-info-circle me-1\"></i>Minor Issues (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var37 string
+ templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 729, Col: 138}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, " missing)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ return nil
+ })
+}
+
+// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume
+func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string {
+ dataCenters := make(map[string]bool)
+ racks := make(map[string]bool)
+ servers := make(map[string]bool)
+
+ // Count unique servers from shard locations
+ for _, server := range volume.ShardLocations {
+ servers[server] = true
+ }
+
+ // Use the DataCenters field if available
+ for _, dc := range volume.DataCenters {
+ dataCenters[dc] = true
+ }
+
+ // Use the Servers field if available
+ for _, server := range volume.Servers {
+ servers[server] = true
+ }
+
+ // Use the Racks field if available
+ for _, rack := range volume.Racks {
+ racks[rack] = true
+ }
+
+ // If we don't have rack information, estimate it from servers as a fallback
+ rackCount := len(racks)
+ if rackCount == 0 {
+ // Fallback estimation - assume each server might be in a different rack
+ rackCount = len(servers)
+ if len(dataCenters) > 0 {
+ // More conservative estimate if we have DC info
+ rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters)
+ if rackCount == 0 {
+ rackCount = 1
+ }
+ }
+ }
+
+ return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers))
+}
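+
+// Illustrative note on the fallback above (numbers are hypothetical): with no
+// rack data, 6 known servers spread over 2 data centers yield an estimated
+// (6+2-1)/2 = 3 racks; with no DC data either, the rack count defaults to the
+// server count.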
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/cluster_volumes.templ b/weed/admin/view/app/cluster_volumes.templ
index b4e1b8fd3..1d84ad0cb 100644
--- a/weed/admin/view/app/cluster_volumes.templ
+++ b/weed/admin/view/app/cluster_volumes.templ
@@ -277,7 +277,7 @@ templ ClusterVolumes(data dash.ClusterVolumesData) {
@getSortIcon("size", data.SortBy, data.SortOrder)
</a>
</th>
- <th>Storage Usage</th>
+ <th>Volume Utilization</th>
<th>
<a href="#" onclick="sortTable('filecount')" class="text-decoration-none text-dark">
File Count
diff --git a/weed/admin/view/app/cluster_volumes_templ.go b/weed/admin/view/app/cluster_volumes_templ.go
index c82da952c..b10365256 100644
--- a/weed/admin/view/app/cluster_volumes_templ.go
+++ b/weed/admin/view/app/cluster_volumes_templ.go
@@ -399,7 +399,7 @@ func ClusterVolumes(data dash.ClusterVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</a></th><th>Storage Usage</th><th><a href=\"#\" onclick=\"sortTable('filecount')\" class=\"text-decoration-none text-dark\">File Count")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</a></th><th>Volume Utilization</th><th><a href=\"#\" onclick=\"sortTable('filecount')\" class=\"text-decoration-none text-dark\">File Count")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
diff --git a/weed/admin/view/app/collection_details.templ b/weed/admin/view/app/collection_details.templ
new file mode 100644
index 000000000..bd11cca81
--- /dev/null
+++ b/weed/admin/view/app/collection_details.templ
@@ -0,0 +1,371 @@
+package app
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+)
+
+templ CollectionDetails(data dash.CollectionDetailsData) {
+ <div class="d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom">
+ <div>
+ <h1 class="h2">
+ <i class="fas fa-layer-group me-2"></i>Collection Details: {data.CollectionName}
+ </h1>
+ <nav aria-label="breadcrumb">
+ <ol class="breadcrumb">
+ <li class="breadcrumb-item"><a href="/admin" class="text-decoration-none">Dashboard</a></li>
+ <li class="breadcrumb-item"><a href="/cluster/collections" class="text-decoration-none">Collections</a></li>
+ <li class="breadcrumb-item active" aria-current="page">{data.CollectionName}</li>
+ </ol>
+ </nav>
+ </div>
+ <div class="btn-toolbar mb-2 mb-md-0">
+ <div class="btn-group me-2">
+ <button type="button" class="btn btn-sm btn-outline-secondary" onclick="history.back()">
+ <i class="fas fa-arrow-left me-1"></i>Back
+ </button>
+ <button type="button" class="btn btn-sm btn-outline-primary" onclick="window.location.reload()">
+ <i class="fas fa-refresh me-1"></i>Refresh
+ </button>
+ </div>
+ </div>
+ </div>
+
+ <!-- Collection Summary -->
+ <div class="row mb-4">
+ <div class="col-md-3">
+ <div class="card text-bg-primary">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Regular Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalVolumes)}</h4>
+ <small>Traditional volumes</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-database fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-info">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">EC Volumes</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalEcVolumes)}</h4>
+ <small>Erasure coded volumes</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-th-large fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-success">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Total Files</h6>
+ <h4 class="mb-0">{fmt.Sprintf("%d", data.TotalFiles)}</h4>
+ <small>Files stored</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-file fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="card text-bg-warning">
+ <div class="card-body">
+ <div class="d-flex justify-content-between">
+ <div>
+ <h6 class="card-title">Total Size (Logical)</h6>
+ <h4 class="mb-0">{util.BytesToHumanReadable(uint64(data.TotalSize))}</h4>
+ <small>Data stored (regular volumes only)</small>
+ </div>
+ <div class="align-self-center">
+ <i class="fas fa-hdd fa-2x"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Size Information Note -->
+ <div class="alert alert-info" role="alert">
+ <i class="fas fa-info-circle me-2"></i>
+ <strong>Size Information:</strong>
+ Logical size represents the actual data stored (regular volumes only).
+ EC volumes show shard counts instead of size - physical storage for EC volumes is approximately 1.4x the original data due to erasure coding redundancy.
+ </div>
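Editor's note: the 1.4x figure in this alert follows from the 10+4 erasure-coding layout implied by the "/14" shard counts used throughout these templates (assuming the default SeaweedFS scheme of 10 data plus 4 parity shards): physical / logical = (10 + 4) / 10 = 1.4, i.e. EC shards occupy roughly 40% more raw disk than the data they encode.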
+
+ <!-- Pagination Info -->
+ <div class="d-flex justify-content-between align-items-center mb-3">
+ <div class="d-flex align-items-center">
+ <span class="me-3">
+ Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ totalItems := data.TotalVolumes + data.TotalEcVolumes
+ if end > totalItems {
+ return totalItems
+ }
+ return end
+ }())} of {fmt.Sprintf("%d", data.TotalVolumes + data.TotalEcVolumes)} items
+ </span>
+
+ <div class="d-flex align-items-center">
+ <label for="pageSize" class="form-label me-2 mb-0">Show:</label>
+ <select id="pageSize" class="form-select form-select-sm" style="width: auto;" onchange="changePageSize(this.value)">
+ <option value="10" if data.PageSize == 10 { selected }>10</option>
+ <option value="25" if data.PageSize == 25 { selected }>25</option>
+ <option value="50" if data.PageSize == 50 { selected }>50</option>
+ <option value="100" if data.PageSize == 100 { selected }>100</option>
+ </select>
+ <span class="ms-2">per page</span>
+ </div>
+ </div>
+ </div>
+
+ <!-- Volumes Table -->
+ <div class="table-responsive">
+ <table class="table table-striped table-hover" id="volumesTable">
+ <thead>
+ <tr>
+ <th>
+ <a href="#" onclick="sortBy('volume_id')" class="text-dark text-decoration-none">
+ Volume ID
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th>
+ <a href="#" onclick="sortBy('type')" class="text-dark text-decoration-none">
+ Type
+ if data.SortBy == "type" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th class="text-dark">Logical Size / Shard Count</th>
+ <th class="text-dark">Files</th>
+ <th class="text-dark">Status</th>
+ <th class="text-dark">Actions</th>
+ </tr>
+ </thead>
+ <tbody>
+ // Display regular volumes
+ for _, volume := range data.RegularVolumes {
+ <tr>
+ <td>
+ <strong>{fmt.Sprintf("%d", volume.Id)}</strong>
+ </td>
+ <td>
+ <span class="badge bg-primary">
+ <i class="fas fa-database me-1"></i>Regular
+ </span>
+ </td>
+ <td>
+ {util.BytesToHumanReadable(volume.Size)}
+ </td>
+ <td>
+ {fmt.Sprintf("%d", volume.FileCount)}
+ </td>
+ <td>
+ if volume.ReadOnly {
+ <span class="badge bg-warning">Read Only</span>
+ } else {
+ <span class="badge bg-success">Read/Write</span>
+ }
+ </td>
+ <td>
+ <div class="btn-group" role="group">
+ <button type="button" class="btn btn-sm btn-outline-primary"
+ onclick="showVolumeDetails(event)"
+ data-volume-id={ fmt.Sprintf("%d", volume.Id) }
+ data-server={ volume.Server }
+ title="View volume details">
+ <i class="fas fa-info-circle"></i>
+ </button>
+ </div>
+ </td>
+ </tr>
+ }
+
+ // Display EC volumes
+ for _, ecVolume := range data.EcVolumes {
+ <tr>
+ <td>
+ <strong>{fmt.Sprintf("%d", ecVolume.VolumeID)}</strong>
+ </td>
+ <td>
+ <span class="badge bg-info">
+ <i class="fas fa-th-large me-1"></i>EC
+ </span>
+ </td>
+ <td>
+ <span class="badge bg-primary">{fmt.Sprintf("%d/14", ecVolume.TotalShards)}</span>
+ </td>
+ <td>
+ <span class="text-muted">-</span>
+ </td>
+ <td>
+ if ecVolume.IsComplete {
+ <span class="badge bg-success">
+ <i class="fas fa-check me-1"></i>Complete
+ </span>
+ } else {
+ <span class="badge bg-warning">
+ <i class="fas fa-exclamation-triangle me-1"></i>
+ Missing {fmt.Sprintf("%d", len(ecVolume.MissingShards))} shards
+ </span>
+ }
+ </td>
+ <td>
+ <div class="btn-group" role="group">
+ <button type="button" class="btn btn-sm btn-outline-info"
+ onclick="showEcVolumeDetails(event)"
+ data-volume-id={ fmt.Sprintf("%d", ecVolume.VolumeID) }
+ title="View EC volume details">
+ <i class="fas fa-info-circle"></i>
+ </button>
+ if !ecVolume.IsComplete {
+ <button type="button" class="btn btn-sm btn-outline-warning"
+ onclick="repairEcVolume(event)"
+ data-volume-id={ fmt.Sprintf("%d", ecVolume.VolumeID) }
+ title="Repair missing shards">
+ <i class="fas fa-wrench"></i>
+ </button>
+ }
+ </div>
+ </td>
+ </tr>
+ }
+ </tbody>
+ </table>
+ </div>
+
+ <!-- Pagination -->
+ if data.TotalPages > 1 {
+ <nav aria-label="Collection volumes pagination">
+ <ul class="pagination justify-content-center">
+ if data.Page > 1 {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page="1">First</a>
+ </li>
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page-1) }>Previous</a>
+ </li>
+ }
+
+ for i := 1; i <= data.TotalPages; i++ {
+ if i == data.Page {
+ <li class="page-item active">
+ <span class="page-link">{fmt.Sprintf("%d", i)}</span>
+ </li>
+ } else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", i) }>{fmt.Sprintf("%d", i)}</a>
+ </li>
+ } else if i == 4 && data.Page > 6 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ } else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
+ <li class="page-item disabled">
+ <span class="page-link">...</span>
+ </li>
+ }
+ }
+
+ if data.Page < data.TotalPages {
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page+1) }>Next</a>
+ </li>
+ <li class="page-item">
+ <a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.TotalPages) }>Last</a>
+ </li>
+ }
+ </ul>
+ </nav>
+ }
+
+ <script>
+ // Sorting functionality
+ function sortBy(field) {
+ const currentSort = new URLSearchParams(window.location.search).get('sort_by');
+ const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';
+
+ let newOrder = 'asc';
+ if (currentSort === field && currentOrder === 'asc') {
+ newOrder = 'desc';
+ }
+
+ const url = new URL(window.location);
+ url.searchParams.set('sort_by', field);
+ url.searchParams.set('sort_order', newOrder);
+ url.searchParams.set('page', '1'); // Reset to first page
+ window.location.href = url.toString();
+ }
+
+ // Pagination functionality
+ function goToPage(event) {
+ event.preventDefault();
+ const page = event.target.closest('a').getAttribute('data-page');
+ const url = new URL(window.location);
+ url.searchParams.set('page', page);
+ window.location.href = url.toString();
+ }
+
+ // Page size functionality
+ function changePageSize(newPageSize) {
+ const url = new URL(window.location);
+ url.searchParams.set('page_size', newPageSize);
+ url.searchParams.set('page', '1'); // Reset to first page when changing page size
+ window.location.href = url.toString();
+ }
+
+ // Volume details
+ function showVolumeDetails(event) {
+ const volumeId = event.target.closest('button').getAttribute('data-volume-id');
+ const server = event.target.closest('button').getAttribute('data-server');
+ window.location.href = `/cluster/volumes/${volumeId}/${server}`;
+ }
+
+ // EC Volume details
+ function showEcVolumeDetails(event) {
+ const volumeId = event.target.closest('button').getAttribute('data-volume-id');
+ window.location.href = `/cluster/ec-volumes/${volumeId}`;
+ }
+
+ // Repair EC Volume
+ function repairEcVolume(event) {
+ const volumeId = event.target.closest('button').getAttribute('data-volume-id');
+ if (confirm(`Are you sure you want to repair missing shards for EC volume ${volumeId}?`)) {
+ // TODO: Implement repair functionality
+ alert('Repair functionality will be implemented soon.');
+ }
+ }
+ </script>
+}
\ No newline at end of file
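Editor's note: the page-number loop in CollectionDetails keeps the first three pages, the last three pages, and a window of two pages on either side of the current page, collapsing the rest into at most two "..." markers. A small standalone sketch of that windowing rule (plain Go, outside the template, names hypothetical):

package app

import "fmt"

// renderPageWindow mirrors the windowing rule used by the pagination loop in
// CollectionDetails: always show pages 1-3, the last 3 pages, and a window of
// two pages around the current page; collapse the gaps into "...".
func renderPageWindow(current, totalPages int) []string {
	var out []string
	for i := 1; i <= totalPages; i++ {
		switch {
		case i == current:
			out = append(out, fmt.Sprintf("[%d]", i)) // active page
		case i <= 3 || i > totalPages-3 || (i >= current-2 && i <= current+2):
			out = append(out, fmt.Sprintf("%d", i)) // plain page link
		case i == 4 && current > 6:
			out = append(out, "...") // gap after the leading pages
		case i == totalPages-3 && current < totalPages-5:
			out = append(out, "...") // gap before the trailing pages
		}
	}
	return out
}

// renderPageWindow(10, 20) yields: 1 2 3 ... 8 9 [10] 11 12 ... 18 19 20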
diff --git a/weed/admin/view/app/collection_details_templ.go b/weed/admin/view/app/collection_details_templ.go
new file mode 100644
index 000000000..bb1ed9e36
--- /dev/null
+++ b/weed/admin/view/app/collection_details_templ.go
@@ -0,0 +1,567 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+)
+
+func CollectionDetails(data dash.CollectionDetailsData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><div><h1 class=\"h2\"><i class=\"fas fa-layer-group me-2\"></i>Collection Details: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.CollectionName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 13, Col: 83}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</h1><nav aria-label=\"breadcrumb\"><ol class=\"breadcrumb\"><li class=\"breadcrumb-item\"><a href=\"/admin\" class=\"text-decoration-none\">Dashboard</a></li><li class=\"breadcrumb-item\"><a href=\"/cluster/collections\" class=\"text-decoration-none\">Collections</a></li><li class=\"breadcrumb-item active\" aria-current=\"page\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(data.CollectionName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 19, Col: 80}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</li></ol></nav></div><div class=\"btn-toolbar mb-2 mb-md-0\"><div class=\"btn-group me-2\"><button type=\"button\" class=\"btn btn-sm btn-outline-secondary\" onclick=\"history.back()\"><i class=\"fas fa-arrow-left me-1\"></i>Back</button> <button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"window.location.reload()\"><i class=\"fas fa-refresh me-1\"></i>Refresh</button></div></div></div><!-- Collection Summary --><div class=\"row mb-4\"><div class=\"col-md-3\"><div class=\"card text-bg-primary\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Regular Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 43, Col: 61}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</h4><small>Traditional volumes</small></div><div class=\"align-self-center\"><i class=\"fas fa-database fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-info\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">EC Volumes</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 59, Col: 63}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><small>Erasure coded volumes</small></div><div class=\"align-self-center\"><i class=\"fas fa-th-large fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-success\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Files</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 75, Col: 59}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</h4><small>Files stored</small></div><div class=\"align-self-center\"><i class=\"fas fa-file fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-warning\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Size (Logical)</h6><h4 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(uint64(data.TotalSize)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 91, Col: 74}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</h4><small>Data stored (regular volumes only)</small></div><div class=\"align-self-center\"><i class=\"fas fa-hdd fa-2x\"></i></div></div></div></div></div></div><!-- Size Information Note --><div class=\"alert alert-info\" role=\"alert\"><i class=\"fas fa-info-circle me-2\"></i> <strong>Size Information:</strong> Logical size represents the actual data stored (regular volumes only). EC volumes show shard counts instead of size - physical storage for EC volumes is approximately 1.4x the original data due to erasure coding redundancy.</div><!-- Pagination Info --><div class=\"d-flex justify-content-between align-items-center mb-3\"><div class=\"d-flex align-items-center\"><span class=\"me-3\">Showing ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 115, Col: 63}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " to ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ totalItems := data.TotalVolumes + data.TotalEcVolumes
+ if end > totalItems {
+ return totalItems
+ }
+ return end
+ }()))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 122, Col: 8}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " of ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes+data.TotalEcVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 122, Col: 72}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " items</span><div class=\"d-flex align-items-center\"><label for=\"pageSize\" class=\"form-label me-2 mb-0\">Show:</label> <select id=\"pageSize\" class=\"form-select form-select-sm\" style=\"width: auto;\" onchange=\"changePageSize(this.value)\"><option value=\"10\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 10 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, ">10</option> <option value=\"25\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 25 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, ">25</option> <option value=\"50\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 50 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, ">50</option> <option value=\"100\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.PageSize == 100 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, ">100</option></select> <span class=\"ms-2\">per page</span></div></div></div><!-- Volumes Table --><div class=\"table-responsive\"><table class=\"table table-striped table-hover\" id=\"volumesTable\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('volume_id')\" class=\"text-dark text-decoration-none\">Volume ID ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</a></th><th><a href=\"#\" onclick=\"sortBy('type')\" class=\"text-dark text-decoration-none\">Type ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "type" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</a></th><th class=\"text-dark\">Logical Size / Shard Count</th><th class=\"text-dark\">Files</th><th class=\"text-dark\">Status</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, volume := range data.RegularVolumes {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<tr><td><strong>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.Id))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 182, Col: 44}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "</strong></td><td><span class=\"badge bg-primary\"><i class=\"fas fa-database me-1\"></i>Regular</span></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(volume.Size))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 190, Col: 46}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.FileCount))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 193, Col: 43}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if volume.ReadOnly {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<span class=\"badge bg-warning\">Read Only</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<span class=\"badge bg-success\">Read/Write</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</td><td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showVolumeDetails(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.Id))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 206, Col: 55}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "\" data-server=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 207, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "\" title=\"View volume details\"><i class=\"fas fa-info-circle\"></i></button></div></td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ for _, ecVolume := range data.EcVolumes {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<tr><td><strong>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", ecVolume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 220, Col: 52}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "</strong></td><td><span class=\"badge bg-info\"><i class=\"fas fa-th-large me-1\"></i>EC</span></td><td><span class=\"badge bg-primary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", ecVolume.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 228, Col: 81}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</span></td><td><span class=\"text-muted\">-</span></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if ecVolume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i> Missing ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(ecVolume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 241, Col: 64}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, " shards</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "</td><td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-info\" onclick=\"showEcVolumeDetails(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", ecVolume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 249, Col: 63}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !ecVolume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairEcVolume(event)\" data-volume-id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", ecVolume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 256, Col: 64}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</div></td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</tbody></table></div><!-- Pagination -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.TotalPages > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<nav aria-label=\"Collection volumes pagination\"><ul class=\"pagination justify-content-center\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Page > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"1\">First</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page-1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 278, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "\">Previous</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ for i := 1; i <= data.TotalPages; i++ {
+ if i == data.Page {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "<li class=\"page-item active\"><span class=\"page-link\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 285, Col: 52}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 289, Col: 95}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 289, Col: 119}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i == 4 && data.Page > 6 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ if data.Page < data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 304, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "\">Next</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var26 string
+ templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 307, Col: 108}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "\">Last</a></li>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</ul></nav>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<script>\n\t\t// Sorting functionality\n\t\tfunction sortBy(field) {\n\t\t\tconst currentSort = new URLSearchParams(window.location.search).get('sort_by');\n\t\t\tconst currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n\t\t\t\n\t\t\tlet newOrder = 'asc';\n\t\t\tif (currentSort === field && currentOrder === 'asc') {\n\t\t\t\tnewOrder = 'desc';\n\t\t\t}\n\t\t\t\n\t\t\tconst url = new URL(window.location);\n\t\t\turl.searchParams.set('sort_by', field);\n\t\t\turl.searchParams.set('sort_order', newOrder);\n\t\t\turl.searchParams.set('page', '1'); // Reset to first page\n\t\t\twindow.location.href = url.toString();\n\t\t}\n\n\t\t// Pagination functionality\n\t\tfunction goToPage(event) {\n\t\t\tevent.preventDefault();\n\t\t\tconst page = event.target.closest('a').getAttribute('data-page');\n\t\t\tconst url = new URL(window.location);\n\t\t\turl.searchParams.set('page', page);\n\t\t\twindow.location.href = url.toString();\n\t\t}\n\n\t\t// Page size functionality\n\t\tfunction changePageSize(newPageSize) {\n\t\t\tconst url = new URL(window.location);\n\t\t\turl.searchParams.set('page_size', newPageSize);\n\t\t\turl.searchParams.set('page', '1'); // Reset to first page when changing page size\n\t\t\twindow.location.href = url.toString();\n\t\t}\n\n\t\t// Volume details\n\t\tfunction showVolumeDetails(event) {\n\t\t\tconst volumeId = event.target.closest('button').getAttribute('data-volume-id');\n\t\t\tconst server = event.target.closest('button').getAttribute('data-server');\n\t\t\twindow.location.href = `/cluster/volumes/${volumeId}/${server}`;\n\t\t}\n\n\t\t// EC Volume details\n\t\tfunction showEcVolumeDetails(event) {\n\t\t\tconst volumeId = event.target.closest('button').getAttribute('data-volume-id');\n\t\t\twindow.location.href = `/cluster/ec-volumes/${volumeId}`;\n\t\t}\n\n\t\t// Repair EC Volume\n\t\tfunction repairEcVolume(event) {\n\t\t\tconst volumeId = event.target.closest('button').getAttribute('data-volume-id');\n\t\t\tif (confirm(`Are you sure you want to repair missing shards for EC volume ${volumeId}?`)) {\n\t\t\t\t// TODO: Implement repair functionality\n\t\t\t\talert('Repair functionality will be implemented soon.');\n\t\t\t}\n\t\t}\n\t</script>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/ec_volume_details.templ b/weed/admin/view/app/ec_volume_details.templ
new file mode 100644
index 000000000..caf506d0f
--- /dev/null
+++ b/weed/admin/view/app/ec_volume_details.templ
@@ -0,0 +1,313 @@
+package app
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
+ <div class="d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom">
+ <div>
+ <h1 class="h2">
+ <i class="fas fa-th-large me-2"></i>EC Volume Details
+ </h1>
+ <nav aria-label="breadcrumb">
+ <ol class="breadcrumb">
+ <li class="breadcrumb-item"><a href="/admin" class="text-decoration-none">Dashboard</a></li>
+ <li class="breadcrumb-item"><a href="/cluster/ec-shards" class="text-decoration-none">EC Volumes</a></li>
+ <li class="breadcrumb-item active" aria-current="page">Volume {fmt.Sprintf("%d", data.VolumeID)}</li>
+ </ol>
+ </nav>
+ </div>
+ <div class="btn-toolbar mb-2 mb-md-0">
+ <div class="btn-group me-2">
+ <button type="button" class="btn btn-sm btn-outline-secondary" onclick="history.back()">
+ <i class="fas fa-arrow-left me-1"></i>Back
+ </button>
+ <button type="button" class="btn btn-sm btn-outline-primary" onclick="window.location.reload()">
+ <i class="fas fa-refresh me-1"></i>Refresh
+ </button>
+ </div>
+ </div>
+ </div>
+
+ <!-- EC Volume Summary -->
+ <div class="row mb-4">
+ <div class="col-md-6">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="card-title mb-0">
+ <i class="fas fa-info-circle me-2"></i>Volume Information
+ </h5>
+ </div>
+ <div class="card-body">
+ <table class="table table-borderless">
+ <tr>
+ <td><strong>Volume ID:</strong></td>
+ <td>{fmt.Sprintf("%d", data.VolumeID)}</td>
+ </tr>
+ <tr>
+ <td><strong>Collection:</strong></td>
+ <td>
+ if data.Collection != "" {
+ <span class="badge bg-info">{data.Collection}</span>
+ } else {
+ <span class="text-muted">default</span>
+ }
+ </td>
+ </tr>
+ <tr>
+ <td><strong>Status:</strong></td>
+ <td>
+ if data.IsComplete {
+ <span class="badge bg-success">
+ <i class="fas fa-check me-1"></i>Complete ({data.TotalShards}/14 shards)
+ </span>
+ } else {
+ <span class="badge bg-warning">
+ <i class="fas fa-exclamation-triangle me-1"></i>Incomplete ({data.TotalShards}/14 shards)
+ </span>
+ }
+ </td>
+ </tr>
+ if !data.IsComplete {
+ <tr>
+ <td><strong>Missing Shards:</strong></td>
+ <td>
+ for i, shardID := range data.MissingShards {
+ if i > 0 {
+ <span>, </span>
+ }
+ <span class="badge bg-danger">{fmt.Sprintf("%02d", shardID)}</span>
+ }
+ </td>
+ </tr>
+ }
+ <tr>
+ <td><strong>Data Centers:</strong></td>
+ <td>
+ for i, dc := range data.DataCenters {
+ if i > 0 {
+ <span>, </span>
+ }
+ <span class="badge bg-primary">{dc}</span>
+ }
+ </td>
+ </tr>
+ <tr>
+ <td><strong>Servers:</strong></td>
+ <td>
+ <span class="text-muted">{fmt.Sprintf("%d servers", len(data.Servers))}</span>
+ </td>
+ </tr>
+ <tr>
+ <td><strong>Last Updated:</strong></td>
+ <td>
+ <span class="text-muted">{data.LastUpdated.Format("2006-01-02 15:04:05")}</span>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </div>
+ </div>
+
+ <div class="col-md-6">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="card-title mb-0">
+ <i class="fas fa-chart-pie me-2"></i>Shard Distribution
+ </h5>
+ </div>
+ <div class="card-body">
+ <div class="row text-center">
+ <div class="col-4">
+ <div class="border rounded p-3">
+ <h3 class="text-primary mb-1">{fmt.Sprintf("%d", data.TotalShards)}</h3>
+ <small class="text-muted">Total Shards</small>
+ </div>
+ </div>
+ <div class="col-4">
+ <div class="border rounded p-3">
+ <h3 class="text-success mb-1">{fmt.Sprintf("%d", len(data.DataCenters))}</h3>
+ <small class="text-muted">Data Centers</small>
+ </div>
+ </div>
+ <div class="col-4">
+ <div class="border rounded p-3">
+ <h3 class="text-info mb-1">{fmt.Sprintf("%d", len(data.Servers))}</h3>
+ <small class="text-muted">Servers</small>
+ </div>
+ </div>
+ </div>
+
+ <!-- Shard Distribution Visualization -->
+ <div class="mt-3">
+ <h6>Present Shards:</h6>
+ <div class="d-flex flex-wrap gap-1">
+ for _, shard := range data.Shards {
+ <span class="badge bg-success me-1 mb-1">{fmt.Sprintf("%02d", shard.ShardID)}</span>
+ }
+ </div>
+ if len(data.MissingShards) > 0 {
+ <h6 class="mt-2">Missing Shards:</h6>
+ <div class="d-flex flex-wrap gap-1">
+ for _, shardID := range data.MissingShards {
+ <span class="badge bg-secondary me-1 mb-1">{fmt.Sprintf("%02d", shardID)}</span>
+ }
+ </div>
+ }
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Shard Details Table -->
+ <div class="card">
+ <div class="card-header">
+ <h5 class="card-title mb-0">
+ <i class="fas fa-list me-2"></i>Shard Details
+ </h5>
+ </div>
+ <div class="card-body">
+ if len(data.Shards) > 0 {
+ <div class="table-responsive">
+ <table class="table table-striped table-hover">
+ <thead>
+ <tr>
+ <th>
+ <a href="#" onclick="sortBy('shard_id')" class="text-dark text-decoration-none">
+ Shard ID
+ if data.SortBy == "shard_id" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th>
+ <a href="#" onclick="sortBy('server')" class="text-dark text-decoration-none">
+ Server
+ if data.SortBy == "server" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th>
+ <a href="#" onclick="sortBy('data_center')" class="text-dark text-decoration-none">
+ Data Center
+ if data.SortBy == "data_center" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th>
+ <a href="#" onclick="sortBy('rack')" class="text-dark text-decoration-none">
+ Rack
+ if data.SortBy == "rack" {
+ if data.SortOrder == "asc" {
+ <i class="fas fa-sort-up ms-1"></i>
+ } else {
+ <i class="fas fa-sort-down ms-1"></i>
+ }
+ } else {
+ <i class="fas fa-sort ms-1 text-muted"></i>
+ }
+ </a>
+ </th>
+ <th class="text-dark">Disk Type</th>
+ <th class="text-dark">Shard Size</th>
+ <th class="text-dark">Actions</th>
+ </tr>
+ </thead>
+ <tbody>
+ for _, shard := range data.Shards {
+ <tr>
+ <td>
+ <span class="badge bg-primary">{fmt.Sprintf("%02d", shard.ShardID)}</span>
+ </td>
+ <td>
+ <a href={ templ.URL("/cluster/volume-servers/" + shard.Server) } class="text-primary text-decoration-none">
+ <code class="small">{shard.Server}</code>
+ </a>
+ </td>
+ <td>
+ <span class="badge bg-primary text-white">{shard.DataCenter}</span>
+ </td>
+ <td>
+ <span class="badge bg-secondary text-white">{shard.Rack}</span>
+ </td>
+ <td>
+ <span class="text-dark">{shard.DiskType}</span>
+ </td>
+ <td>
+ <span class="text-success">{bytesToHumanReadableUint64(shard.Size)}</span>
+ </td>
+ <td>
+ <a href={ templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)) } target="_blank" class="btn btn-sm btn-primary">
+ <i class="fas fa-external-link-alt me-1"></i>Volume Server
+ </a>
+ </td>
+ </tr>
+ }
+ </tbody>
+ </table>
+ </div>
+ } else {
+ <div class="text-center py-4">
+ <i class="fas fa-exclamation-triangle fa-3x text-warning mb-3"></i>
+ <h5>No EC shards found</h5>
+ <p class="text-muted">This volume may not be EC encoded yet.</p>
+ </div>
+ }
+ </div>
+ </div>
+
+ <script>
+ // Sorting functionality
+ function sortBy(field) {
+ const currentSort = new URLSearchParams(window.location.search).get('sort_by');
+ const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';
+
+ let newOrder = 'asc';
+ if (currentSort === field && currentOrder === 'asc') {
+ newOrder = 'desc';
+ }
+
+ const url = new URL(window.location);
+ url.searchParams.set('sort_by', field);
+ url.searchParams.set('sort_order', newOrder);
+ window.location.href = url.toString();
+ }
+ </script>
+}
+
+// bytesToHumanReadableUint64 converts a byte count to a human-readable string (uint64 version)
+func bytesToHumanReadableUint64(bytes uint64) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%dB", bytes)
+ }
+ div, exp := uint64(unit), 0
+ for n := bytes / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+ return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
+}
\ No newline at end of file
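Editor's note: a minimal table-style check of the helper above (a sketch only, not part of this change; it would live in a *_test.go file in the same package). The expected strings follow directly from the function's 1024-based unit selection and one-decimal rounding:

package app

import "testing"

// Spot-checks for bytesToHumanReadableUint64; values chosen to exercise the
// B, KB, MB and GB branches.
func TestBytesToHumanReadableUint64(t *testing.T) {
	cases := map[uint64]string{
		512:                    "512B",
		2048:                   "2.0KB",
		1536 * 1024:            "1.5MB",
		3 * 1024 * 1024 * 1024: "3.0GB",
	}
	for in, want := range cases {
		if got := bytesToHumanReadableUint64(in); got != want {
			t.Errorf("bytesToHumanReadableUint64(%d) = %q, want %q", in, got, want)
		}
	}
}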
diff --git a/weed/admin/view/app/ec_volume_details_templ.go b/weed/admin/view/app/ec_volume_details_templ.go
new file mode 100644
index 000000000..e96514ce7
--- /dev/null
+++ b/weed/admin/view/app/ec_volume_details_templ.go
@@ -0,0 +1,560 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><div><h1 class=\"h2\"><i class=\"fas fa-th-large me-2\"></i>EC Volume Details</h1><nav aria-label=\"breadcrumb\"><ol class=\"breadcrumb\"><li class=\"breadcrumb-item\"><a href=\"/admin\" class=\"text-decoration-none\">Dashboard</a></li><li class=\"breadcrumb-item\"><a href=\"/cluster/ec-shards\" class=\"text-decoration-none\">EC Volumes</a></li><li class=\"breadcrumb-item active\" aria-current=\"page\">Volume ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 18, Col: 115}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</li></ol></nav></div><div class=\"btn-toolbar mb-2 mb-md-0\"><div class=\"btn-group me-2\"><button type=\"button\" class=\"btn btn-sm btn-outline-secondary\" onclick=\"history.back()\"><i class=\"fas fa-arrow-left me-1\"></i>Back</button> <button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"window.location.reload()\"><i class=\"fas fa-refresh me-1\"></i>Refresh</button></div></div></div><!-- EC Volume Summary --><div class=\"row mb-4\"><div class=\"col-md-6\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-info-circle me-2\"></i>Volume Information</h5></div><div class=\"card-body\"><table class=\"table table-borderless\"><tr><td><strong>Volume ID:</strong></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 47, Col: 65}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</td></tr><tr><td><strong>Collection:</strong></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "<span class=\"badge bg-info\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 53, Col: 80}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<span class=\"text-muted\">default</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</td></tr><tr><td><strong>Status:</strong></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 64, Col: 100}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "/14 shards)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i>Incomplete (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 68, Col: 117}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "/14 shards)</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !data.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<tr><td><strong>Missing Shards:</strong></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for i, shardID := range data.MissingShards {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<span>, </span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " <span class=\"badge bg-danger\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 81, Col: 99}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "<tr><td><strong>Data Centers:</strong></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for i, dc := range data.DataCenters {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<span>, </span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " <span class=\"badge bg-primary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 93, Col: 70}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</td></tr><tr><td><strong>Servers:</strong></td><td><span class=\"text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d servers", len(data.Servers)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 100, Col: 102}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</span></td></tr><tr><td><strong>Last Updated:</strong></td><td><span class=\"text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 106, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</span></td></tr></table></div></div></div><div class=\"col-md-6\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-chart-pie me-2\"></i>Shard Distribution</h5></div><div class=\"card-body\"><div class=\"row text-center\"><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-primary mb-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 125, Col: 98}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</h3><small class=\"text-muted\">Total Shards</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-success mb-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.DataCenters)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 131, Col: 103}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</h3><small class=\"text-muted\">Data Centers</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-info mb-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Servers)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 137, Col: 96}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</h3><small class=\"text-muted\">Servers</small></div></div></div><!-- Shard Distribution Visualization --><div class=\"mt-3\"><h6>Present Shards:</h6><div class=\"d-flex flex-wrap gap-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, shard := range data.Shards {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<span class=\"badge bg-success me-1 mb-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 148, Col: 108}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if len(data.MissingShards) > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<h6 class=\"mt-2\">Missing Shards:</h6><div class=\"d-flex flex-wrap gap-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, shardID := range data.MissingShards {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<span class=\"badge bg-secondary me-1 mb-1\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 155, Col: 108}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div></div></div></div></div><!-- Shard Details Table --><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-list me-2\"></i>Shard Details</h5></div><div class=\"card-body\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if len(data.Shards) > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<div class=\"table-responsive\"><table class=\"table table-striped table-hover\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('shard_id')\" class=\"text-dark text-decoration-none\">Shard ID ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "shard_id" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</a></th><th><a href=\"#\" onclick=\"sortBy('server')\" class=\"text-dark text-decoration-none\">Server ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "server" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</a></th><th><a href=\"#\" onclick=\"sortBy('data_center')\" class=\"text-dark text-decoration-none\">Data Center ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "data_center" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</a></th><th><a href=\"#\" onclick=\"sortBy('rack')\" class=\"text-dark text-decoration-none\">Rack ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "rack" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<i class=\"fas fa-sort-up ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<i class=\"fas fa-sort-down ms-1\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</a></th><th class=\"text-dark\">Disk Type</th><th class=\"text-dark\">Shard Size</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, shard := range data.Shards {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<tr><td><span class=\"badge bg-primary\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 243, Col: 110}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</span></td><td><a href=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 templ.SafeURL
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinURLErrs(templ.URL("/cluster/volume-servers/" + shard.Server))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 246, Col: 106}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "\" class=\"text-primary text-decoration-none\"><code class=\"small\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 247, Col: 81}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</code></a></td><td><span class=\"badge bg-primary text-white\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 251, Col: 103}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</span></td><td><span class=\"badge bg-secondary text-white\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 254, Col: 99}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</span></td><td><span class=\"text-dark\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DiskType)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 257, Col: 83}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</span></td><td><span class=\"text-success\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(bytesToHumanReadableUint64(shard.Size))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 260, Col: 110}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</span></td><td><a href=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var23 templ.SafeURL
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 263, Col: 121}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "\" target=\"_blank\" class=\"btn btn-sm btn-primary\"><i class=\"fas fa-external-link-alt me-1\"></i>Volume Server</a></td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</tbody></table></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "<div class=\"text-center py-4\"><i class=\"fas fa-exclamation-triangle fa-3x text-warning mb-3\"></i><h5>No EC shards found</h5><p class=\"text-muted\">This volume may not be EC encoded yet.</p></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</div></div><script>\n // Sorting functionality\n function sortBy(field) {\n const currentSort = new URLSearchParams(window.location.search).get('sort_by');\n const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n \n let newOrder = 'asc';\n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n const url = new URL(window.location);\n url.searchParams.set('sort_by', field);\n url.searchParams.set('sort_order', newOrder);\n window.location.href = url.toString();\n }\n </script>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// Helper function to convert bytes to human readable format (uint64 version)
+func bytesToHumanReadableUint64(bytes uint64) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%dB", bytes)
+ }
+ div, exp := uint64(unit), 0
+ for n := bytes / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+ return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
+}
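+// Illustrative examples only (not part of the template logic): with the 1024-based units above,
+// bytesToHumanReadableUint64(1536) returns "1.5KB" and bytesToHumanReadableUint64(5*1024*1024) returns "5.0MB".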
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/maintenance_config.templ b/weed/admin/view/app/maintenance_config.templ
index d560cd22c..65ef565af 100644
--- a/weed/admin/view/app/maintenance_config.templ
+++ b/weed/admin/view/app/maintenance_config.templ
@@ -47,63 +47,70 @@ templ MaintenanceConfig(data *maintenance.MaintenanceConfigData) {
<div class="mb-3">
<label for="scanInterval" class="form-label">Scan Interval (minutes)</label>
<input type="number" class="form-control" id="scanInterval"
- value={fmt.Sprintf("%.0f", float64(data.Config.ScanIntervalSeconds)/60)} min="1" max="1440">
+ value={fmt.Sprintf("%.0f", float64(data.Config.ScanIntervalSeconds)/60)}
+ placeholder="30 (default)" min="1" max="1440">
<small class="form-text text-muted">
- How often to scan for maintenance tasks (1-1440 minutes).
+ How often to scan for maintenance tasks (1-1440 minutes). <strong>Default: 30 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="workerTimeout" class="form-label">Worker Timeout (minutes)</label>
<input type="number" class="form-control" id="workerTimeout"
- value={fmt.Sprintf("%.0f", float64(data.Config.WorkerTimeoutSeconds)/60)} min="1" max="60">
+ value={fmt.Sprintf("%.0f", float64(data.Config.WorkerTimeoutSeconds)/60)}
+ placeholder="5 (default)" min="1" max="60">
<small class="form-text text-muted">
- How long to wait for worker heartbeat before considering it inactive (1-60 minutes).
+ How long to wait for worker heartbeat before considering it inactive (1-60 minutes). <strong>Default: 5 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="taskTimeout" class="form-label">Task Timeout (hours)</label>
<input type="number" class="form-control" id="taskTimeout"
- value={fmt.Sprintf("%.0f", float64(data.Config.TaskTimeoutSeconds)/3600)} min="1" max="24">
+ value={fmt.Sprintf("%.0f", float64(data.Config.TaskTimeoutSeconds)/3600)}
+ placeholder="2 (default)" min="1" max="24">
<small class="form-text text-muted">
- Maximum time allowed for a single task to complete (1-24 hours).
+ Maximum time allowed for a single task to complete (1-24 hours). <strong>Default: 2 hours</strong>
</small>
</div>
<div class="mb-3">
<label for="globalMaxConcurrent" class="form-label">Global Concurrent Limit</label>
<input type="number" class="form-control" id="globalMaxConcurrent"
- value={fmt.Sprintf("%d", data.Config.Policy.GlobalMaxConcurrent)} min="1" max="20">
+ value={fmt.Sprintf("%d", data.Config.Policy.GlobalMaxConcurrent)}
+ placeholder="4 (default)" min="1" max="20">
<small class="form-text text-muted">
- Maximum number of maintenance tasks that can run simultaneously across all workers (1-20).
+ Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). <strong>Default: 4</strong>
</small>
</div>
<div class="mb-3">
<label for="maxRetries" class="form-label">Default Max Retries</label>
<input type="number" class="form-control" id="maxRetries"
- value={fmt.Sprintf("%d", data.Config.MaxRetries)} min="0" max="10">
+ value={fmt.Sprintf("%d", data.Config.MaxRetries)}
+ placeholder="3 (default)" min="0" max="10">
<small class="form-text text-muted">
- Default number of times to retry failed tasks (0-10).
+ Default number of times to retry failed tasks (0-10). <strong>Default: 3</strong>
</small>
</div>
<div class="mb-3">
<label for="retryDelay" class="form-label">Retry Delay (minutes)</label>
<input type="number" class="form-control" id="retryDelay"
- value={fmt.Sprintf("%.0f", float64(data.Config.RetryDelaySeconds)/60)} min="1" max="120">
+ value={fmt.Sprintf("%.0f", float64(data.Config.RetryDelaySeconds)/60)}
+ placeholder="15 (default)" min="1" max="120">
<small class="form-text text-muted">
- Time to wait before retrying failed tasks (1-120 minutes).
+ Time to wait before retrying failed tasks (1-120 minutes). <strong>Default: 15 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="taskRetention" class="form-label">Task Retention (days)</label>
<input type="number" class="form-control" id="taskRetention"
- value={fmt.Sprintf("%.0f", float64(data.Config.TaskRetentionSeconds)/(24*3600))} min="1" max="30">
+ value={fmt.Sprintf("%.0f", float64(data.Config.TaskRetentionSeconds)/(24*3600))}
+ placeholder="7 (default)" min="1" max="30">
<small class="form-text text-muted">
- How long to keep completed/failed task records (1-30 days).
+ How long to keep completed/failed task records (1-30 days). <strong>Default: 7 days</strong>
</small>
</div>
@@ -143,7 +150,7 @@ templ MaintenanceConfig(data *maintenance.MaintenanceConfigData) {
<i class={menuItem.Icon + " me-2"}></i>
{menuItem.DisplayName}
</h6>
- if data.Config.Policy.IsTaskEnabled(menuItem.TaskType) {
+ if menuItem.IsEnabled {
<span class="badge bg-success">Enabled</span>
} else {
<span class="badge bg-secondary">Disabled</span>
@@ -200,44 +207,60 @@ templ MaintenanceConfig(data *maintenance.MaintenanceConfigData) {
<script>
function saveConfiguration() {
- const config = {
- enabled: document.getElementById('enabled').checked,
- scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds
- policy: {
- vacuum_enabled: document.getElementById('vacuumEnabled').checked,
- vacuum_garbage_ratio: parseFloat(document.getElementById('vacuumGarbageRatio').value) / 100,
- replication_fix_enabled: document.getElementById('replicationFixEnabled').checked,
- }
- };
-
- fetch('/api/maintenance/config', {
- method: 'PUT',
- headers: {
- 'Content-Type': 'application/json',
- },
- body: JSON.stringify(config)
- })
- .then(response => response.json())
- .then(data => {
- if (data.success) {
- alert('Configuration saved successfully');
- } else {
- alert('Failed to save configuration: ' + (data.error || 'Unknown error'));
- }
- })
- .catch(error => {
- alert('Error: ' + error.message);
- });
+ // First, get current configuration to preserve existing values
+ fetch('/api/maintenance/config')
+ .then(response => response.json())
+ .then(currentConfig => {
+ // Update only the fields from the form
+ const updatedConfig = {
+ ...currentConfig.config, // Preserve existing config
+ enabled: document.getElementById('enabled').checked,
+ scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds
+ worker_timeout_seconds: parseInt(document.getElementById('workerTimeout').value) * 60, // Convert to seconds
+ task_timeout_seconds: parseInt(document.getElementById('taskTimeout').value) * 3600, // Convert to seconds
+ retry_delay_seconds: parseInt(document.getElementById('retryDelay').value) * 60, // Convert to seconds
+ max_retries: parseInt(document.getElementById('maxRetries').value),
+ task_retention_seconds: parseInt(document.getElementById('taskRetention').value) * 24 * 3600, // Convert to seconds
+ policy: {
+ ...currentConfig.config.policy, // Preserve existing policy
+ global_max_concurrent: parseInt(document.getElementById('globalMaxConcurrent').value)
+ }
+ };
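+ // Illustrative shape of the resulting request body, assuming the documented defaults above:
+ // {"enabled": <checkbox state>, "scan_interval_seconds": 1800, "worker_timeout_seconds": 300,
+ //  "task_timeout_seconds": 7200, "max_retries": 3, "retry_delay_seconds": 900,
+ //  "task_retention_seconds": 604800, "policy": {..., "global_max_concurrent": 4}}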
+
+ // Send the updated configuration
+ return fetch('/api/maintenance/config', {
+ method: 'PUT',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(updatedConfig)
+ });
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ alert('Configuration saved successfully');
+ location.reload(); // Reload to show updated values
+ } else {
+ alert('Failed to save configuration: ' + (data.error || 'Unknown error'));
+ }
+ })
+ .catch(error => {
+ alert('Error: ' + error.message);
+ });
}
function resetToDefaults() {
if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {
- // Reset form to defaults
+ // Reset form to defaults (matching DefaultMaintenanceConfig values)
document.getElementById('enabled').checked = false;
document.getElementById('scanInterval').value = '30';
- document.getElementById('vacuumEnabled').checked = false;
- document.getElementById('vacuumGarbageRatio').value = '30';
- document.getElementById('replicationFixEnabled').checked = false;
+ document.getElementById('workerTimeout').value = '5';
+ document.getElementById('taskTimeout').value = '2';
+ document.getElementById('globalMaxConcurrent').value = '4';
+ document.getElementById('maxRetries').value = '3';
+ document.getElementById('retryDelay').value = '15';
+ document.getElementById('taskRetention').value = '7';
}
}
</script>
diff --git a/weed/admin/view/app/maintenance_config_schema.templ b/weed/admin/view/app/maintenance_config_schema.templ
new file mode 100644
index 000000000..ee89cab64
--- /dev/null
+++ b/weed/admin/view/app/maintenance_config_schema.templ
@@ -0,0 +1,381 @@
+package app
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/admin/view/components"
+)
+
+templ MaintenanceConfigSchema(data *maintenance.MaintenanceConfigData, schema *maintenance.MaintenanceConfigSchema) {
+ <div class="container-fluid">
+ <div class="row mb-4">
+ <div class="col-12">
+ <div class="d-flex justify-content-between align-items-center">
+ <h2 class="mb-0">
+ <i class="fas fa-cogs me-2"></i>
+ Maintenance Configuration
+ </h2>
+ <div class="btn-group">
+ <a href="/maintenance/tasks" class="btn btn-outline-primary">
+ <i class="fas fa-tasks me-1"></i>
+ View Tasks
+ </a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="row">
+ <div class="col-12">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">System Settings</h5>
+ </div>
+ <div class="card-body">
+ <form id="maintenanceConfigForm">
+ <!-- Dynamically render all schema fields in order -->
+ for _, field := range schema.Fields {
+ @ConfigField(field, data.Config)
+ }
+
+ <div class="d-flex gap-2">
+ <button type="button" class="btn btn-primary" onclick="saveConfiguration()">
+ <i class="fas fa-save me-1"></i>
+ Save Configuration
+ </button>
+ <button type="button" class="btn btn-secondary" onclick="resetToDefaults()">
+ <i class="fas fa-undo me-1"></i>
+ Reset to Defaults
+ </button>
+ </div>
+ </form>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Task Configuration Cards -->
+ <div class="row mt-4">
+ <div class="col-md-4">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">
+ <i class="fas fa-broom me-2"></i>
+ Volume Vacuum
+ </h5>
+ </div>
+ <div class="card-body">
+ <p class="card-text">Reclaims disk space by removing deleted files from volumes.</p>
+ <a href="/maintenance/config/vacuum" class="btn btn-primary">Configure</a>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-4">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">
+ <i class="fas fa-balance-scale me-2"></i>
+ Volume Balance
+ </h5>
+ </div>
+ <div class="card-body">
+ <p class="card-text">Redistributes volumes across servers to optimize storage utilization.</p>
+ <a href="/maintenance/config/balance" class="btn btn-primary">Configure</a>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-4">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">
+ <i class="fas fa-shield-alt me-2"></i>
+ Erasure Coding
+ </h5>
+ </div>
+ <div class="card-body">
+ <p class="card-text">Converts volumes to erasure coded format for improved durability.</p>
+ <a href="/maintenance/config/erasure_coding" class="btn btn-primary">Configure</a>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <script>
+ function saveConfiguration() {
+ const form = document.getElementById('maintenanceConfigForm');
+ const formData = new FormData(form);
+
+ // Convert form data to JSON, handling interval fields specially
+ const config = {};
+
+ for (let [key, value] of formData.entries()) {
+ if (key.endsWith('_value')) {
+ // This is an interval value part
+ const baseKey = key.replace('_value', '');
+ const unitKey = baseKey + '_unit';
+ const unitValue = formData.get(unitKey);
+
+ if (unitValue) {
+ // Convert to seconds based on unit
+ const numValue = parseInt(value) || 0;
+ let seconds = numValue;
+ switch(unitValue) {
+ case 'minutes':
+ seconds = numValue * 60;
+ break;
+ case 'hours':
+ seconds = numValue * 3600;
+ break;
+ case 'days':
+ seconds = numValue * 24 * 3600;
+ break;
+ }
+ config[baseKey] = seconds;
+ }
+ } else if (key.endsWith('_unit')) {
+ // Skip unit keys - they're handled with their corresponding value
+ continue;
+ } else {
+ // Regular field
+ if (form.querySelector(`[name="${key}"]`).type === 'checkbox') {
+ config[key] = form.querySelector(`[name="${key}"]`).checked;
+ } else {
+ const numValue = parseFloat(value);
+ config[key] = isNaN(numValue) ? value : numValue;
+ }
+ }
+ }
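+ // Example of the conversion above (illustrative): a schema field "scan_interval_seconds"
+ // rendered as scan_interval_seconds_value=30 with scan_interval_seconds_unit=minutes
+ // becomes config.scan_interval_seconds = 1800 in the request body.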
+
+ fetch('/api/maintenance/config', {
+ method: 'PUT',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(config)
+ })
+ .then(response => {
+ if (response.status === 401) {
+ alert('Authentication required. Please log in first.');
+ window.location.href = '/login';
+ return;
+ }
+ return response.json();
+ })
+ .then(data => {
+ if (!data) return; // Skip if redirected to login
+ if (data.success) {
+ alert('Configuration saved successfully!');
+ location.reload();
+ } else {
+ alert('Error saving configuration: ' + (data.error || 'Unknown error'));
+ }
+ })
+ .catch(error => {
+ console.error('Error:', error);
+ alert('Error saving configuration: ' + error.message);
+ });
+ }
+
+ function resetToDefaults() {
+ if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {
+ fetch('/maintenance/config/defaults', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ }
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ alert('Configuration reset to defaults!');
+ location.reload();
+ } else {
+ alert('Error resetting configuration: ' + (data.error || 'Unknown error'));
+ }
+ })
+ .catch(error => {
+ console.error('Error:', error);
+ alert('Error resetting configuration: ' + error.message);
+ });
+ }
+ }
+ </script>
+}
+
+// ConfigField renders a single configuration field based on schema with typed value lookup
+templ ConfigField(field *config.Field, config *maintenance.MaintenanceConfig) {
+ if field.InputType == "interval" {
+ <!-- Interval field with number input + unit dropdown -->
+ <div class="mb-3">
+ <label for={ field.JSONName } class="form-label">
+ { field.DisplayName }
+ if field.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <div class="input-group">
+ <input
+ type="number"
+ class="form-control"
+ id={ field.JSONName + "_value" }
+ name={ field.JSONName + "_value" }
+ value={ fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getMaintenanceInt32Field(config, field.JSONName))) }
+ step="1"
+ min="1"
+ if field.Required {
+ required
+ }
+ />
+ <select
+ class="form-select"
+ id={ field.JSONName + "_unit" }
+ name={ field.JSONName + "_unit" }
+ style="max-width: 120px;"
+ if field.Required {
+ required
+ }
+ >
+ <option
+ value="minutes"
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "minutes" {
+ selected
+ }
+ >
+ Minutes
+ </option>
+ <option
+ value="hours"
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "hours" {
+ selected
+ }
+ >
+ Hours
+ </option>
+ <option
+ value="days"
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "days" {
+ selected
+ }
+ >
+ Days
+ </option>
+ </select>
+ </div>
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ } else if field.InputType == "checkbox" {
+ <!-- Checkbox field -->
+ <div class="mb-3">
+ <div class="form-check form-switch">
+ <input
+ class="form-check-input"
+ type="checkbox"
+ id={ field.JSONName }
+ name={ field.JSONName }
+ if getMaintenanceBoolField(config, field.JSONName) {
+ checked
+ }
+ />
+ <label class="form-check-label" for={ field.JSONName }>
+ <strong>{ field.DisplayName }</strong>
+ </label>
+ </div>
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ } else {
+ <!-- Number field -->
+ <div class="mb-3">
+ <label for={ field.JSONName } class="form-label">
+ { field.DisplayName }
+ if field.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <input
+ type="number"
+ class="form-control"
+ id={ field.JSONName }
+ name={ field.JSONName }
+ value={ fmt.Sprintf("%d", getMaintenanceInt32Field(config, field.JSONName)) }
+ placeholder={ field.Placeholder }
+ if field.MinValue != nil {
+ min={ fmt.Sprintf("%v", field.MinValue) }
+ }
+ if field.MaxValue != nil {
+ max={ fmt.Sprintf("%v", field.MaxValue) }
+ }
+ step={ getNumberStep(field) }
+ if field.Required {
+ required
+ }
+ />
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ }
+}
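+// Rendering example (illustrative): a schema field with JSONName "scan_interval_seconds" and
+// InputType "interval" produces a "scan_interval_seconds_value" number input plus a
+// "scan_interval_seconds_unit" select, which saveConfiguration() above recombines into seconds;
+// the "enabled" checkbox field renders as a single form-switch backed by getMaintenanceBoolField.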
+
+// Helper functions for form field types
+
+func getNumberStep(field *config.Field) string {
+ if field.Type == config.FieldTypeFloat {
+ return "0.01"
+ }
+ return "1"
+}
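+// For example, a config.FieldTypeFloat field is rendered with step="0.01" so fractional values
+// can be entered; every other field type falls back to step="1".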
+
+// Typed field getters for MaintenanceConfig - no interface{} needed
+func getMaintenanceInt32Field(config *maintenance.MaintenanceConfig, fieldName string) int32 {
+ if config == nil {
+ return 0
+ }
+
+ switch fieldName {
+ case "scan_interval_seconds":
+ return config.ScanIntervalSeconds
+ case "worker_timeout_seconds":
+ return config.WorkerTimeoutSeconds
+ case "task_timeout_seconds":
+ return config.TaskTimeoutSeconds
+ case "retry_delay_seconds":
+ return config.RetryDelaySeconds
+ case "max_retries":
+ return config.MaxRetries
+ case "cleanup_interval_seconds":
+ return config.CleanupIntervalSeconds
+ case "task_retention_seconds":
+ return config.TaskRetentionSeconds
+ case "global_max_concurrent":
+ if config.Policy != nil {
+ return config.Policy.GlobalMaxConcurrent
+ }
+ return 0
+ default:
+ return 0
+ }
+}
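+// Illustrative lookups (values assume the documented defaults): "scan_interval_seconds" maps to
+// config.ScanIntervalSeconds (1800 for the 30-minute default) and "global_max_concurrent" reads
+// through config.Policy (4 by default); unrecognized field names fall through and return 0.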
+
+func getMaintenanceBoolField(config *maintenance.MaintenanceConfig, fieldName string) bool {
+ if config == nil {
+ return false
+ }
+
+ switch fieldName {
+ case "enabled":
+ return config.Enabled
+ default:
+ return false
+ }
+}
+
+// Helper function to convert schema to JSON for JavaScript
+templ schemaToJSON(schema *maintenance.MaintenanceConfigSchema) {
+ {`{}`}
+}
\ No newline at end of file
diff --git a/weed/admin/view/app/maintenance_config_schema_templ.go b/weed/admin/view/app/maintenance_config_schema_templ.go
new file mode 100644
index 000000000..e13e2af3a
--- /dev/null
+++ b/weed/admin/view/app/maintenance_config_schema_templ.go
@@ -0,0 +1,622 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
+ "github.com/seaweedfs/seaweedfs/weed/admin/view/components"
+)
+
+func MaintenanceConfigSchema(data *maintenance.MaintenanceConfigData, schema *maintenance.MaintenanceConfigSchema) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Maintenance Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance/tasks\" class=\"btn btn-outline-primary\"><i class=\"fas fa-tasks me-1\"></i> View Tasks</a></div></div></div></div><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">System Settings</h5></div><div class=\"card-body\"><form id=\"maintenanceConfigForm\"><!-- Dynamically render all schema fields in order -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, field := range schema.Fields {
+ templ_7745c5c3_Err = ConfigField(field, data.Config).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<div class=\"d-flex gap-2\"><button type=\"button\" class=\"btn btn-primary\" onclick=\"saveConfiguration()\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Task Configuration Cards --><div class=\"row mt-4\"><div class=\"col-md-4\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-broom me-2\"></i> Volume Vacuum</h5></div><div class=\"card-body\"><p class=\"card-text\">Reclaims disk space by removing deleted files from volumes.</p><a href=\"/maintenance/config/vacuum\" class=\"btn btn-primary\">Configure</a></div></div></div><div class=\"col-md-4\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-balance-scale me-2\"></i> Volume Balance</h5></div><div class=\"card-body\"><p class=\"card-text\">Redistributes volumes across servers to optimize storage utilization.</p><a href=\"/maintenance/config/balance\" class=\"btn btn-primary\">Configure</a></div></div></div><div class=\"col-md-4\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-shield-alt me-2\"></i> Erasure Coding</h5></div><div class=\"card-body\"><p class=\"card-text\">Converts volumes to erasure coded format for improved durability.</p><a href=\"/maintenance/config/erasure_coding\" class=\"btn btn-primary\">Configure</a></div></div></div></div></div><script>\n function saveConfiguration() {\n const form = document.getElementById('maintenanceConfigForm');\n const formData = new FormData(form);\n \n // Convert form data to JSON, handling interval fields specially\n const config = {};\n \n for (let [key, value] of formData.entries()) {\n if (key.endsWith('_value')) {\n // This is an interval value part\n const baseKey = key.replace('_value', '');\n const unitKey = baseKey + '_unit';\n const unitValue = formData.get(unitKey);\n \n if (unitValue) {\n // Convert to seconds based on unit\n const numValue = parseInt(value) || 0;\n let seconds = numValue;\n switch(unitValue) {\n case 'minutes':\n seconds = numValue * 60;\n break;\n case 'hours':\n seconds = numValue * 3600;\n break;\n case 'days':\n seconds = numValue * 24 * 3600;\n break;\n }\n config[baseKey] = seconds;\n }\n } else if (key.endsWith('_unit')) {\n // Skip unit keys - they're handled with their corresponding value\n continue;\n } else {\n // Regular field\n if (form.querySelector(`[name=\"${key}\"]`).type === 'checkbox') {\n config[key] = form.querySelector(`[name=\"${key}\"]`).checked;\n } else {\n const numValue = parseFloat(value);\n config[key] = isNaN(numValue) ? value : numValue;\n }\n }\n }\n\n fetch('/api/maintenance/config', {\n method: 'PUT',\n headers: {\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(config)\n })\n .then(response => {\n if (response.status === 401) {\n alert('Authentication required. Please log in first.');\n window.location.href = '/login';\n return;\n }\n return response.json();\n })\n .then(data => {\n if (!data) return; // Skip if redirected to login\n if (data.success) {\n alert('Configuration saved successfully!');\n location.reload();\n } else {\n alert('Error saving configuration: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n console.error('Error:', error);\n alert('Error saving configuration: ' + error.message);\n });\n }\n\n function resetToDefaults() {\n if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {\n fetch('/maintenance/config/defaults', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Configuration reset to defaults!');\n location.reload();\n } else {\n alert('Error resetting configuration: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n console.error('Error:', error);\n alert('Error resetting configuration: ' + error.message);\n });\n }\n }\n </script>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// ConfigField renders a single configuration field based on schema with typed value lookup
+func ConfigField(field *config.Field, config *maintenance.MaintenanceConfig) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var2 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var2 == nil {
+ templ_7745c5c3_Var2 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if field.InputType == "interval" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "<!-- Interval field with number input + unit dropdown --> <div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 212, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 213, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</label><div class=\"input-group\"><input type=\"number\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 222, Col: 50}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 223, Col: 52}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getMaintenanceInt32Field(config, field.JSONName))))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 224, Col: 143}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" step=\"1\" min=\"1\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "> <select class=\"form-select\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 233, Col: 49}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 234, Col: 51}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\" style=\"max-width: 120px;\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "><option value=\"minutes\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "minutes" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, ">Minutes</option> <option value=\"hours\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "hours" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, ">Hours</option> <option value=\"days\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "days" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, ">Days</option></select></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 267, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if field.InputType == "checkbox" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<!-- Checkbox field --> <div class=\"mb-3\"><div class=\"form-check form-switch\"><input class=\"form-check-input\" type=\"checkbox\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 277, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 278, Col: 41}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if getMaintenanceBoolField(config, field.JSONName) {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, " checked")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "> <label class=\"form-check-label\" for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 283, Col: 68}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "\"><strong>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 284, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "</strong></label></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 288, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<!-- Number field --> <div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 294, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 295, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</label> <input type=\"number\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 303, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 304, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", getMaintenanceInt32Field(config, field.JSONName)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 305, Col: 91}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" placeholder=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(field.Placeholder)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 306, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.MinValue != nil {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, " min=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MinValue))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 308, Col: 59}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if field.MaxValue != nil {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, " max=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MaxValue))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 311, Col: 59}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, " step=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(getNumberStep(field))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 313, Col: 43}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 319, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ return nil
+ })
+}
+
+// Helper functions for form field types
+
+func getNumberStep(field *config.Field) string {
+ if field.Type == config.FieldTypeFloat {
+ return "0.01"
+ }
+ return "1"
+}
+
+// Typed field getters for MaintenanceConfig - no interface{} needed
+func getMaintenanceInt32Field(config *maintenance.MaintenanceConfig, fieldName string) int32 {
+ if config == nil {
+ return 0
+ }
+
+ switch fieldName {
+ case "scan_interval_seconds":
+ return config.ScanIntervalSeconds
+ case "worker_timeout_seconds":
+ return config.WorkerTimeoutSeconds
+ case "task_timeout_seconds":
+ return config.TaskTimeoutSeconds
+ case "retry_delay_seconds":
+ return config.RetryDelaySeconds
+ case "max_retries":
+ return config.MaxRetries
+ case "cleanup_interval_seconds":
+ return config.CleanupIntervalSeconds
+ case "task_retention_seconds":
+ return config.TaskRetentionSeconds
+ case "global_max_concurrent":
+ if config.Policy != nil {
+ return config.Policy.GlobalMaxConcurrent
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func getMaintenanceBoolField(config *maintenance.MaintenanceConfig, fieldName string) bool {
+ if config == nil {
+ return false
+ }
+
+ switch fieldName {
+ case "enabled":
+ return config.Enabled
+ default:
+ return false
+ }
+}
+
+// Helper function to convert schema to JSON for JavaScript
+func schemaToJSON(schema *maintenance.MaintenanceConfigSchema) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var26 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var26 == nil {
+ templ_7745c5c3_Var26 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(`{}`)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 380, Col: 9}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+var _ = templruntime.GeneratedTemplate
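
The generated ConfigField component above resolves each schema field's current value through typed getters (getMaintenanceInt32Field, getMaintenanceBoolField) keyed by the field's JSON name, so no interface{} or reflection is involved. A minimal, self-contained sketch of that pattern follows; the struct and getter here are trimmed stand-ins for the real weed/admin/maintenance types, shown for illustration only.

package main

import "fmt"

// Trimmed stand-ins for the real maintenance types; the actual structs
// in weed/admin/maintenance carry many more fields.
type MaintenancePolicy struct{ GlobalMaxConcurrent int32 }

type MaintenanceConfig struct {
	Enabled             bool
	ScanIntervalSeconds int32
	Policy              *MaintenancePolicy
}

// Same shape as getMaintenanceInt32Field above: map a schema field's
// JSON name to the concrete config value, defaulting to zero.
func getInt32Field(c *MaintenanceConfig, name string) int32 {
	if c == nil {
		return 0
	}
	switch name {
	case "scan_interval_seconds":
		return c.ScanIntervalSeconds
	case "global_max_concurrent":
		if c.Policy != nil {
			return c.Policy.GlobalMaxConcurrent
		}
	}
	return 0
}

func main() {
	cfg := &MaintenanceConfig{
		Enabled:             true,
		ScanIntervalSeconds: 1800,
		Policy:              &MaintenancePolicy{GlobalMaxConcurrent: 4},
	}
	// The template feeds field.JSONName straight into the getter to
	// populate each input's value attribute.
	fmt.Println(getInt32Field(cfg, "scan_interval_seconds")) // 1800
	fmt.Println(getInt32Field(cfg, "global_max_concurrent")) // 4
}
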
diff --git a/weed/admin/view/app/maintenance_config_templ.go b/weed/admin/view/app/maintenance_config_templ.go
index 038597925..924e2facd 100644
--- a/weed/admin/view/app/maintenance_config_templ.go
+++ b/weed/admin/view/app/maintenance_config_templ.go
@@ -57,85 +57,85 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" min=\"1\" max=\"1440\"> <small class=\"form-text text-muted\">How often to scan for maintenance tasks (1-1440 minutes).</small></div><div class=\"mb-3\"><label for=\"workerTimeout\" class=\"form-label\">Worker Timeout (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"workerTimeout\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" placeholder=\"30 (default)\" min=\"1\" max=\"1440\"> <small class=\"form-text text-muted\">How often to scan for maintenance tasks (1-1440 minutes). <strong>Default: 30 minutes</strong></small></div><div class=\"mb-3\"><label for=\"workerTimeout\" class=\"form-label\">Worker Timeout (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"workerTimeout\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.WorkerTimeoutSeconds)/60))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 59, Col: 111}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 60, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" min=\"1\" max=\"60\"> <small class=\"form-text text-muted\">How long to wait for worker heartbeat before considering it inactive (1-60 minutes).</small></div><div class=\"mb-3\"><label for=\"taskTimeout\" class=\"form-label\">Task Timeout (hours)</label> <input type=\"number\" class=\"form-control\" id=\"taskTimeout\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"5 (default)\" min=\"1\" max=\"60\"> <small class=\"form-text text-muted\">How long to wait for worker heartbeat before considering it inactive (1-60 minutes). <strong>Default: 5 minutes</strong></small></div><div class=\"mb-3\"><label for=\"taskTimeout\" class=\"form-label\">Task Timeout (hours)</label> <input type=\"number\" class=\"form-control\" id=\"taskTimeout\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.TaskTimeoutSeconds)/3600))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 68, Col: 111}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 70, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" min=\"1\" max=\"24\"> <small class=\"form-text text-muted\">Maximum time allowed for a single task to complete (1-24 hours).</small></div><div class=\"mb-3\"><label for=\"globalMaxConcurrent\" class=\"form-label\">Global Concurrent Limit</label> <input type=\"number\" class=\"form-control\" id=\"globalMaxConcurrent\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"2 (default)\" min=\"1\" max=\"24\"> <small class=\"form-text text-muted\">Maximum time allowed for a single task to complete (1-24 hours). <strong>Default: 2 hours</strong></small></div><div class=\"mb-3\"><label for=\"globalMaxConcurrent\" class=\"form-label\">Global Concurrent Limit</label> <input type=\"number\" class=\"form-control\" id=\"globalMaxConcurrent\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Config.Policy.GlobalMaxConcurrent))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 77, Col: 103}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 80, Col: 103}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "\" min=\"1\" max=\"20\"> <small class=\"form-text text-muted\">Maximum number of maintenance tasks that can run simultaneously across all workers (1-20).</small></div><div class=\"mb-3\"><label for=\"maxRetries\" class=\"form-label\">Default Max Retries</label> <input type=\"number\" class=\"form-control\" id=\"maxRetries\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "\" placeholder=\"4 (default)\" min=\"1\" max=\"20\"> <small class=\"form-text text-muted\">Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). <strong>Default: 4</strong></small></div><div class=\"mb-3\"><label for=\"maxRetries\" class=\"form-label\">Default Max Retries</label> <input type=\"number\" class=\"form-control\" id=\"maxRetries\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Config.MaxRetries))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 86, Col: 87}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 90, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\" min=\"0\" max=\"10\"> <small class=\"form-text text-muted\">Default number of times to retry failed tasks (0-10).</small></div><div class=\"mb-3\"><label for=\"retryDelay\" class=\"form-label\">Retry Delay (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"retryDelay\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\" placeholder=\"3 (default)\" min=\"0\" max=\"10\"> <small class=\"form-text text-muted\">Default number of times to retry failed tasks (0-10). <strong>Default: 3</strong></small></div><div class=\"mb-3\"><label for=\"retryDelay\" class=\"form-label\">Retry Delay (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"retryDelay\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.RetryDelaySeconds)/60))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 95, Col: 108}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 100, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" min=\"1\" max=\"120\"> <small class=\"form-text text-muted\">Time to wait before retrying failed tasks (1-120 minutes).</small></div><div class=\"mb-3\"><label for=\"taskRetention\" class=\"form-label\">Task Retention (days)</label> <input type=\"number\" class=\"form-control\" id=\"taskRetention\" value=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" placeholder=\"15 (default)\" min=\"1\" max=\"120\"> <small class=\"form-text text-muted\">Time to wait before retrying failed tasks (1-120 minutes). <strong>Default: 15 minutes</strong></small></div><div class=\"mb-3\"><label for=\"taskRetention\" class=\"form-label\">Task Retention (days)</label> <input type=\"number\" class=\"form-control\" id=\"taskRetention\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.TaskRetentionSeconds)/(24*3600)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 104, Col: 118}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 110, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" min=\"1\" max=\"30\"> <small class=\"form-text text-muted\">How long to keep completed/failed task records (1-30 days).</small></div><div class=\"d-flex gap-2\"><button type=\"button\" class=\"btn btn-primary\" onclick=\"saveConfiguration()\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Individual Task Configuration Menu --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Task Configuration</h5></div><div class=\"card-body\"><p class=\"text-muted mb-3\">Configure specific settings for each maintenance task type.</p><div class=\"list-group\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" placeholder=\"7 (default)\" min=\"1\" max=\"30\"> <small class=\"form-text text-muted\">How long to keep completed/failed task records (1-30 days). <strong>Default: 7 days</strong></small></div><div class=\"d-flex gap-2\"><button type=\"button\" class=\"btn btn-primary\" onclick=\"saveConfiguration()\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Individual Task Configuration Menu --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Task Configuration</h5></div><div class=\"card-body\"><p class=\"text-muted mb-3\">Configure specific settings for each maintenance task type.</p><div class=\"list-group\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -147,7 +147,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var9 templ.SafeURL
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.Path))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 140, Col: 69}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 147, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
@@ -182,7 +182,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.DisplayName)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 144, Col: 65}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 151, Col: 65}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
@@ -192,7 +192,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- if data.Config.Policy.IsTaskEnabled(menuItem.TaskType) {
+ if menuItem.IsEnabled {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<span class=\"badge bg-success\">Enabled</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
@@ -210,7 +210,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Description)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 152, Col: 90}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 159, Col: 90}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
@@ -228,7 +228,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastScanTime.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 173, Col: 100}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 180, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
@@ -241,7 +241,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(data.NextScanTime.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 179, Col: 100}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 186, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
@@ -254,7 +254,7 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.TotalTasks))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 185, Col: 99}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 192, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
@@ -267,13 +267,13 @@ func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.ActiveWorkers))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 191, Col: 102}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 198, Col: 102}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</p></div></div></div></div></div></div></div></div><script>\n function saveConfiguration() {\n const config = {\n enabled: document.getElementById('enabled').checked,\n scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds\n policy: {\n vacuum_enabled: document.getElementById('vacuumEnabled').checked,\n vacuum_garbage_ratio: parseFloat(document.getElementById('vacuumGarbageRatio').value) / 100,\n replication_fix_enabled: document.getElementById('replicationFixEnabled').checked,\n }\n };\n\n fetch('/api/maintenance/config', {\n method: 'PUT',\n headers: {\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(config)\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Configuration saved successfully');\n } else {\n alert('Failed to save configuration: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n\n function resetToDefaults() {\n if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {\n // Reset form to defaults\n document.getElementById('enabled').checked = false;\n document.getElementById('scanInterval').value = '30';\n document.getElementById('vacuumEnabled').checked = false;\n document.getElementById('vacuumGarbageRatio').value = '30';\n document.getElementById('replicationFixEnabled').checked = false;\n }\n }\n </script>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</p></div></div></div></div></div></div></div></div><script>\n function saveConfiguration() {\n // First, get current configuration to preserve existing values\n fetch('/api/maintenance/config')\n .then(response => response.json())\n .then(currentConfig => {\n // Update only the fields from the form\n const updatedConfig = {\n ...currentConfig.config, // Preserve existing config\n enabled: document.getElementById('enabled').checked,\n scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds\n worker_timeout_seconds: parseInt(document.getElementById('workerTimeout').value) * 60, // Convert to seconds\n task_timeout_seconds: parseInt(document.getElementById('taskTimeout').value) * 3600, // Convert to seconds\n retry_delay_seconds: parseInt(document.getElementById('retryDelay').value) * 60, // Convert to seconds\n max_retries: parseInt(document.getElementById('maxRetries').value),\n task_retention_seconds: parseInt(document.getElementById('taskRetention').value) * 24 * 3600, // Convert to seconds\n policy: {\n ...currentConfig.config.policy, // Preserve existing policy\n global_max_concurrent: parseInt(document.getElementById('globalMaxConcurrent').value)\n }\n };\n\n // Send the updated configuration\n return fetch('/api/maintenance/config', {\n method: 'PUT',\n headers: {\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(updatedConfig)\n });\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Configuration saved successfully');\n location.reload(); // Reload to show updated values\n } else {\n alert('Failed to save configuration: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n\n function resetToDefaults() {\n if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {\n // Reset form to defaults (matching DefaultMaintenanceConfig values)\n document.getElementById('enabled').checked = false;\n document.getElementById('scanInterval').value = '30';\n document.getElementById('workerTimeout').value = '5';\n document.getElementById('taskTimeout').value = '2';\n document.getElementById('globalMaxConcurrent').value = '4';\n document.getElementById('maxRetries').value = '3';\n document.getElementById('retryDelay').value = '15';\n document.getElementById('taskRetention').value = '7';\n }\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
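
The updated saveConfiguration script above first reads the current config, overlays the form values, and converts display units back to seconds before the PUT (minutes × 60, hours × 3600, days × 86400). A small Go sketch of those same conversions, using the defaults shown in the placeholders; the helper names are illustrative and not part of the codebase.

package main

import "fmt"

// Illustrative helpers mirroring the form's display-unit conversions;
// the function names are assumptions, not code from this change.
func minutesToSeconds(m int32) int32 { return m * 60 }
func hoursToSeconds(h int32) int32   { return h * 3600 }
func daysToSeconds(d int32) int32    { return d * 24 * 3600 }

func main() {
	// Defaults shown in the placeholders above: scan every 30 minutes,
	// 2 hour task timeout, 7 day task retention.
	fmt.Println(minutesToSeconds(30)) // 1800
	fmt.Println(hoursToSeconds(2))    // 7200
	fmt.Println(daysToSeconds(7))     // 604800
}
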
diff --git a/weed/admin/view/app/maintenance_queue.templ b/weed/admin/view/app/maintenance_queue.templ
index 2c72c17ff..f16a72381 100644
--- a/weed/admin/view/app/maintenance_queue.templ
+++ b/weed/admin/view/app/maintenance_queue.templ
@@ -70,43 +70,118 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
</div>
</div>
- <!-- Simple task queue display -->
- <div class="row">
+ <!-- Pending Tasks -->
+ <div class="row mb-4">
<div class="col-12">
<div class="card">
- <div class="card-header">
- <h5 class="mb-0">Task Queue</h5>
+ <div class="card-header bg-primary text-white">
+ <h5 class="mb-0">
+ <i class="fas fa-clock me-2"></i>
+ Pending Tasks
+ </h5>
</div>
<div class="card-body">
- if len(data.Tasks) == 0 {
+ if data.Stats.PendingTasks == 0 {
<div class="text-center text-muted py-4">
<i class="fas fa-clipboard-list fa-3x mb-3"></i>
- <p>No maintenance tasks in queue</p>
- <small>Tasks will appear here when the system detects maintenance needs</small>
+ <p>No pending maintenance tasks</p>
+ <small>Pending tasks will appear here when the system detects maintenance needs</small>
</div>
} else {
<div class="table-responsive">
<table class="table table-hover">
<thead>
<tr>
- <th>ID</th>
<th>Type</th>
- <th>Status</th>
+ <th>Priority</th>
<th>Volume</th>
<th>Server</th>
+ <th>Reason</th>
<th>Created</th>
</tr>
</thead>
<tbody>
for _, task := range data.Tasks {
- <tr>
- <td><code>{task.ID[:8]}...</code></td>
- <td>{string(task.Type)}</td>
- <td>{string(task.Status)}</td>
- <td>{fmt.Sprintf("%d", task.VolumeID)}</td>
- <td>{task.Server}</td>
- <td>{task.CreatedAt.Format("2006-01-02 15:04")}</td>
- </tr>
+ if string(task.Status) == "pending" {
+ <tr>
+ <td>
+ @TaskTypeIcon(task.Type)
+ {string(task.Type)}
+ </td>
+ <td>@PriorityBadge(task.Priority)</td>
+ <td>{fmt.Sprintf("%d", task.VolumeID)}</td>
+ <td><small>{task.Server}</small></td>
+ <td><small>{task.Reason}</small></td>
+ <td>{task.CreatedAt.Format("2006-01-02 15:04")}</td>
+ </tr>
+ }
+ }
+ </tbody>
+ </table>
+ </div>
+ }
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Active Tasks -->
+ <div class="row mb-4">
+ <div class="col-12">
+ <div class="card">
+ <div class="card-header bg-warning text-dark">
+ <h5 class="mb-0">
+ <i class="fas fa-running me-2"></i>
+ Active Tasks
+ </h5>
+ </div>
+ <div class="card-body">
+ if data.Stats.RunningTasks == 0 {
+ <div class="text-center text-muted py-4">
+ <i class="fas fa-tasks fa-3x mb-3"></i>
+ <p>No active maintenance tasks</p>
+ <small>Active tasks will appear here when workers start processing them</small>
+ </div>
+ } else {
+ <div class="table-responsive">
+ <table class="table table-hover">
+ <thead>
+ <tr>
+ <th>Type</th>
+ <th>Status</th>
+ <th>Progress</th>
+ <th>Volume</th>
+ <th>Worker</th>
+ <th>Started</th>
+ </tr>
+ </thead>
+ <tbody>
+ for _, task := range data.Tasks {
+ if string(task.Status) == "assigned" || string(task.Status) == "in_progress" {
+ <tr>
+ <td>
+ @TaskTypeIcon(task.Type)
+ {string(task.Type)}
+ </td>
+ <td>@StatusBadge(task.Status)</td>
+ <td>@ProgressBar(task.Progress, task.Status)</td>
+ <td>{fmt.Sprintf("%d", task.VolumeID)}</td>
+ <td>
+ if task.WorkerID != "" {
+ <small>{task.WorkerID}</small>
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ <td>
+ if task.StartedAt != nil {
+ {task.StartedAt.Format("2006-01-02 15:04")}
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ </tr>
+ }
}
</tbody>
</table>
@@ -117,36 +192,104 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
</div>
</div>
- <!-- Workers Summary -->
- <div class="row mt-4">
+ <!-- Completed Tasks -->
+ <div class="row mb-4">
<div class="col-12">
<div class="card">
- <div class="card-header">
- <h5 class="mb-0">Active Workers</h5>
+ <div class="card-header bg-success text-white">
+ <h5 class="mb-0">
+ <i class="fas fa-check-circle me-2"></i>
+ Completed Tasks
+ </h5>
</div>
<div class="card-body">
- if len(data.Workers) == 0 {
+ if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 {
<div class="text-center text-muted py-4">
- <i class="fas fa-robot fa-3x mb-3"></i>
- <p>No workers are currently active</p>
- <small>Start workers using: <code>weed worker -admin=localhost:9333</code></small>
+ <i class="fas fa-check-circle fa-3x mb-3"></i>
+ <p>No completed maintenance tasks today</p>
+ <small>Completed tasks will appear here after workers finish processing them</small>
</div>
} else {
- <div class="row">
- for _, worker := range data.Workers {
- <div class="col-md-4 mb-3">
- <div class="card">
- <div class="card-body">
- <h6 class="card-title">{worker.ID}</h6>
- <p class="card-text">
- <small class="text-muted">{worker.Address}</small><br/>
- Status: {worker.Status}<br/>
- Load: {fmt.Sprintf("%d/%d", worker.CurrentLoad, worker.MaxConcurrent)}
- </p>
- </div>
- </div>
- </div>
- }
+ <div class="table-responsive">
+ <table class="table table-hover">
+ <thead>
+ <tr>
+ <th>Type</th>
+ <th>Status</th>
+ <th>Volume</th>
+ <th>Worker</th>
+ <th>Duration</th>
+ <th>Completed</th>
+ </tr>
+ </thead>
+ <tbody>
+ for _, task := range data.Tasks {
+ if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" {
+ if string(task.Status) == "failed" {
+ <tr class="table-danger">
+ <td>
+ @TaskTypeIcon(task.Type)
+ {string(task.Type)}
+ </td>
+ <td>@StatusBadge(task.Status)</td>
+ <td>{fmt.Sprintf("%d", task.VolumeID)}</td>
+ <td>
+ if task.WorkerID != "" {
+ <small>{task.WorkerID}</small>
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ <td>
+ if task.StartedAt != nil && task.CompletedAt != nil {
+ {formatDuration(task.CompletedAt.Sub(*task.StartedAt))}
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ <td>
+ if task.CompletedAt != nil {
+ {task.CompletedAt.Format("2006-01-02 15:04")}
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ </tr>
+ } else {
+ <tr>
+ <td>
+ @TaskTypeIcon(task.Type)
+ {string(task.Type)}
+ </td>
+ <td>@StatusBadge(task.Status)</td>
+ <td>{fmt.Sprintf("%d", task.VolumeID)}</td>
+ <td>
+ if task.WorkerID != "" {
+ <small>{task.WorkerID}</small>
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ <td>
+ if task.StartedAt != nil && task.CompletedAt != nil {
+ {formatDuration(task.CompletedAt.Sub(*task.StartedAt))}
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ <td>
+ if task.CompletedAt != nil {
+ {task.CompletedAt.Format("2006-01-02 15:04")}
+ } else {
+ <span class="text-muted">-</span>
+ }
+ </td>
+ </tr>
+ }
+ }
+ }
+ </tbody>
+ </table>
</div>
}
</div>
@@ -156,6 +299,9 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
</div>
<script>
+ // Debug output to browser console
+ console.log("DEBUG: Maintenance Queue Template loaded");
+
// Auto-refresh every 10 seconds
setInterval(function() {
if (!document.hidden) {
@@ -163,7 +309,8 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
}
}, 10000);
- function triggerScan() {
+ window.triggerScan = function() {
+ console.log("triggerScan called");
fetch('/api/maintenance/scan', {
method: 'POST',
headers: {
@@ -182,7 +329,12 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
.catch(error => {
alert('Error: ' + error.message);
});
- }
+ };
+
+ window.refreshPage = function() {
+ console.log("refreshPage called");
+ window.location.reload();
+ };
</script>
}
@@ -243,32 +395,13 @@ templ ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) {
}
}
-templ WorkerStatusBadge(status string) {
- switch status {
- case "active":
- <span class="badge bg-success">Active</span>
- case "busy":
- <span class="badge bg-warning">Busy</span>
- case "inactive":
- <span class="badge bg-secondary">Inactive</span>
- default:
- <span class="badge bg-light text-dark">Unknown</span>
- }
-}
-
-// Helper functions (would be defined in Go)
-
-
-func getWorkerStatusColor(status string) string {
- switch status {
- case "active":
- return "success"
- case "busy":
- return "warning"
- case "inactive":
- return "secondary"
- default:
- return "light"
+func formatDuration(d time.Duration) string {
+ if d < time.Minute {
+ return fmt.Sprintf("%.0fs", d.Seconds())
+ } else if d < time.Hour {
+ return fmt.Sprintf("%.1fm", d.Minutes())
+ } else {
+ return fmt.Sprintf("%.1fh", d.Hours())
}
}
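
formatDuration above picks one unit by magnitude: whole seconds below a minute, tenths of a minute below an hour, tenths of an hour otherwise. A self-contained usage sketch with arbitrary sample durations:

package main

import (
	"fmt"
	"time"
)

// Copied from the template helper added above.
func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.0fs", d.Seconds())
	} else if d < time.Hour {
		return fmt.Sprintf("%.1fm", d.Minutes())
	} else {
		return fmt.Sprintf("%.1fh", d.Hours())
	}
}

func main() {
	fmt.Println(formatDuration(45 * time.Second))  // 45s
	fmt.Println(formatDuration(90 * time.Second))  // 1.5m
	fmt.Println(formatDuration(150 * time.Minute)) // 2.5h
}
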
diff --git a/weed/admin/view/app/maintenance_queue_templ.go b/weed/admin/view/app/maintenance_queue_templ.go
index 42b5fa220..35ee421af 100644
--- a/weed/admin/view/app/maintenance_queue_templ.go
+++ b/weed/admin/view/app/maintenance_queue_templ.go
@@ -87,102 +87,103 @@ func MaintenanceQueue(data *maintenance.MaintenanceQueueData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><p class=\"text-muted mb-0\">Failed Today</p></div></div></div></div><!-- Simple task queue display --><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">Task Queue</h5></div><div class=\"card-body\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><p class=\"text-muted mb-0\">Failed Today</p></div></div></div></div><!-- Pending Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-primary text-white\"><h5 class=\"mb-0\"><i class=\"fas fa-clock me-2\"></i> Pending Tasks</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- if len(data.Tasks) == 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-clipboard-list fa-3x mb-3\"></i><p>No maintenance tasks in queue</p><small>Tasks will appear here when the system detects maintenance needs</small></div>")
+ if data.Stats.PendingTasks == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-clipboard-list fa-3x mb-3\"></i><p>No pending maintenance tasks</p><small>Pending tasks will appear here when the system detects maintenance needs</small></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>ID</th><th>Type</th><th>Status</th><th>Volume</th><th>Server</th><th>Created</th></tr></thead> <tbody>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Priority</th><th>Volume</th><th>Server</th><th>Reason</th><th>Created</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, task := range data.Tasks {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<tr><td><code>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var6 string
- templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID[:8])
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 103, Col: 70}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "...</code></td><td>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var7 string
- templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 104, Col: 70}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</td><td>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var8 string
- templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Status))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 105, Col: 72}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</td><td>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var9 string
- templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 106, Col: 85}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</td><td>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var10 string
- templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(task.Server)
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 107, Col: 64}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</td><td>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var11 string
- templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(task.CreatedAt.Format("2006-01-02 15:04"))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 108, Col: 94}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</td></tr>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
+ if string(task.Status) == "pending" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<tr><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 109, Col: 74}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = PriorityBadge(task.Priority).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 112, Col: 89}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</td><td><small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(task.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 113, Col: 75}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</small></td><td><small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(task.Reason)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 114, Col: 75}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</small></td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(task.CreatedAt.Format("2006-01-02 15:04"))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 115, Col: 98}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</tbody></table></div>")
@@ -190,84 +191,374 @@ func MaintenanceQueue(data *maintenance.MaintenanceQueueData) templ.Component {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</div></div></div></div><!-- Workers Summary --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">Active Workers</h5></div><div class=\"card-body\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</div></div></div></div><!-- Active Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-warning text-dark\"><h5 class=\"mb-0\"><i class=\"fas fa-running me-2\"></i> Active Tasks</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- if len(data.Workers) == 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-robot fa-3x mb-3\"></i><p>No workers are currently active</p><small>Start workers using: <code>weed worker -admin=localhost:9333</code></small></div>")
+ if data.Stats.RunningTasks == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-tasks fa-3x mb-3\"></i><p>No active maintenance tasks</p><small>Active tasks will appear here when workers start processing them</small></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "<div class=\"row\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Status</th><th>Progress</th><th>Volume</th><th>Worker</th><th>Started</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- for _, worker := range data.Workers {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<div class=\"col-md-4 mb-3\"><div class=\"card\"><div class=\"card-body\"><h6 class=\"card-title\">")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var12 string
- templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(worker.ID)
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 140, Col: 81}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</h6><p class=\"card-text\"><small class=\"text-muted\">")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var13 string
- templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Address)
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 142, Col: 93}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</small><br>Status: ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var14 string
- templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Status)
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 143, Col: 74}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<br>Load: ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var15 string
- templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", worker.CurrentLoad, worker.MaxConcurrent))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 144, Col: 121}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</p></div></div></div>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
+ for _, task := range data.Tasks {
+ if string(task.Status) == "assigned" || string(task.Status) == "in_progress" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<tr><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 164, Col: 74}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = ProgressBar(task.Progress, task.Status).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 168, Col: 89}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.WorkerID != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 171, Col: 81}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.StartedAt != nil {
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(task.StartedAt.Format("2006-01-02 15:04"))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 178, Col: 102}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</tbody></table></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</div></div></div></div><!-- Completed Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-success text-white\"><h5 class=\"mb-0\"><i class=\"fas fa-check-circle me-2\"></i> Completed Tasks</h5></div><div class=\"card-body\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-check-circle fa-3x mb-3\"></i><p>No completed maintenance tasks today</p><small>Completed tasks will appear here after workers finish processing them</small></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Status</th><th>Volume</th><th>Worker</th><th>Duration</th><th>Completed</th></tr></thead> <tbody>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</div>")
+ for _, task := range data.Tasks {
+ if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" {
+ if string(task.Status) == "failed" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<tr class=\"table-danger\"><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 232, Col: 78}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 235, Col: 93}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.WorkerID != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 238, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.StartedAt != nil && task.CompletedAt != nil {
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 245, Col: 118}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.CompletedAt != nil {
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04"))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 252, Col: 108}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<tr><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 262, Col: 78}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 265, Col: 93}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.WorkerID != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 268, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</small>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.StartedAt != nil && task.CompletedAt != nil {
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 275, Col: 118}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if task.CompletedAt != nil {
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04"))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 282, Col: 108}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<span class=\"text-muted\">-</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</td></tr>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</div></div></div></div></div><script>\n // Auto-refresh every 10 seconds\n setInterval(function() {\n if (!document.hidden) {\n window.location.reload();\n }\n }, 10000);\n\n function triggerScan() {\n fetch('/api/maintenance/scan', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Maintenance scan triggered successfully');\n setTimeout(() => window.location.reload(), 2000);\n } else {\n alert('Failed to trigger scan: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n </script>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</div></div></div></div></div><script>\n // Debug output to browser console\n console.log(\"DEBUG: Maintenance Queue Template loaded\");\n \n // Auto-refresh every 10 seconds\n setInterval(function() {\n if (!document.hidden) {\n window.location.reload();\n }\n }, 10000);\n\n window.triggerScan = function() {\n console.log(\"triggerScan called\");\n fetch('/api/maintenance/scan', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Maintenance scan triggered successfully');\n setTimeout(() => window.location.reload(), 2000);\n } else {\n alert('Failed to trigger scan: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n };\n\n window.refreshPage = function() {\n console.log(\"refreshPage called\");\n window.location.reload();\n };\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
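The active and completed tables above are both driven by the same data.Tasks slice, filtered by raw status strings at render time. A minimal sketch of that split in plain Go, using a stand-in task type (not the actual maintenance.MaintenanceTask definition):

```go
package main

import "fmt"

// Hypothetical stand-in for the maintenance task type used by the template;
// only the Status field matters for this illustration.
type task struct {
	ID     string
	Status string
}

// splitTasks mirrors the template's status checks: "assigned"/"in_progress"
// rows feed the active table, "completed"/"failed"/"cancelled" rows feed the
// completed table, anything else is left to other sections of the page.
func splitTasks(all []task) (active, finished []task) {
	for _, t := range all {
		switch t.Status {
		case "assigned", "in_progress":
			active = append(active, t)
		case "completed", "failed", "cancelled":
			finished = append(finished, t)
		}
	}
	return active, finished
}

func main() {
	tasks := []task{
		{ID: "a", Status: "in_progress"},
		{ID: "b", Status: "completed"},
		{ID: "c", Status: "pending"},
	}
	active, finished := splitTasks(tasks)
	fmt.Println(len(active), len(finished)) // 1 1
}
```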
@@ -292,30 +583,30 @@ func TaskTypeIcon(taskType maintenance.MaintenanceTaskType) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var16 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var16 == nil {
- templ_7745c5c3_Var16 = templ.NopComponent
+ templ_7745c5c3_Var25 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var25 == nil {
+ templ_7745c5c3_Var25 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
- var templ_7745c5c3_Var17 = []any{maintenance.GetTaskIcon(taskType) + " me-1"}
- templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var17...)
+ var templ_7745c5c3_Var26 = []any{maintenance.GetTaskIcon(taskType) + " me-1"}
+ templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var26...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<i class=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var18 string
- templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var17).String())
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var26).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 1, Col: 0}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\"></i>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -339,34 +630,34 @@ func PriorityBadge(priority maintenance.MaintenanceTaskPriority) templ.Component
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var19 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var19 == nil {
- templ_7745c5c3_Var19 = templ.NopComponent
+ templ_7745c5c3_Var28 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var28 == nil {
+ templ_7745c5c3_Var28 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
switch priority {
case maintenance.PriorityCritical:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<span class=\"badge bg-danger\">Critical</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<span class=\"badge bg-danger\">Critical</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.PriorityHigh:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<span class=\"badge bg-warning\">High</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "<span class=\"badge bg-warning\">High</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.PriorityNormal:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<span class=\"badge bg-primary\">Normal</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "<span class=\"badge bg-primary\">Normal</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.PriorityLow:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<span class=\"badge bg-secondary\">Low</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "<span class=\"badge bg-secondary\">Low</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
default:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<span class=\"badge bg-light text-dark\">Unknown</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "<span class=\"badge bg-light text-dark\">Unknown</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -391,44 +682,44 @@ func StatusBadge(status maintenance.MaintenanceTaskStatus) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var20 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var20 == nil {
- templ_7745c5c3_Var20 = templ.NopComponent
+ templ_7745c5c3_Var29 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var29 == nil {
+ templ_7745c5c3_Var29 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
switch status {
case maintenance.TaskStatusPending:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<span class=\"badge bg-secondary\">Pending</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "<span class=\"badge bg-secondary\">Pending</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.TaskStatusAssigned:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<span class=\"badge bg-info\">Assigned</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "<span class=\"badge bg-info\">Assigned</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.TaskStatusInProgress:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<span class=\"badge bg-warning\">Running</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "<span class=\"badge bg-warning\">Running</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.TaskStatusCompleted:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<span class=\"badge bg-success\">Completed</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "<span class=\"badge bg-success\">Completed</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.TaskStatusFailed:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<span class=\"badge bg-danger\">Failed</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<span class=\"badge bg-danger\">Failed</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
case maintenance.TaskStatusCancelled:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<span class=\"badge bg-light text-dark\">Cancelled</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<span class=\"badge bg-light text-dark\">Cancelled</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
default:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<span class=\"badge bg-light text-dark\">Unknown</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "<span class=\"badge bg-light text-dark\">Unknown</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -453,96 +744,49 @@ func ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) tem
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var21 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var21 == nil {
- templ_7745c5c3_Var21 = templ.NopComponent
+ templ_7745c5c3_Var30 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var30 == nil {
+ templ_7745c5c3_Var30 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if status == maintenance.TaskStatusInProgress || status == maintenance.TaskStatusAssigned {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar\" role=\"progressbar\" style=\"")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar\" role=\"progressbar\" style=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var22 string
- templ_7745c5c3_Var22, templ_7745c5c3_Err = templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %.1f%%", progress))
+ var templ_7745c5c3_Var31 string
+ templ_7745c5c3_Var31, templ_7745c5c3_Err = templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %.1f%%", progress))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 231, Col: 102}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 383, Col: 102}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\"></div></div><small class=\"text-muted\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "\"></div></div><small class=\"text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var23 string
- templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", progress))
+ var templ_7745c5c3_Var32 string
+ templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", progress))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 234, Col: 66}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 386, Col: 66}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "</small>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if status == maintenance.TaskStatusCompleted {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar bg-success\" role=\"progressbar\" style=\"width: 100%\"></div></div><small class=\"text-success\">100%</small>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar bg-success\" role=\"progressbar\" style=\"width: 100%\"></div></div><small class=\"text-success\">100%</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<span class=\"text-muted\">-</span>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- }
- return nil
- })
-}
-
-func WorkerStatusBadge(status string) templ.Component {
- return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
- templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
- if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
- return templ_7745c5c3_CtxErr
- }
- templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
- if !templ_7745c5c3_IsBuffer {
- defer func() {
- templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
- if templ_7745c5c3_Err == nil {
- templ_7745c5c3_Err = templ_7745c5c3_BufErr
- }
- }()
- }
- ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var24 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var24 == nil {
- templ_7745c5c3_Var24 = templ.NopComponent
- }
- ctx = templ.ClearChildren(ctx)
- switch status {
- case "active":
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<span class=\"badge bg-success\">Active</span>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- case "busy":
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<span class=\"badge bg-warning\">Busy</span>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- case "inactive":
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<span class=\"badge bg-secondary\">Inactive</span>")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- default:
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<span class=\"badge bg-light text-dark\">Unknown</span>")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -551,18 +795,13 @@ func WorkerStatusBadge(status string) templ.Component {
})
}
-// Helper functions (would be defined in Go)
-
-func getWorkerStatusColor(status string) string {
- switch status {
- case "active":
- return "success"
- case "busy":
- return "warning"
- case "inactive":
- return "secondary"
- default:
- return "light"
+func formatDuration(d time.Duration) string {
+ if d < time.Minute {
+ return fmt.Sprintf("%.0fs", d.Seconds())
+ } else if d < time.Hour {
+ return fmt.Sprintf("%.1fm", d.Minutes())
+ } else {
+ return fmt.Sprintf("%.1fh", d.Hours())
}
}
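The formatDuration helper added above picks a single unit for the Duration column. A standalone copy with example outputs, included only so the behavior can be checked in isolation:

```go
package main

import (
	"fmt"
	"time"
)

// Standalone copy of the formatDuration helper shown in the diff above,
// duplicated here only so the example runs on its own.
func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.0fs", d.Seconds())
	} else if d < time.Hour {
		return fmt.Sprintf("%.1fm", d.Minutes())
	}
	return fmt.Sprintf("%.1fh", d.Hours())
}

func main() {
	fmt.Println(formatDuration(42 * time.Second))  // "42s"
	fmt.Println(formatDuration(150 * time.Second)) // "2.5m"
	fmt.Println(formatDuration(90 * time.Minute))  // "1.5h"
}
```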
diff --git a/weed/admin/view/app/task_config_schema.templ b/weed/admin/view/app/task_config_schema.templ
new file mode 100644
index 000000000..174a8f580
--- /dev/null
+++ b/weed/admin/view/app/task_config_schema.templ
@@ -0,0 +1,486 @@
+package app
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/admin/view/components"
+)
+
+// Helper function to convert task schema to JSON string
+func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string {
+ if schema == nil {
+ return "{}"
+ }
+
+ data := map[string]interface{}{
+ "fields": schema.Fields,
+ }
+
+ jsonBytes, err := json.Marshal(data)
+ if err != nil {
+ return "{}"
+ }
+
+ return string(jsonBytes)
+}
+
+// Helper function to base64 encode the JSON to avoid HTML escaping issues
+func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string {
+ jsonStr := taskSchemaToJSON(schema)
+ return base64.StdEncoding.EncodeToString([]byte(jsonStr))
+}
+
+templ TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) {
+ <div class="container-fluid">
+ <div class="row mb-4">
+ <div class="col-12">
+ <div class="d-flex justify-content-between align-items-center">
+ <h2 class="mb-0">
+ <i class={schema.Icon + " me-2"}></i>
+ {schema.DisplayName} Configuration
+ </h2>
+ <div class="btn-group">
+ <a href="/maintenance/config" class="btn btn-outline-secondary">
+ <i class="fas fa-arrow-left me-1"></i>
+ Back to System Config
+ </a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Configuration Card -->
+ <div class="row">
+ <div class="col-12">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">
+ <i class="fas fa-cogs me-2"></i>
+ Task Configuration
+ </h5>
+ <p class="mb-0 text-muted small">{schema.Description}</p>
+ </div>
+ <div class="card-body">
+ <form id="taskConfigForm" method="POST">
+ <!-- Dynamically render all schema fields in defined order -->
+ for _, field := range schema.Fields {
+ @TaskConfigField(field, config)
+ }
+
+ <div class="d-flex gap-2">
+ <button type="submit" class="btn btn-primary">
+ <i class="fas fa-save me-1"></i>
+ Save Configuration
+ </button>
+ <button type="button" class="btn btn-secondary" onclick="resetToDefaults()">
+ <i class="fas fa-undo me-1"></i>
+ Reset to Defaults
+ </button>
+ </div>
+ </form>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Performance Notes Card -->
+ <div class="row mt-4">
+ <div class="col-12">
+ <div class="card">
+ <div class="card-header">
+ <h5 class="mb-0">
+ <i class="fas fa-info-circle me-2"></i>
+ Important Notes
+ </h5>
+ </div>
+ <div class="card-body">
+ <div class="alert alert-info" role="alert">
+ if schema.TaskName == "vacuum" {
+ <h6 class="alert-heading">Vacuum Operations:</h6>
+ <p class="mb-2"><strong>Performance:</strong> Vacuum operations are I/O intensive and may impact cluster performance.</p>
+ <p class="mb-2"><strong>Safety:</strong> Only volumes meeting age and garbage thresholds will be processed.</p>
+ <p class="mb-0"><strong>Recommendation:</strong> Monitor cluster load and adjust concurrent limits accordingly.</p>
+ } else if schema.TaskName == "balance" {
+ <h6 class="alert-heading">Balance Operations:</h6>
+ <p class="mb-2"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p>
+ <p class="mb-2"><strong>Safety:</strong> Requires adequate server count to ensure data safety during moves.</p>
+ <p class="mb-0"><strong>Recommendation:</strong> Run during off-peak hours to minimize impact on production workloads.</p>
+ } else if schema.TaskName == "erasure_coding" {
+ <h6 class="alert-heading">Erasure Coding Operations:</h6>
+ <p class="mb-2"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p>
+ <p class="mb-2"><strong>Durability:</strong> With 10+4 configuration, can tolerate up to 4 shard failures.</p>
+ <p class="mb-0"><strong>Configuration:</strong> Fullness ratio should be between 0.5 and 1.0 (e.g., 0.90 for 90%).</p>
+ }
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <script>
+ function resetToDefaults() {
+ if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {
+ // Reset form fields to their default values
+ const form = document.getElementById('taskConfigForm');
+ const schemaFields = window.taskConfigSchema ? window.taskConfigSchema.fields : {};
+
+ Object.keys(schemaFields).forEach(fieldName => {
+ const field = schemaFields[fieldName];
+ const element = document.getElementById(fieldName);
+
+ if (element && field.default_value !== undefined) {
+ if (field.input_type === 'checkbox') {
+ element.checked = field.default_value;
+ } else if (field.input_type === 'interval') {
+ // Handle interval fields with value and unit
+ const valueElement = document.getElementById(fieldName + '_value');
+ const unitElement = document.getElementById(fieldName + '_unit');
+ if (valueElement && unitElement && field.default_value) {
+ const defaultSeconds = field.default_value;
+ const { value, unit } = convertSecondsToTaskIntervalValueUnit(defaultSeconds);
+ valueElement.value = value;
+ unitElement.value = unit;
+ }
+ } else {
+ element.value = field.default_value;
+ }
+ }
+ });
+ }
+ }
+
+ function convertSecondsToTaskIntervalValueUnit(totalSeconds) {
+ if (totalSeconds === 0) {
+ return { value: 0, unit: 'minutes' };
+ }
+
+ // Check if it's evenly divisible by days
+ if (totalSeconds % (24 * 3600) === 0) {
+ return { value: totalSeconds / (24 * 3600), unit: 'days' };
+ }
+
+ // Check if it's evenly divisible by hours
+ if (totalSeconds % 3600 === 0) {
+ return { value: totalSeconds / 3600, unit: 'hours' };
+ }
+
+ // Default to minutes
+ return { value: totalSeconds / 60, unit: 'minutes' };
+ }
+
+ // Store schema data for JavaScript access (moved to after div is created)
+ </script>
+
+ <!-- Hidden element to store schema data -->
+ <div data-task-schema={ taskSchemaToBase64JSON(schema) } style="display: none;"></div>
+
+ <script>
+ // Load schema data now that the div exists
+ const base64Data = document.querySelector('[data-task-schema]').getAttribute('data-task-schema');
+ const jsonStr = atob(base64Data);
+ window.taskConfigSchema = JSON.parse(jsonStr);
+ </script>
+}
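The interval widget stores values in seconds, and the convertSecondsToTaskIntervalValueUnit script above picks the largest unit that divides evenly. A Go sketch of the same rule; the components.ConvertInt32SecondsToDisplayValue and GetInt32DisplayUnit helpers referenced by the template are not part of this diff, so this is only an assumed equivalent:

```go
package main

import "fmt"

// secondsToDisplay mirrors the JavaScript helper in the template: prefer
// days, then hours, then fall back to minutes. Assumed equivalent of the
// components package helpers, not their actual implementation.
func secondsToDisplay(totalSeconds int32) (value int32, unit string) {
	switch {
	case totalSeconds == 0:
		return 0, "minutes"
	case totalSeconds%(24*3600) == 0:
		return totalSeconds / (24 * 3600), "days"
	case totalSeconds%3600 == 0:
		return totalSeconds / 3600, "hours"
	default:
		return totalSeconds / 60, "minutes"
	}
}

func main() {
	fmt.Println(secondsToDisplay(86400)) // 1 days
	fmt.Println(secondsToDisplay(7200))  // 2 hours
	fmt.Println(secondsToDisplay(1800))  // 30 minutes
}
```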
+
+// TaskConfigField renders a single task configuration field based on schema with typed field lookup
+templ TaskConfigField(field *config.Field, config interface{}) {
+ if field.InputType == "interval" {
+ <!-- Interval field with number input + unit dropdown -->
+ <div class="mb-3">
+ <label for={ field.JSONName } class="form-label">
+ { field.DisplayName }
+ if field.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <div class="input-group">
+ <input
+ type="number"
+ class="form-control"
+ id={ field.JSONName + "_value" }
+ name={ field.JSONName + "_value" }
+ value={ fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getTaskConfigInt32Field(config, field.JSONName))) }
+ step="1"
+ min="1"
+ if field.Required {
+ required
+ }
+ />
+ <select
+ class="form-select"
+ id={ field.JSONName + "_unit" }
+ name={ field.JSONName + "_unit" }
+ style="max-width: 120px;"
+ if field.Required {
+ required
+ }
+ >
+ <option
+ value="minutes"
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "minutes" {
+ selected
+ }
+ >
+ Minutes
+ </option>
+ <option
+ value="hours"
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "hours" {
+ selected
+ }
+ >
+ Hours
+ </option>
+ <option
+ value="days"
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "days" {
+ selected
+ }
+ >
+ Days
+ </option>
+ </select>
+ </div>
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ } else if field.InputType == "checkbox" {
+ <!-- Checkbox field -->
+ <div class="mb-3">
+ <div class="form-check form-switch">
+ <input
+ class="form-check-input"
+ type="checkbox"
+ id={ field.JSONName }
+ name={ field.JSONName }
+ value="on"
+ if getTaskConfigBoolField(config, field.JSONName) {
+ checked
+ }
+ />
+ <label class="form-check-label" for={ field.JSONName }>
+ <strong>{ field.DisplayName }</strong>
+ </label>
+ </div>
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ } else if field.InputType == "text" {
+ <!-- Text field -->
+ <div class="mb-3">
+ <label for={ field.JSONName } class="form-label">
+ { field.DisplayName }
+ if field.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <input
+ type="text"
+ class="form-control"
+ id={ field.JSONName }
+ name={ field.JSONName }
+ value={ getTaskConfigStringField(config, field.JSONName) }
+ placeholder={ field.Placeholder }
+ if field.Required {
+ required
+ }
+ />
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ } else {
+ <!-- Number field -->
+ <div class="mb-3">
+ <label for={ field.JSONName } class="form-label">
+ { field.DisplayName }
+ if field.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <input
+ type="number"
+ class="form-control"
+ id={ field.JSONName }
+ name={ field.JSONName }
+ value={ fmt.Sprintf("%.6g", getTaskConfigFloatField(config, field.JSONName)) }
+ placeholder={ field.Placeholder }
+ if field.MinValue != nil {
+ min={ fmt.Sprintf("%v", field.MinValue) }
+ }
+ if field.MaxValue != nil {
+ max={ fmt.Sprintf("%v", field.MaxValue) }
+ }
+ step={ getTaskNumberStep(field) }
+ if field.Required {
+ required
+ }
+ />
+ if field.Description != "" {
+ <div class="form-text text-muted">{ field.Description }</div>
+ }
+ </div>
+ }
+}
+
+// Typed field getters for task configs - avoiding interface{} where possible
+func getTaskConfigBoolField(config interface{}, fieldName string) bool {
+ switch fieldName {
+ case "enabled":
+ // Use reflection only for the common 'enabled' field in BaseConfig
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ if boolVal, ok := value.(bool); ok {
+ return boolVal
+ }
+ }
+ return false
+ default:
+ // For other boolean fields, use reflection
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ if boolVal, ok := value.(bool); ok {
+ return boolVal
+ }
+ }
+ return false
+ }
+}
+
+func getTaskConfigInt32Field(config interface{}, fieldName string) int32 {
+ switch fieldName {
+ case "scan_interval_seconds", "max_concurrent":
+ // Common fields that should be int/int32
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case int32:
+ return v
+ case int:
+ return int32(v)
+ case int64:
+ return int32(v)
+ }
+ }
+ return 0
+ default:
+ // For other int fields, use reflection
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case int32:
+ return v
+ case int:
+ return int32(v)
+ case int64:
+ return int32(v)
+ case float64:
+ return int32(v)
+ }
+ }
+ return 0
+ }
+}
+
+func getTaskConfigFloatField(config interface{}, fieldName string) float64 {
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case float64:
+ return v
+ case float32:
+ return float64(v)
+ case int:
+ return float64(v)
+ case int32:
+ return float64(v)
+ case int64:
+ return float64(v)
+ }
+ }
+ return 0.0
+}
+
+func getTaskConfigStringField(config interface{}, fieldName string) string {
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ if strVal, ok := value.(string); ok {
+ return strVal
+ }
+ // Convert numbers to strings for form display
+ switch v := value.(type) {
+ case int:
+ return fmt.Sprintf("%d", v)
+ case int32:
+ return fmt.Sprintf("%d", v)
+ case int64:
+ return fmt.Sprintf("%d", v)
+ case float64:
+ return fmt.Sprintf("%.6g", v)
+ case float32:
+ return fmt.Sprintf("%.6g", v)
+ }
+ }
+ return ""
+}
+
+func getTaskNumberStep(field *config.Field) string {
+ if field.Type == config.FieldTypeFloat {
+ return "0.01"
+ }
+ return "1"
+}
+
+func getTaskFieldValue(config interface{}, fieldName string) interface{} {
+ if config == nil {
+ return nil
+ }
+
+ // Use reflection to get the field value from the config struct
+ configValue := reflect.ValueOf(config)
+ if configValue.Kind() == reflect.Ptr {
+ configValue = configValue.Elem()
+ }
+
+ if configValue.Kind() != reflect.Struct {
+ return nil
+ }
+
+ configType := configValue.Type()
+
+ for i := 0; i < configValue.NumField(); i++ {
+ field := configValue.Field(i)
+ fieldType := configType.Field(i)
+
+ // Handle embedded structs recursively (before JSON tag check)
+ if field.Kind() == reflect.Struct && fieldType.Anonymous {
+ if value := getTaskFieldValue(field.Interface(), fieldName); value != nil {
+ return value
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ // Check if this is the field we're looking for
+ if jsonTag == fieldName {
+ return field.Interface()
+ }
+ }
+
+ return nil
+}
+
+ 
\ No newline at end of file
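getTaskFieldValue above resolves a schema field name by matching json struct tags and recursing into embedded (anonymous) structs such as a shared base config. A self-contained sketch of that lookup pattern with hypothetical config types (the real task config structs live elsewhere in the worker packages and are not shown here):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Hypothetical config types for illustration only.
type BaseConfig struct {
	Enabled             bool  `json:"enabled"`
	ScanIntervalSeconds int32 `json:"scan_interval_seconds"`
}

type VacuumTaskConfig struct {
	BaseConfig
	GarbageThreshold float64 `json:"garbage_threshold"`
}

// fieldByJSONTag is a trimmed copy of the lookup used by getTaskFieldValue:
// walk the struct, recurse into anonymous (embedded) fields first, then
// match the json tag with any ",omitempty"-style options stripped.
func fieldByJSONTag(cfg interface{}, name string) interface{} {
	v := reflect.ValueOf(cfg)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil
	}
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		f, ft := v.Field(i), t.Field(i)
		if f.Kind() == reflect.Struct && ft.Anonymous {
			if val := fieldByJSONTag(f.Interface(), name); val != nil {
				return val
			}
			continue
		}
		tag := ft.Tag.Get("json")
		if idx := strings.Index(tag, ","); idx > 0 {
			tag = tag[:idx]
		}
		if tag == name {
			return f.Interface()
		}
	}
	return nil
}

func main() {
	cfg := &VacuumTaskConfig{
		BaseConfig:       BaseConfig{Enabled: true, ScanIntervalSeconds: 1800},
		GarbageThreshold: 0.3,
	}
	fmt.Println(fieldByJSONTag(cfg, "enabled"))           // true, found via embedded BaseConfig
	fmt.Println(fieldByJSONTag(cfg, "garbage_threshold")) // 0.3
}
```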
diff --git a/weed/admin/view/app/task_config_schema_templ.go b/weed/admin/view/app/task_config_schema_templ.go
new file mode 100644
index 000000000..eae4683d9
--- /dev/null
+++ b/weed/admin/view/app/task_config_schema_templ.go
@@ -0,0 +1,921 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
+ "github.com/seaweedfs/seaweedfs/weed/admin/view/components"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "reflect"
+ "strings"
+)
+
+// Helper function to convert task schema to JSON string
+func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string {
+ if schema == nil {
+ return "{}"
+ }
+
+ data := map[string]interface{}{
+ "fields": schema.Fields,
+ }
+
+ jsonBytes, err := json.Marshal(data)
+ if err != nil {
+ return "{}"
+ }
+
+ return string(jsonBytes)
+}
+
+// Helper function to base64 encode the JSON to avoid HTML escaping issues
+func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string {
+ jsonStr := taskSchemaToJSON(schema)
+ return base64.StdEncoding.EncodeToString([]byte(jsonStr))
+}
+
+func TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 = []any{schema.Icon + " me-2"}
+ templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<i class=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var2).String())
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 1, Col: 0}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "\"></i> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(schema.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 46, Col: 43}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance/config\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-arrow-left me-1\"></i> Back to System Config</a></div></div></div></div><!-- Configuration Card --><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Task Configuration</h5><p class=\"mb-0 text-muted small\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(schema.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 67, Col: 76}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</p></div><div class=\"card-body\"><form id=\"taskConfigForm\" method=\"POST\"><!-- Dynamically render all schema fields in defined order -->")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, field := range schema.Fields {
+ templ_7745c5c3_Err = TaskConfigField(field, config).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"d-flex gap-2\"><button type=\"submit\" class=\"btn btn-primary\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Performance Notes Card --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-info-circle me-2\"></i> Important Notes</h5></div><div class=\"card-body\"><div class=\"alert alert-info\" role=\"alert\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if schema.TaskName == "vacuum" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<h6 class=\"alert-heading\">Vacuum Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Vacuum operations are I/O intensive and may impact cluster performance.</p><p class=\"mb-2\"><strong>Safety:</strong> Only volumes meeting age and garbage thresholds will be processed.</p><p class=\"mb-0\"><strong>Recommendation:</strong> Monitor cluster load and adjust concurrent limits accordingly.</p>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if schema.TaskName == "balance" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<h6 class=\"alert-heading\">Balance Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p><p class=\"mb-2\"><strong>Safety:</strong> Requires adequate server count to ensure data safety during moves.</p><p class=\"mb-0\"><strong>Recommendation:</strong> Run during off-peak hours to minimize impact on production workloads.</p>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if schema.TaskName == "erasure_coding" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "<h6 class=\"alert-heading\">Erasure Coding Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p><p class=\"mb-2\"><strong>Durability:</strong> With 10+4 configuration, can tolerate up to 4 shard failures.</p><p class=\"mb-0\"><strong>Configuration:</strong> Fullness ratio should be between 0.5 and 1.0 (e.g., 0.90 for 90%).</p>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</div></div></div></div></div></div><script>\n function resetToDefaults() {\n if (confirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.')) {\n // Reset form fields to their default values\n const form = document.getElementById('taskConfigForm');\n const schemaFields = window.taskConfigSchema ? window.taskConfigSchema.fields : {};\n \n Object.keys(schemaFields).forEach(fieldName => {\n const field = schemaFields[fieldName];\n const element = document.getElementById(fieldName);\n \n if (element && field.default_value !== undefined) {\n if (field.input_type === 'checkbox') {\n element.checked = field.default_value;\n } else if (field.input_type === 'interval') {\n // Handle interval fields with value and unit\n const valueElement = document.getElementById(fieldName + '_value');\n const unitElement = document.getElementById(fieldName + '_unit');\n if (valueElement && unitElement && field.default_value) {\n const defaultSeconds = field.default_value;\n const { value, unit } = convertSecondsToTaskIntervalValueUnit(defaultSeconds);\n valueElement.value = value;\n unitElement.value = unit;\n }\n } else {\n element.value = field.default_value;\n }\n }\n });\n }\n }\n\n function convertSecondsToTaskIntervalValueUnit(totalSeconds) {\n if (totalSeconds === 0) {\n return { value: 0, unit: 'minutes' };\n }\n\n // Check if it's evenly divisible by days\n if (totalSeconds % (24 * 3600) === 0) {\n return { value: totalSeconds / (24 * 3600), unit: 'days' };\n }\n\n // Check if it's evenly divisible by hours\n if (totalSeconds % 3600 === 0) {\n return { value: totalSeconds / 3600, unit: 'hours' };\n }\n\n // Default to minutes\n return { value: totalSeconds / 60, unit: 'minutes' };\n }\n\n // Store schema data for JavaScript access (moved to after div is created)\n </script><!-- Hidden element to store schema data --><div data-task-schema=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(taskSchemaToBase64JSON(schema))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 182, Col: 58}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\" style=\"display: none;\"></div><script>\n // Load schema data now that the div exists\n const base64Data = document.querySelector('[data-task-schema]').getAttribute('data-task-schema');\n const jsonStr = atob(base64Data);\n window.taskConfigSchema = JSON.parse(jsonStr);\n </script>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// TaskConfigField renders a single task configuration field based on schema with typed field lookup
+func TaskConfigField(field *config.Field, config interface{}) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var7 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var7 == nil {
+ templ_7745c5c3_Var7 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if field.InputType == "interval" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "<!-- Interval field with number input + unit dropdown --> <div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 197, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 198, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</label><div class=\"input-group\"><input type=\"number\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 207, Col: 50}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 208, Col: 52}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getTaskConfigInt32Field(config, field.JSONName))))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 209, Col: 142}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\" step=\"1\" min=\"1\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "> <select class=\"form-select\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 218, Col: 49}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 219, Col: 51}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "\" style=\"max-width: 120px;\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "><option value=\"minutes\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "minutes" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, ">Minutes</option> <option value=\"hours\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "hours" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, ">Hours</option> <option value=\"days\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "days" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, ">Days</option></select></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 252, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if field.InputType == "checkbox" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<!-- Checkbox field --> <div class=\"mb-3\"><div class=\"form-check form-switch\"><input class=\"form-check-input\" type=\"checkbox\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 262, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 263, Col: 41}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\" value=\"on\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if getTaskConfigBoolField(config, field.JSONName) {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " checked")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "> <label class=\"form-check-label\" for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var18 string
+ templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 269, Col: 68}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\"><strong>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var19 string
+ templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 270, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "</strong></label></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var20 string
+ templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 274, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else if field.InputType == "text" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<!-- Text field --> <div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var21 string
+ templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 280, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var22 string
+ templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 281, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</label> <input type=\"text\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 289, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 290, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(getTaskConfigStringField(config, field.JSONName))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 291, Col: 72}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "\" placeholder=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var26 string
+ templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(field.Placeholder)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 292, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 298, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "<!-- Number field --> <div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var28 string
+ templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 304, Col: 39}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var29 string
+ templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 305, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "</label> <input type=\"number\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var30 string
+ templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 313, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var31 string
+ templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 314, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var32 string
+ templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.6g", getTaskConfigFloatField(config, field.JSONName)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 315, Col: 92}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "\" placeholder=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var33 string
+ templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(field.Placeholder)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 316, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.MinValue != nil {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, " min=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var34 string
+ templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MinValue))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 318, Col: 59}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if field.MaxValue != nil {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, " max=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var35 string
+ templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MaxValue))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 321, Col: 59}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, " step=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var36 string
+ templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(getTaskNumberStep(field))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 323, Col: 47}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "> ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if field.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var37 string
+ templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 329, Col: 69}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ return nil
+ })
+}
+
+// Typed field getters for task configs - avoiding interface{} where possible
+func getTaskConfigBoolField(config interface{}, fieldName string) bool {
+ // All boolean fields, including the common 'enabled' field from BaseConfig, are
+ // resolved the same way: reflect on the struct and match the JSON tag.
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ if boolVal, ok := value.(bool); ok {
+ return boolVal
+ }
+ }
+ return false
+}
+
+func getTaskConfigInt32Field(config interface{}, fieldName string) int32 {
+ switch fieldName {
+ case "scan_interval_seconds", "max_concurrent":
+ // Common fields that should be int/int32
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case int32:
+ return v
+ case int:
+ return int32(v)
+ case int64:
+ return int32(v)
+ }
+ }
+ return 0
+ default:
+ // For other int fields, use reflection
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case int32:
+ return v
+ case int:
+ return int32(v)
+ case int64:
+ return int32(v)
+ case float64:
+ return int32(v)
+ }
+ }
+ return 0
+ }
+}
+
+func getTaskConfigFloatField(config interface{}, fieldName string) float64 {
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ switch v := value.(type) {
+ case float64:
+ return v
+ case float32:
+ return float64(v)
+ case int:
+ return float64(v)
+ case int32:
+ return float64(v)
+ case int64:
+ return float64(v)
+ }
+ }
+ return 0.0
+}
+
+func getTaskConfigStringField(config interface{}, fieldName string) string {
+ if value := getTaskFieldValue(config, fieldName); value != nil {
+ if strVal, ok := value.(string); ok {
+ return strVal
+ }
+ // Convert numbers to strings for form display
+ switch v := value.(type) {
+ case int:
+ return fmt.Sprintf("%d", v)
+ case int32:
+ return fmt.Sprintf("%d", v)
+ case int64:
+ return fmt.Sprintf("%d", v)
+ case float64:
+ return fmt.Sprintf("%.6g", v)
+ case float32:
+ return fmt.Sprintf("%.6g", v)
+ }
+ }
+ return ""
+}
+
+func getTaskNumberStep(field *config.Field) string {
+ if field.Type == config.FieldTypeFloat {
+ return "0.01"
+ }
+ return "1"
+}
+
+func getTaskFieldValue(config interface{}, fieldName string) interface{} {
+ if config == nil {
+ return nil
+ }
+
+ // Use reflection to get the field value from the config struct
+ configValue := reflect.ValueOf(config)
+ if configValue.Kind() == reflect.Ptr {
+ configValue = configValue.Elem()
+ }
+
+ if configValue.Kind() != reflect.Struct {
+ return nil
+ }
+
+ configType := configValue.Type()
+
+ for i := 0; i < configValue.NumField(); i++ {
+ field := configValue.Field(i)
+ fieldType := configType.Field(i)
+
+ // Handle embedded structs recursively (before JSON tag check)
+ if field.Kind() == reflect.Struct && fieldType.Anonymous {
+ if value := getTaskFieldValue(field.Interface(), fieldName); value != nil {
+ return value
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ // Check if this is the field we're looking for
+ if jsonTag == fieldName {
+ return field.Interface()
+ }
+ }
+
+ return nil
+}
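+
+// Editor's sketch of how the lookup above behaves, using hypothetical types (the real
+// cases are exercised in task_config_schema_test.go): an exported embedded struct is
+// searched recursively, so both base and task-specific fields resolve by JSON tag.
+//
+//	type ExampleBase struct {
+//		Enabled bool `json:"enabled"`
+//	}
+//	type ExampleTaskConfig struct {
+//		ExampleBase
+//		Threshold float64 `json:"threshold"`
+//	}
+//
+//	cfg := &ExampleTaskConfig{ExampleBase: ExampleBase{Enabled: true}, Threshold: 0.25}
+//	getTaskConfigBoolField(cfg, "enabled")    // true, found via the embedded ExampleBase
+//	getTaskConfigFloatField(cfg, "threshold") // 0.25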
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/task_config_schema_test.go b/weed/admin/view/app/task_config_schema_test.go
new file mode 100644
index 000000000..a4e2a8bc4
--- /dev/null
+++ b/weed/admin/view/app/task_config_schema_test.go
@@ -0,0 +1,232 @@
+package app
+
+import (
+ "testing"
+)
+
+// Test structs that mirror the actual configuration structure
+type TestBaseConfigForTemplate struct {
+ Enabled bool `json:"enabled"`
+ ScanIntervalSeconds int `json:"scan_interval_seconds"`
+ MaxConcurrent int `json:"max_concurrent"`
+}
+
+type TestTaskConfigForTemplate struct {
+ TestBaseConfigForTemplate
+ TaskSpecificField float64 `json:"task_specific_field"`
+ AnotherSpecificField string `json:"another_specific_field"`
+}
+
+func TestGetTaskFieldValue_EmbeddedStructFields(t *testing.T) {
+ config := &TestTaskConfigForTemplate{
+ TestBaseConfigForTemplate: TestBaseConfigForTemplate{
+ Enabled: true,
+ ScanIntervalSeconds: 2400,
+ MaxConcurrent: 5,
+ },
+ TaskSpecificField: 0.18,
+ AnotherSpecificField: "test_value",
+ }
+
+ // Test embedded struct fields
+ tests := []struct {
+ fieldName string
+ expectedValue interface{}
+ description string
+ }{
+ {"enabled", true, "BaseConfig boolean field"},
+ {"scan_interval_seconds", 2400, "BaseConfig integer field"},
+ {"max_concurrent", 5, "BaseConfig integer field"},
+ {"task_specific_field", 0.18, "Task-specific float field"},
+ {"another_specific_field", "test_value", "Task-specific string field"},
+ }
+
+ for _, test := range tests {
+ t.Run(test.description, func(t *testing.T) {
+ result := getTaskFieldValue(config, test.fieldName)
+
+ if result != test.expectedValue {
+ t.Errorf("Field %s: expected %v (%T), got %v (%T)",
+ test.fieldName, test.expectedValue, test.expectedValue, result, result)
+ }
+ })
+ }
+}
+
+func TestGetTaskFieldValue_NonExistentField(t *testing.T) {
+ config := &TestTaskConfigForTemplate{
+ TestBaseConfigForTemplate: TestBaseConfigForTemplate{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 3,
+ },
+ }
+
+ result := getTaskFieldValue(config, "non_existent_field")
+
+ if result != nil {
+ t.Errorf("Expected nil for non-existent field, got %v", result)
+ }
+}
+
+func TestGetTaskFieldValue_NilConfig(t *testing.T) {
+ var config *TestTaskConfigForTemplate = nil
+
+ result := getTaskFieldValue(config, "enabled")
+
+ if result != nil {
+ t.Errorf("Expected nil for nil config, got %v", result)
+ }
+}
+
+func TestGetTaskFieldValue_EmptyStruct(t *testing.T) {
+ config := &TestTaskConfigForTemplate{}
+
+ // Test that we can extract zero values
+ tests := []struct {
+ fieldName string
+ expectedValue interface{}
+ description string
+ }{
+ {"enabled", false, "Zero value boolean"},
+ {"scan_interval_seconds", 0, "Zero value integer"},
+ {"max_concurrent", 0, "Zero value integer"},
+ {"task_specific_field", 0.0, "Zero value float"},
+ {"another_specific_field", "", "Zero value string"},
+ }
+
+ for _, test := range tests {
+ t.Run(test.description, func(t *testing.T) {
+ result := getTaskFieldValue(config, test.fieldName)
+
+ if result != test.expectedValue {
+ t.Errorf("Field %s: expected %v (%T), got %v (%T)",
+ test.fieldName, test.expectedValue, test.expectedValue, result, result)
+ }
+ })
+ }
+}
+
+func TestGetTaskFieldValue_NonStructConfig(t *testing.T) {
+ var config interface{} = "not a struct"
+
+ result := getTaskFieldValue(config, "enabled")
+
+ if result != nil {
+ t.Errorf("Expected nil for non-struct config, got %v", result)
+ }
+}
+
+func TestGetTaskFieldValue_PointerToStruct(t *testing.T) {
+ config := &TestTaskConfigForTemplate{
+ TestBaseConfigForTemplate: TestBaseConfigForTemplate{
+ Enabled: false,
+ ScanIntervalSeconds: 900,
+ MaxConcurrent: 2,
+ },
+ TaskSpecificField: 0.35,
+ }
+
+ // Test that pointers are handled correctly
+ enabledResult := getTaskFieldValue(config, "enabled")
+ if enabledResult != false {
+ t.Errorf("Expected false for enabled field, got %v", enabledResult)
+ }
+
+ intervalResult := getTaskFieldValue(config, "scan_interval_seconds")
+ if intervalResult != 900 {
+ t.Errorf("Expected 900 for scan_interval_seconds field, got %v", intervalResult)
+ }
+}
+
+func TestGetTaskFieldValue_FieldsWithJSONOmitempty(t *testing.T) {
+ // Test struct with omitempty tags
+ type TestConfigWithOmitempty struct {
+ TestBaseConfigForTemplate
+ OptionalField string `json:"optional_field,omitempty"`
+ }
+
+ config := &TestConfigWithOmitempty{
+ TestBaseConfigForTemplate: TestBaseConfigForTemplate{
+ Enabled: true,
+ ScanIntervalSeconds: 1200,
+ MaxConcurrent: 4,
+ },
+ OptionalField: "optional_value",
+ }
+
+ // Test that fields with omitempty are still found
+ result := getTaskFieldValue(config, "optional_field")
+ if result != "optional_value" {
+ t.Errorf("Expected 'optional_value' for optional_field, got %v", result)
+ }
+
+ // Test embedded fields still work
+ enabledResult := getTaskFieldValue(config, "enabled")
+ if enabledResult != true {
+ t.Errorf("Expected true for enabled field, got %v", enabledResult)
+ }
+}
+
+func TestGetTaskFieldValue_DeepEmbedding(t *testing.T) {
+ // Test with multiple levels of embedding
+ type DeepBaseConfig struct {
+ DeepField string `json:"deep_field"`
+ }
+
+ type MiddleConfig struct {
+ DeepBaseConfig
+ MiddleField int `json:"middle_field"`
+ }
+
+ type TopConfig struct {
+ MiddleConfig
+ TopField bool `json:"top_field"`
+ }
+
+ config := &TopConfig{
+ MiddleConfig: MiddleConfig{
+ DeepBaseConfig: DeepBaseConfig{
+ DeepField: "deep_value",
+ },
+ MiddleField: 123,
+ },
+ TopField: true,
+ }
+
+ // Test that deeply embedded fields are found
+ deepResult := getTaskFieldValue(config, "deep_field")
+ if deepResult != "deep_value" {
+ t.Errorf("Expected 'deep_value' for deep_field, got %v", deepResult)
+ }
+
+ middleResult := getTaskFieldValue(config, "middle_field")
+ if middleResult != 123 {
+ t.Errorf("Expected 123 for middle_field, got %v", middleResult)
+ }
+
+ topResult := getTaskFieldValue(config, "top_field")
+ if topResult != true {
+ t.Errorf("Expected true for top_field, got %v", topResult)
+ }
+}
+
+// Benchmark to ensure performance is reasonable
+func BenchmarkGetTaskFieldValue(b *testing.B) {
+ config := &TestTaskConfigForTemplate{
+ TestBaseConfigForTemplate: TestBaseConfigForTemplate{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 3,
+ },
+ TaskSpecificField: 0.25,
+ AnotherSpecificField: "benchmark_test",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ // Test both embedded and regular fields
+ _ = getTaskFieldValue(config, "enabled")
+ _ = getTaskFieldValue(config, "task_specific_field")
+ }
+}
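+
+// Editor's note: with the package path introduced in this change, the tests and the
+// benchmark above can be run in isolation with the standard Go tooling, e.g.
+//
+//	go test ./weed/admin/view/app/ -run TestGetTaskFieldValue
+//	go test ./weed/admin/view/app/ -bench BenchmarkGetTaskFieldValue -benchmem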
diff --git a/weed/admin/view/components/form_fields.templ b/weed/admin/view/components/form_fields.templ
index 5ac5c9241..82d20d407 100644
--- a/weed/admin/view/components/form_fields.templ
+++ b/weed/admin/view/components/form_fields.templ
@@ -269,6 +269,55 @@ templ DurationInputField(data DurationInputFieldData) {
}
// Helper functions for duration conversion (used by DurationInputField)
+
+// Typed conversion functions for protobuf int32 (most common) - EXPORTED
+func ConvertInt32SecondsToDisplayValue(seconds int32) float64 {
+ return convertIntSecondsToDisplayValue(int(seconds))
+}
+
+func GetInt32DisplayUnit(seconds int32) string {
+ return getIntDisplayUnit(int(seconds))
+}
+
+// Typed conversion functions for regular int
+func convertIntSecondsToDisplayValue(seconds int) float64 {
+ if seconds == 0 {
+ return 0
+ }
+
+ // Check if it's evenly divisible by days
+ if seconds%(24*3600) == 0 {
+ return float64(seconds / (24 * 3600))
+ }
+
+ // Check if it's evenly divisible by hours
+ if seconds%3600 == 0 {
+ return float64(seconds / 3600)
+ }
+
+ // Default to minutes
+ return float64(seconds / 60)
+}
+
+func getIntDisplayUnit(seconds int) string {
+ if seconds == 0 {
+ return "minutes"
+ }
+
+ // Check if it's evenly divisible by days
+ if seconds%(24*3600) == 0 {
+ return "days"
+ }
+
+ // Check if it's evenly divisible by hours
+ if seconds%3600 == 0 {
+ return "hours"
+ }
+
+ // Default to minutes
+ return "minutes"
+}
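+
+// Editor's sketch of the expected conversions (values follow directly from the logic above):
+//
+//	ConvertInt32SecondsToDisplayValue(86400) // 1,  GetInt32DisplayUnit(86400) == "days"
+//	ConvertInt32SecondsToDisplayValue(7200)  // 2,  GetInt32DisplayUnit(7200)  == "hours"
+//	ConvertInt32SecondsToDisplayValue(90)    // 1,  GetInt32DisplayUnit(90)    == "minutes"
+//	                                         // (90s is not a whole number of hours or days, so it
+//	                                         // falls back to minutes and integer division truncates)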
+
func convertSecondsToUnit(seconds int) string {
if seconds == 0 {
return "minutes"
@@ -303,4 +352,73 @@ func convertSecondsToValue(seconds int, unit string) float64 {
default:
return float64(seconds / 60) // Default to minutes
}
+}
+
+// IntervalFieldData represents interval input field data with separate value and unit
+type IntervalFieldData struct {
+ FormFieldData
+ Seconds int // The interval value in seconds
+}
+
+// IntervalField renders a Bootstrap interval input with number + unit dropdown (like task config)
+templ IntervalField(data IntervalFieldData) {
+ <div class="mb-3">
+ <label for={ data.Name } class="form-label">
+ { data.Label }
+ if data.Required {
+ <span class="text-danger">*</span>
+ }
+ </label>
+ <div class="input-group">
+ <input
+ type="number"
+ class="form-control"
+ id={ data.Name + "_value" }
+ name={ data.Name + "_value" }
+ value={ fmt.Sprintf("%.0f", convertSecondsToValue(data.Seconds, convertSecondsToUnit(data.Seconds))) }
+ step="1"
+ min="1"
+ if data.Required {
+ required
+ }
+ />
+ <select
+ class="form-select"
+ id={ data.Name + "_unit" }
+ name={ data.Name + "_unit" }
+ style="max-width: 120px;"
+ if data.Required {
+ required
+ }
+ >
+ <option
+ value="minutes"
+ if convertSecondsToUnit(data.Seconds) == "minutes" {
+ selected
+ }
+ >
+ Minutes
+ </option>
+ <option
+ value="hours"
+ if convertSecondsToUnit(data.Seconds) == "hours" {
+ selected
+ }
+ >
+ Hours
+ </option>
+ <option
+ value="days"
+ if convertSecondsToUnit(data.Seconds) == "days" {
+ selected
+ }
+ >
+ Days
+ </option>
+ </select>
+ </div>
+ if data.Description != "" {
+ <div class="form-text text-muted">{ data.Description }</div>
+ }
+ </div>
+}
\ No newline at end of file
diff --git a/weed/admin/view/components/form_fields_templ.go b/weed/admin/view/components/form_fields_templ.go
index 937082a17..d2ebd0125 100644
--- a/weed/admin/view/components/form_fields_templ.go
+++ b/weed/admin/view/components/form_fields_templ.go
@@ -1065,6 +1065,55 @@ func DurationInputField(data DurationInputFieldData) templ.Component {
}
// Helper functions for duration conversion (used by DurationInputField)
+
+// Typed conversion functions for protobuf int32 (most common) - EXPORTED
+func ConvertInt32SecondsToDisplayValue(seconds int32) float64 {
+ return convertIntSecondsToDisplayValue(int(seconds))
+}
+
+func GetInt32DisplayUnit(seconds int32) string {
+ return getIntDisplayUnit(int(seconds))
+}
+
+// Typed conversion functions for regular int
+func convertIntSecondsToDisplayValue(seconds int) float64 {
+ if seconds == 0 {
+ return 0
+ }
+
+ // Check if it's evenly divisible by days
+ if seconds%(24*3600) == 0 {
+ return float64(seconds / (24 * 3600))
+ }
+
+ // Check if it's evenly divisible by hours
+ if seconds%3600 == 0 {
+ return float64(seconds / 3600)
+ }
+
+ // Default to minutes
+ return float64(seconds / 60)
+}
+
+func getIntDisplayUnit(seconds int) string {
+ if seconds == 0 {
+ return "minutes"
+ }
+
+ // Check if it's evenly divisible by days
+ if seconds%(24*3600) == 0 {
+ return "days"
+ }
+
+ // Check if it's evenly divisible by hours
+ if seconds%3600 == 0 {
+ return "hours"
+ }
+
+ // Default to minutes
+ return "minutes"
+}
+
func convertSecondsToUnit(seconds int) string {
if seconds == 0 {
return "minutes"
@@ -1101,4 +1150,214 @@ func convertSecondsToValue(seconds int, unit string) float64 {
}
}
+// IntervalFieldData represents interval input field data with separate value and unit
+type IntervalFieldData struct {
+ FormFieldData
+ Seconds int // The interval value in seconds
+}
+
+// IntervalField renders a Bootstrap interval input with number + unit dropdown (like task config)
+func IntervalField(data IntervalFieldData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var50 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var50 == nil {
+ templ_7745c5c3_Var50 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "<div class=\"mb-3\"><label for=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var51 string
+ templ_7745c5c3_Var51, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 366, Col: 24}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var51))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, "\" class=\"form-label\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var52 string
+ templ_7745c5c3_Var52, templ_7745c5c3_Err = templ.JoinStringErrs(data.Label)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 367, Col: 15}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var52))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, "<span class=\"text-danger\">*</span>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, "</label><div class=\"input-group\"><input type=\"number\" class=\"form-control\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var53 string
+ templ_7745c5c3_Var53, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 376, Col: 29}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var53))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var54 string
+ templ_7745c5c3_Var54, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name + "_value")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 377, Col: 31}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var54))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, "\" value=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var55 string
+ templ_7745c5c3_Var55, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", convertSecondsToValue(data.Seconds, convertSecondsToUnit(data.Seconds))))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 378, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var55))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, "\" step=\"1\" min=\"1\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 109, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 110, "> <select class=\"form-select\" id=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var56 string
+ templ_7745c5c3_Var56, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 387, Col: 28}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var56))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 111, "\" name=\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var57 string
+ templ_7745c5c3_Var57, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name + "_unit")
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 388, Col: 30}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var57))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 112, "\" style=\"max-width: 120px;\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Required {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 113, " required")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 114, "><option value=\"minutes\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if convertSecondsToUnit(data.Seconds) == "minutes" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 115, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 116, ">Minutes</option> <option value=\"hours\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if convertSecondsToUnit(data.Seconds) == "hours" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 117, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 118, ">Hours</option> <option value=\"days\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if convertSecondsToUnit(data.Seconds) == "days" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 119, " selected")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 120, ">Days</option></select></div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Description != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 121, "<div class=\"form-text text-muted\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var58 string
+ templ_7745c5c3_Var58, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 421, Col: 55}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var58))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 122, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 123, "</div>")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
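+
+// Editor's note — a hedged usage sketch for IntervalField in a .templ file; the FormFieldData
+// field names come from the template source above, the concrete values are illustrative only:
+//
+//	@components.IntervalField(components.IntervalFieldData{
+//		FormFieldData: components.FormFieldData{
+//			Name:        "scan_interval",
+//			Label:       "Scan Interval",
+//			Description: "How often to scan for new work",
+//			Required:    true,
+//		},
+//		Seconds: 1800, // shown as 30 minutes by convertSecondsToValue/convertSecondsToUnit
+//	})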
+
var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/layout/layout.templ b/weed/admin/view/layout/layout.templ
index b5e2cefbf..cd192fa44 100644
--- a/weed/admin/view/layout/layout.templ
+++ b/weed/admin/view/layout/layout.templ
@@ -112,6 +112,11 @@ templ Layout(c *gin.Context, content templ.Component) {
</a>
</li>
<li class="nav-item">
+ <a class="nav-link py-2" href="/cluster/ec-shards">
+ <i class="fas fa-th-large me-2"></i>EC Volumes
+ </a>
+ </li>
+ <li class="nav-item">
<a class="nav-link py-2" href="/cluster/collections">
<i class="fas fa-layer-group me-2"></i>Collections
</a>
diff --git a/weed/admin/view/layout/layout_templ.go b/weed/admin/view/layout/layout_templ.go
index 562faa677..4b15c658d 100644
--- a/weed/admin/view/layout/layout_templ.go
+++ b/weed/admin/view/layout/layout_templ.go
@@ -62,7 +62,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</a><ul class=\"dropdown-menu\"><li><a class=\"dropdown-item\" href=\"/logout\"><i class=\"fas fa-sign-out-alt me-2\"></i>Logout</a></li></ul></li></ul></div></div></header><div class=\"row g-0\"><!-- Sidebar --><div class=\"col-md-3 col-lg-2 d-md-block bg-light sidebar collapse\"><div class=\"position-sticky pt-3\"><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MAIN</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/admin\"><i class=\"fas fa-tachometer-alt me-2\"></i>Dashboard</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#clusterSubmenu\" aria-expanded=\"false\" aria-controls=\"clusterSubmenu\"><i class=\"fas fa-sitemap me-2\"></i>Cluster <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"clusterSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/masters\"><i class=\"fas fa-crown me-2\"></i>Masters</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volume-servers\"><i class=\"fas fa-server me-2\"></i>Volume Servers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/filers\"><i class=\"fas fa-folder-open me-2\"></i>Filers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volumes\"><i class=\"fas fa-database me-2\"></i>Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/collections\"><i class=\"fas fa-layer-group me-2\"></i>Collections</a></li></ul></div></li></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MANAGEMENT</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/files\"><i class=\"fas fa-folder me-2\"></i>File Browser</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#objectStoreSubmenu\" aria-expanded=\"false\" aria-controls=\"objectStoreSubmenu\"><i class=\"fas fa-cloud me-2\"></i>Object Store <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"objectStoreSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/buckets\"><i class=\"fas fa-cube me-2\"></i>Buckets</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/users\"><i class=\"fas fa-users me-2\"></i>Users</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/policies\"><i class=\"fas fa-shield-alt me-2\"></i>Policies</a></li></ul></div></li><li class=\"nav-item\">")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</a><ul class=\"dropdown-menu\"><li><a class=\"dropdown-item\" href=\"/logout\"><i class=\"fas fa-sign-out-alt me-2\"></i>Logout</a></li></ul></li></ul></div></div></header><div class=\"row g-0\"><!-- Sidebar --><div class=\"col-md-3 col-lg-2 d-md-block bg-light sidebar collapse\"><div class=\"position-sticky pt-3\"><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MAIN</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/admin\"><i class=\"fas fa-tachometer-alt me-2\"></i>Dashboard</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#clusterSubmenu\" aria-expanded=\"false\" aria-controls=\"clusterSubmenu\"><i class=\"fas fa-sitemap me-2\"></i>Cluster <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"clusterSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/masters\"><i class=\"fas fa-crown me-2\"></i>Masters</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volume-servers\"><i class=\"fas fa-server me-2\"></i>Volume Servers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/filers\"><i class=\"fas fa-folder-open me-2\"></i>Filers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volumes\"><i class=\"fas fa-database me-2\"></i>Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/ec-shards\"><i class=\"fas fa-th-large me-2\"></i>EC Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/collections\"><i class=\"fas fa-layer-group me-2\"></i>Collections</a></li></ul></div></li></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MANAGEMENT</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/files\"><i class=\"fas fa-folder me-2\"></i>File Browser</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#objectStoreSubmenu\" aria-expanded=\"false\" aria-controls=\"objectStoreSubmenu\"><i class=\"fas fa-cloud me-2\"></i>Object Store <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"objectStoreSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/buckets\"><i class=\"fas fa-cube me-2\"></i>Buckets</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/users\"><i class=\"fas fa-users me-2\"></i>Users</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/policies\"><i class=\"fas fa-shield-alt me-2\"></i>Policies</a></li></ul></div></li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -153,7 +153,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var3 templ.SafeURL
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 253, Col: 117}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 258, Col: 117}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
@@ -188,7 +188,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 254, Col: 109}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 259, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
@@ -206,7 +206,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var7 templ.SafeURL
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 257, Col: 110}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 262, Col: 110}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
@@ -241,7 +241,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 258, Col: 109}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 263, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
@@ -274,7 +274,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var11 templ.SafeURL
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 270, Col: 106}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 275, Col: 106}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
@@ -309,7 +309,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 271, Col: 105}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 276, Col: 105}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
@@ -370,7 +370,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", time.Now().Year()))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 318, Col: 60}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 323, Col: 60}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
@@ -383,7 +383,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(version.VERSION_NUMBER)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 318, Col: 102}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 323, Col: 102}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
@@ -435,7 +435,7 @@ func LoginForm(c *gin.Context, title string, errorMessage string) templ.Componen
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 342, Col: 17}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 347, Col: 17}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
@@ -448,7 +448,7 @@ func LoginForm(c *gin.Context, title string, errorMessage string) templ.Componen
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 356, Col: 57}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 361, Col: 57}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
@@ -466,7 +466,7 @@ func LoginForm(c *gin.Context, title string, errorMessage string) templ.Componen
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(errorMessage)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 363, Col: 45}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 368, Col: 45}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
diff --git a/weed/command/admin.go b/weed/command/admin.go
index 6ac42330c..c1b55f105 100644
--- a/weed/command/admin.go
+++ b/weed/command/admin.go
@@ -33,6 +33,7 @@ var (
type AdminOptions struct {
port *int
+ grpcPort *int
masters *string
adminUser *string
adminPassword *string
@@ -42,6 +43,7 @@ type AdminOptions struct {
func init() {
cmdAdmin.Run = runAdmin // break init cycle
a.port = cmdAdmin.Flag.Int("port", 23646, "admin server port")
+ a.grpcPort = cmdAdmin.Flag.Int("port.grpc", 0, "gRPC server port for worker connections (default: http port + 10000)")
a.masters = cmdAdmin.Flag.String("masters", "localhost:9333", "comma-separated master servers")
a.dataDir = cmdAdmin.Flag.String("dataDir", "", "directory to store admin configuration and data files")
@@ -50,7 +52,7 @@ func init() {
}
var cmdAdmin = &Command{
- UsageLine: "admin -port=23646 -masters=localhost:9333 [-dataDir=/path/to/data]",
+ UsageLine: "admin -port=23646 -masters=localhost:9333 [-port.grpc=33646] [-dataDir=/path/to/data]",
Short: "start SeaweedFS web admin interface",
Long: `Start a web admin interface for SeaweedFS cluster management.
@@ -63,12 +65,13 @@ var cmdAdmin = &Command{
- Maintenance operations
The admin interface automatically discovers filers from the master servers.
- A gRPC server for worker connections runs on HTTP port + 10000.
+ A gRPC server for worker connections runs on the configured gRPC port (default: HTTP port + 10000).
Example Usage:
weed admin -port=23646 -masters="master1:9333,master2:9333"
weed admin -port=23646 -masters="localhost:9333" -dataDir="/var/lib/seaweedfs-admin"
- weed admin -port=23646 -masters="localhost:9333" -dataDir="~/seaweedfs-admin"
+ weed admin -port=23646 -port.grpc=33646 -masters="localhost:9333" -dataDir="~/seaweedfs-admin"
+ weed admin -port=9900 -port.grpc=19900 -masters="localhost:9333"
Data Directory:
- If dataDir is specified, admin configuration and maintenance data is persisted
@@ -128,6 +131,11 @@ func runAdmin(cmd *Command, args []string) bool {
return false
}
+ // Set default gRPC port if not specified
+ if *a.grpcPort == 0 {
+ *a.grpcPort = *a.port + 10000
+ }
+
// Security warnings
if *a.adminPassword == "" {
fmt.Println("WARNING: Admin interface is running without authentication!")
@@ -135,6 +143,7 @@ func runAdmin(cmd *Command, args []string) bool {
}
fmt.Printf("Starting SeaweedFS Admin Interface on port %d\n", *a.port)
+ fmt.Printf("Worker gRPC server will run on port %d\n", *a.grpcPort)
fmt.Printf("Masters: %s\n", *a.masters)
fmt.Printf("Filers will be discovered automatically from masters\n")
if *a.dataDir != "" {
@@ -232,7 +241,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
}
// Start worker gRPC server for worker connections
- err = adminServer.StartWorkerGrpcServer(*options.port)
+ err = adminServer.StartWorkerGrpcServer(*options.grpcPort)
if err != nil {
return fmt.Errorf("failed to start worker gRPC server: %w", err)
}
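// Illustrative sketch (not part of the patch above): the gRPC-port default
// introduced in runAdmin, extracted into a standalone helper. "resolveGrpcPort"
// is a hypothetical name used only for this example.
package main

import "fmt"

// resolveGrpcPort mirrors the rule applied above: an explicit -port.grpc wins;
// otherwise the worker gRPC server listens on the HTTP port + 10000.
func resolveGrpcPort(httpPort, grpcPort int) int {
	if grpcPort == 0 {
		return httpPort + 10000
	}
	return grpcPort
}

func main() {
	fmt.Println(resolveGrpcPort(23646, 0))    // 33646, the default pairing
	fmt.Println(resolveGrpcPort(9900, 19900)) // 19900, explicit -port.grpc
}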
diff --git a/weed/command/worker.go b/weed/command/worker.go
index f217e57f7..6e592f73f 100644
--- a/weed/command/worker.go
+++ b/weed/command/worker.go
@@ -3,6 +3,7 @@ package command
import (
"os"
"os/signal"
+ "path/filepath"
"strings"
"syscall"
"time"
@@ -21,7 +22,7 @@ import (
)
var cmdWorker = &Command{
- UsageLine: "worker -admin=<admin_server> [-capabilities=<task_types>] [-maxConcurrent=<num>]",
+ UsageLine: "worker -admin=<admin_server> [-capabilities=<task_types>] [-maxConcurrent=<num>] [-workingDir=<path>]",
Short: "start a maintenance worker to process cluster maintenance tasks",
Long: `Start a maintenance worker that connects to an admin server to process
maintenance tasks like vacuum, erasure coding, remote upload, and replication fixes.
@@ -34,6 +35,7 @@ Examples:
weed worker -admin=admin.example.com:23646
weed worker -admin=localhost:23646 -capabilities=vacuum,replication
weed worker -admin=localhost:23646 -maxConcurrent=4
+ weed worker -admin=localhost:23646 -workingDir=/tmp/worker
`,
}
@@ -43,6 +45,7 @@ var (
workerMaxConcurrent = cmdWorker.Flag.Int("maxConcurrent", 2, "maximum number of concurrent tasks")
workerHeartbeatInterval = cmdWorker.Flag.Duration("heartbeat", 30*time.Second, "heartbeat interval")
workerTaskRequestInterval = cmdWorker.Flag.Duration("taskInterval", 5*time.Second, "task request interval")
+ workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for the worker")
)
func init() {
@@ -67,6 +70,45 @@ func runWorker(cmd *Command, args []string) bool {
return false
}
+ // Set working directory and create task-specific subdirectories
+ var baseWorkingDir string
+ if *workerWorkingDir != "" {
+ glog.Infof("Setting working directory to: %s", *workerWorkingDir)
+ if err := os.Chdir(*workerWorkingDir); err != nil {
+ glog.Fatalf("Failed to change working directory: %v", err)
+ return false
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ glog.Fatalf("Failed to get working directory: %v", err)
+ return false
+ }
+ baseWorkingDir = wd
+ glog.Infof("Current working directory: %s", baseWorkingDir)
+ } else {
+ // Use default working directory when not specified
+ wd, err := os.Getwd()
+ if err != nil {
+ glog.Fatalf("Failed to get current working directory: %v", err)
+ return false
+ }
+ baseWorkingDir = wd
+ glog.Infof("Using current working directory: %s", baseWorkingDir)
+ }
+
+ // Create task-specific subdirectories
+ for _, capability := range capabilities {
+ taskDir := filepath.Join(baseWorkingDir, string(capability))
+ if err := os.MkdirAll(taskDir, 0755); err != nil {
+ glog.Fatalf("Failed to create task directory %s: %v", taskDir, err)
+ return false
+ }
+ glog.Infof("Created task directory: %s", taskDir)
+ }
+
+ // Create gRPC dial option using TLS configuration
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
+
// Create worker configuration
config := &types.WorkerConfig{
AdminServer: *workerAdminServer,
@@ -74,6 +116,8 @@ func runWorker(cmd *Command, args []string) bool {
MaxConcurrent: *workerMaxConcurrent,
HeartbeatInterval: *workerHeartbeatInterval,
TaskRequestInterval: *workerTaskRequestInterval,
+ BaseWorkingDir: baseWorkingDir,
+ GrpcDialOption: grpcDialOption,
}
// Create worker instance
@@ -82,9 +126,6 @@ func runWorker(cmd *Command, args []string) bool {
glog.Fatalf("Failed to create worker: %v", err)
return false
}
-
- // Create admin client with LoadClientTLS
- grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
adminClient, err := worker.CreateAdminClient(*workerAdminServer, workerInstance.ID(), grpcDialOption)
if err != nil {
glog.Fatalf("Failed to create admin client: %v", err)
@@ -94,10 +135,25 @@ func runWorker(cmd *Command, args []string) bool {
// Set admin client
workerInstance.SetAdminClient(adminClient)
+ // Set working directory
+ if *workerWorkingDir != "" {
+ glog.Infof("Setting working directory to: %s", *workerWorkingDir)
+ if err := os.Chdir(*workerWorkingDir); err != nil {
+ glog.Fatalf("Failed to change working directory: %v", err)
+ return false
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ glog.Fatalf("Failed to get working directory: %v", err)
+ return false
+ }
+ glog.Infof("Current working directory: %s", wd)
+ }
+
// Start the worker
err = workerInstance.Start()
if err != nil {
- glog.Fatalf("Failed to start worker: %v", err)
+ glog.Errorf("Failed to start worker: %v", err)
return false
}
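// Illustrative sketch (not part of the patch above): the per-capability
// directory layout the worker prepares at startup. The capability names below
// are placeholders; the real list comes from the -capabilities flag.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Resolve the base working directory the same way runWorker does when
	// -workingDir is not given: use the current directory.
	baseWorkingDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// One subdirectory per task capability, created up front so task handlers
	// have a scratch area (e.g. <workingDir>/vacuum).
	for _, capability := range []string{"vacuum", "erasure_coding"} {
		taskDir := filepath.Join(baseWorkingDir, capability)
		if err := os.MkdirAll(taskDir, 0755); err != nil {
			panic(err)
		}
		fmt.Println("created task directory:", taskDir)
	}
}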
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index f5d03ad9d..403dbb278 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -109,6 +109,7 @@ message VolumeInformationMessage {
string remote_storage_name = 13;
string remote_storage_key = 14;
string disk_type = 15;
+ uint32 disk_id = 16;
}
message VolumeShortInformationMessage {
@@ -118,6 +119,7 @@ message VolumeShortInformationMessage {
uint32 version = 9;
uint32 ttl = 10;
string disk_type = 15;
+ uint32 disk_id = 16;
}
message VolumeEcShardInformationMessage {
@@ -126,6 +128,7 @@ message VolumeEcShardInformationMessage {
uint32 ec_index_bits = 3;
string disk_type = 4;
uint64 expire_at_sec = 5; // used to record the destruction time of ec volume
+ uint32 disk_id = 6;
}
message StorageBackend {
@@ -279,6 +282,7 @@ message DiskInfo {
repeated VolumeInformationMessage volume_infos = 6;
repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
int64 remote_volume_count = 8;
+ uint32 disk_id = 9;
}
message DataNodeInfo {
string id = 1;
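// Illustrative sketch (not part of the patch above): consuming the new disk_id
// field from the master topology, e.g. to group volumes and EC shards by the
// physical disk that holds them. The import path assumes the seaweedfs module
// layout; the accessors are the generated ones shown in master.pb.go below.
package example

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

func printDiskPlacement(disk *master_pb.DiskInfo) {
	fmt.Printf("disk %d (type %s)\n", disk.GetDiskId(), disk.GetType())
	for _, v := range disk.GetVolumeInfos() {
		fmt.Printf("  volume %d reported on disk %d\n", v.GetId(), v.GetDiskId())
	}
	for _, ec := range disk.GetEcShardInfos() {
		fmt.Printf("  ec volume %d shards reported on disk %d\n", ec.GetId(), ec.GetDiskId())
	}
}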
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index 2aa644136..0f772f0dc 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -313,6 +313,7 @@ type VolumeInformationMessage struct {
RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"`
RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"`
DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+ DiskId uint32 `protobuf:"varint,16,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -452,6 +453,13 @@ func (x *VolumeInformationMessage) GetDiskType() string {
return ""
}
+func (x *VolumeInformationMessage) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
type VolumeShortInformationMessage struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -460,6 +468,7 @@ type VolumeShortInformationMessage struct {
Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+ DiskId uint32 `protobuf:"varint,16,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -536,6 +545,13 @@ func (x *VolumeShortInformationMessage) GetDiskType() string {
return ""
}
+func (x *VolumeShortInformationMessage) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
type VolumeEcShardInformationMessage struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -543,6 +559,7 @@ type VolumeEcShardInformationMessage struct {
EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"`
DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
ExpireAtSec uint64 `protobuf:"varint,5,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // used to record the destruction time of ec volume
+ DiskId uint32 `protobuf:"varint,6,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -612,6 +629,13 @@ func (x *VolumeEcShardInformationMessage) GetExpireAtSec() uint64 {
return 0
}
+func (x *VolumeEcShardInformationMessage) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
type StorageBackend struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
@@ -1904,6 +1928,7 @@ type DiskInfo struct {
VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"`
EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"`
RemoteVolumeCount int64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+ DiskId uint32 `protobuf:"varint,9,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1994,6 +2019,13 @@ func (x *DiskInfo) GetRemoteVolumeCount() int64 {
return 0
}
+func (x *DiskInfo) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
type DataNodeInfo struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -4034,7 +4066,7 @@ const file_master_proto_rawDesc = "" +
"\x18metrics_interval_seconds\x18\x04 \x01(\rR\x16metricsIntervalSeconds\x12D\n" +
"\x10storage_backends\x18\x05 \x03(\v2\x19.master_pb.StorageBackendR\x0fstorageBackends\x12)\n" +
"\x10duplicated_uuids\x18\x06 \x03(\tR\x0fduplicatedUuids\x12 \n" +
- "\vpreallocate\x18\a \x01(\bR\vpreallocate\"\x98\x04\n" +
+ "\vpreallocate\x18\a \x01(\bR\vpreallocate\"\xb1\x04\n" +
"\x18VolumeInformationMessage\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id\x12\x12\n" +
"\x04size\x18\x02 \x01(\x04R\x04size\x12\x1e\n" +
@@ -4054,7 +4086,8 @@ const file_master_proto_rawDesc = "" +
"\x12modified_at_second\x18\f \x01(\x03R\x10modifiedAtSecond\x12.\n" +
"\x13remote_storage_name\x18\r \x01(\tR\x11remoteStorageName\x12,\n" +
"\x12remote_storage_key\x18\x0e \x01(\tR\x10remoteStorageKey\x12\x1b\n" +
- "\tdisk_type\x18\x0f \x01(\tR\bdiskType\"\xc5\x01\n" +
+ "\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" +
+ "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xde\x01\n" +
"\x1dVolumeShortInformationMessage\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" +
"\n" +
@@ -4064,7 +4097,8 @@ const file_master_proto_rawDesc = "" +
"\aversion\x18\t \x01(\rR\aversion\x12\x10\n" +
"\x03ttl\x18\n" +
" \x01(\rR\x03ttl\x12\x1b\n" +
- "\tdisk_type\x18\x0f \x01(\tR\bdiskType\"\xb6\x01\n" +
+ "\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" +
+ "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xcf\x01\n" +
"\x1fVolumeEcShardInformationMessage\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" +
"\n" +
@@ -4072,7 +4106,8 @@ const file_master_proto_rawDesc = "" +
"collection\x12\"\n" +
"\rec_index_bits\x18\x03 \x01(\rR\vecIndexBits\x12\x1b\n" +
"\tdisk_type\x18\x04 \x01(\tR\bdiskType\x12\"\n" +
- "\rexpire_at_sec\x18\x05 \x01(\x04R\vexpireAtSec\"\xbe\x01\n" +
+ "\rexpire_at_sec\x18\x05 \x01(\x04R\vexpireAtSec\x12\x17\n" +
+ "\adisk_id\x18\x06 \x01(\rR\x06diskId\"\xbe\x01\n" +
"\x0eStorageBackend\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12\x0e\n" +
"\x02id\x18\x02 \x01(\tR\x02id\x12I\n" +
@@ -4199,7 +4234,7 @@ const file_master_proto_rawDesc = "" +
"\vcollections\x18\x01 \x03(\v2\x15.master_pb.CollectionR\vcollections\"-\n" +
"\x17CollectionDeleteRequest\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x1a\n" +
- "\x18CollectionDeleteResponse\"\x91\x03\n" +
+ "\x18CollectionDeleteResponse\"\xaa\x03\n" +
"\bDiskInfo\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12!\n" +
"\fvolume_count\x18\x02 \x01(\x03R\vvolumeCount\x12(\n" +
@@ -4208,7 +4243,8 @@ const file_master_proto_rawDesc = "" +
"\x13active_volume_count\x18\x05 \x01(\x03R\x11activeVolumeCount\x12F\n" +
"\fvolume_infos\x18\x06 \x03(\v2#.master_pb.VolumeInformationMessageR\vvolumeInfos\x12P\n" +
"\x0eec_shard_infos\x18\a \x03(\v2*.master_pb.VolumeEcShardInformationMessageR\fecShardInfos\x12.\n" +
- "\x13remote_volume_count\x18\b \x01(\x03R\x11remoteVolumeCount\"\xd4\x01\n" +
+ "\x13remote_volume_count\x18\b \x01(\x03R\x11remoteVolumeCount\x12\x17\n" +
+ "\adisk_id\x18\t \x01(\rR\x06diskId\"\xd4\x01\n" +
"\fDataNodeInfo\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\x12D\n" +
"\tdiskInfos\x18\x02 \x03(\v2&.master_pb.DataNodeInfo.DiskInfosEntryR\tdiskInfos\x12\x1b\n" +
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index 79b1ba1d0..fcdad30ff 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -53,6 +53,8 @@ service VolumeServer {
}
rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
}
+ rpc ReceiveFile (stream ReceiveFileRequest) returns (ReceiveFileResponse) {
+ }
rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
}
@@ -87,6 +89,8 @@ service VolumeServer {
}
rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
}
+ rpc VolumeEcShardsInfo (VolumeEcShardsInfoRequest) returns (VolumeEcShardsInfoResponse) {
+ }
// tiered storage
rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
@@ -285,6 +289,27 @@ message CopyFileResponse {
int64 modified_ts_ns = 2;
}
+message ReceiveFileRequest {
+ oneof data {
+ ReceiveFileInfo info = 1;
+ bytes file_content = 2;
+ }
+}
+
+message ReceiveFileInfo {
+ uint32 volume_id = 1;
+ string ext = 2;
+ string collection = 3;
+ bool is_ec_volume = 4;
+ uint32 shard_id = 5;
+ uint64 file_size = 6;
+}
+
+message ReceiveFileResponse {
+ uint64 bytes_written = 1;
+ string error = 2;
+}
+
message ReadNeedleBlobRequest {
uint32 volume_id = 1;
int64 offset = 3; // actual offset
@@ -376,6 +401,7 @@ message VolumeEcShardsCopyRequest {
string source_data_node = 5;
bool copy_ecj_file = 6;
bool copy_vif_file = 7;
+ uint32 disk_id = 8; // Target disk ID for storing EC shards
}
message VolumeEcShardsCopyResponse {
}
@@ -431,6 +457,19 @@ message VolumeEcShardsToVolumeRequest {
message VolumeEcShardsToVolumeResponse {
}
+message VolumeEcShardsInfoRequest {
+ uint32 volume_id = 1;
+}
+message VolumeEcShardsInfoResponse {
+ repeated EcShardInfo ec_shard_infos = 1;
+}
+
+message EcShardInfo {
+ uint32 shard_id = 1;
+ int64 size = 2;
+ string collection = 3;
+}
+
message ReadVolumeFileStatusRequest {
uint32 volume_id = 1;
}
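// Illustrative sketch (not part of the patch above): the intended use of the
// new ReceiveFile client-streaming RPC. The first message on the stream carries
// ReceiveFileInfo, every following message carries a file_content chunk, and
// the server answers once with bytes_written or an error string. The client and
// stream method names follow standard protoc-gen-go-grpc output; the import
// path and the ".ecXX" shard extension follow SeaweedFS conventions and are
// assumptions of this example.
package example

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func sendEcShard(ctx context.Context, client volume_server_pb.VolumeServerClient,
	volumeId, shardId uint32, collection string, data []byte) error {

	stream, err := client.ReceiveFile(ctx)
	if err != nil {
		return err
	}
	// 1. describe the file being transferred
	if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   volumeId,
				Ext:        fmt.Sprintf(".ec%02d", shardId),
				Collection: collection,
				IsEcVolume: true,
				ShardId:    shardId,
				FileSize:   uint64(len(data)),
			},
		},
	}); err != nil {
		return err
	}
	// 2. stream the shard content in fixed-size chunks
	const chunkSize = 64 * 1024
	for off := 0; off < len(data); off += chunkSize {
		end := off + chunkSize
		if end > len(data) {
			end = len(data)
		}
		if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
			Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: data[off:end]},
		}); err != nil {
			return err
		}
	}
	// 3. close the stream and read the single response
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return err
	}
	if resp.Error != "" {
		return fmt.Errorf("receive file failed: %s", resp.Error)
	}
	return nil
}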
diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go
index b4c5ec809..503db63ef 100644
--- a/weed/pb/volume_server_pb/volume_server.pb.go
+++ b/weed/pb/volume_server_pb/volume_server.pb.go
@@ -1966,6 +1966,224 @@ func (x *CopyFileResponse) GetModifiedTsNs() int64 {
return 0
}
+type ReceiveFileRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Data:
+ //
+ // *ReceiveFileRequest_Info
+ // *ReceiveFileRequest_FileContent
+ Data isReceiveFileRequest_Data `protobuf_oneof:"data"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReceiveFileRequest) Reset() {
+ *x = ReceiveFileRequest{}
+ mi := &file_volume_server_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReceiveFileRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReceiveFileRequest) ProtoMessage() {}
+
+func (x *ReceiveFileRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[38]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReceiveFileRequest.ProtoReflect.Descriptor instead.
+func (*ReceiveFileRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *ReceiveFileRequest) GetData() isReceiveFileRequest_Data {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *ReceiveFileRequest) GetInfo() *ReceiveFileInfo {
+ if x != nil {
+ if x, ok := x.Data.(*ReceiveFileRequest_Info); ok {
+ return x.Info
+ }
+ }
+ return nil
+}
+
+func (x *ReceiveFileRequest) GetFileContent() []byte {
+ if x != nil {
+ if x, ok := x.Data.(*ReceiveFileRequest_FileContent); ok {
+ return x.FileContent
+ }
+ }
+ return nil
+}
+
+type isReceiveFileRequest_Data interface {
+ isReceiveFileRequest_Data()
+}
+
+type ReceiveFileRequest_Info struct {
+ Info *ReceiveFileInfo `protobuf:"bytes,1,opt,name=info,proto3,oneof"`
+}
+
+type ReceiveFileRequest_FileContent struct {
+ FileContent []byte `protobuf:"bytes,2,opt,name=file_content,json=fileContent,proto3,oneof"`
+}
+
+func (*ReceiveFileRequest_Info) isReceiveFileRequest_Data() {}
+
+func (*ReceiveFileRequest_FileContent) isReceiveFileRequest_Data() {}
+
+type ReceiveFileInfo struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ IsEcVolume bool `protobuf:"varint,4,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
+ ShardId uint32 `protobuf:"varint,5,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ FileSize uint64 `protobuf:"varint,6,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReceiveFileInfo) Reset() {
+ *x = ReceiveFileInfo{}
+ mi := &file_volume_server_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReceiveFileInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReceiveFileInfo) ProtoMessage() {}
+
+func (x *ReceiveFileInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[39]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReceiveFileInfo.ProtoReflect.Descriptor instead.
+func (*ReceiveFileInfo) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{39}
+}
+
+func (x *ReceiveFileInfo) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *ReceiveFileInfo) GetExt() string {
+ if x != nil {
+ return x.Ext
+ }
+ return ""
+}
+
+func (x *ReceiveFileInfo) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *ReceiveFileInfo) GetIsEcVolume() bool {
+ if x != nil {
+ return x.IsEcVolume
+ }
+ return false
+}
+
+func (x *ReceiveFileInfo) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
+ }
+ return 0
+}
+
+func (x *ReceiveFileInfo) GetFileSize() uint64 {
+ if x != nil {
+ return x.FileSize
+ }
+ return 0
+}
+
+type ReceiveFileResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ BytesWritten uint64 `protobuf:"varint,1,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"`
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReceiveFileResponse) Reset() {
+ *x = ReceiveFileResponse{}
+ mi := &file_volume_server_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReceiveFileResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReceiveFileResponse) ProtoMessage() {}
+
+func (x *ReceiveFileResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[40]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReceiveFileResponse.ProtoReflect.Descriptor instead.
+func (*ReceiveFileResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{40}
+}
+
+func (x *ReceiveFileResponse) GetBytesWritten() uint64 {
+ if x != nil {
+ return x.BytesWritten
+ }
+ return 0
+}
+
+func (x *ReceiveFileResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
type ReadNeedleBlobRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
@@ -1977,7 +2195,7 @@ type ReadNeedleBlobRequest struct {
func (x *ReadNeedleBlobRequest) Reset() {
*x = ReadNeedleBlobRequest{}
- mi := &file_volume_server_proto_msgTypes[38]
+ mi := &file_volume_server_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1989,7 +2207,7 @@ func (x *ReadNeedleBlobRequest) String() string {
func (*ReadNeedleBlobRequest) ProtoMessage() {}
func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[38]
+ mi := &file_volume_server_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2002,7 +2220,7 @@ func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead.
func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{38}
+ return file_volume_server_proto_rawDescGZIP(), []int{41}
}
func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 {
@@ -2035,7 +2253,7 @@ type ReadNeedleBlobResponse struct {
func (x *ReadNeedleBlobResponse) Reset() {
*x = ReadNeedleBlobResponse{}
- mi := &file_volume_server_proto_msgTypes[39]
+ mi := &file_volume_server_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2047,7 +2265,7 @@ func (x *ReadNeedleBlobResponse) String() string {
func (*ReadNeedleBlobResponse) ProtoMessage() {}
func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[39]
+ mi := &file_volume_server_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2060,7 +2278,7 @@ func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead.
func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{39}
+ return file_volume_server_proto_rawDescGZIP(), []int{42}
}
func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte {
@@ -2082,7 +2300,7 @@ type ReadNeedleMetaRequest struct {
func (x *ReadNeedleMetaRequest) Reset() {
*x = ReadNeedleMetaRequest{}
- mi := &file_volume_server_proto_msgTypes[40]
+ mi := &file_volume_server_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2094,7 +2312,7 @@ func (x *ReadNeedleMetaRequest) String() string {
func (*ReadNeedleMetaRequest) ProtoMessage() {}
func (x *ReadNeedleMetaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[40]
+ mi := &file_volume_server_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2107,7 +2325,7 @@ func (x *ReadNeedleMetaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadNeedleMetaRequest.ProtoReflect.Descriptor instead.
func (*ReadNeedleMetaRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{40}
+ return file_volume_server_proto_rawDescGZIP(), []int{43}
}
func (x *ReadNeedleMetaRequest) GetVolumeId() uint32 {
@@ -2151,7 +2369,7 @@ type ReadNeedleMetaResponse struct {
func (x *ReadNeedleMetaResponse) Reset() {
*x = ReadNeedleMetaResponse{}
- mi := &file_volume_server_proto_msgTypes[41]
+ mi := &file_volume_server_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2163,7 +2381,7 @@ func (x *ReadNeedleMetaResponse) String() string {
func (*ReadNeedleMetaResponse) ProtoMessage() {}
func (x *ReadNeedleMetaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[41]
+ mi := &file_volume_server_proto_msgTypes[44]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2176,7 +2394,7 @@ func (x *ReadNeedleMetaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadNeedleMetaResponse.ProtoReflect.Descriptor instead.
func (*ReadNeedleMetaResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{41}
+ return file_volume_server_proto_rawDescGZIP(), []int{44}
}
func (x *ReadNeedleMetaResponse) GetCookie() uint32 {
@@ -2226,7 +2444,7 @@ type WriteNeedleBlobRequest struct {
func (x *WriteNeedleBlobRequest) Reset() {
*x = WriteNeedleBlobRequest{}
- mi := &file_volume_server_proto_msgTypes[42]
+ mi := &file_volume_server_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2238,7 +2456,7 @@ func (x *WriteNeedleBlobRequest) String() string {
func (*WriteNeedleBlobRequest) ProtoMessage() {}
func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[42]
+ mi := &file_volume_server_proto_msgTypes[45]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2251,7 +2469,7 @@ func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead.
func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{42}
+ return file_volume_server_proto_rawDescGZIP(), []int{45}
}
func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 {
@@ -2290,7 +2508,7 @@ type WriteNeedleBlobResponse struct {
func (x *WriteNeedleBlobResponse) Reset() {
*x = WriteNeedleBlobResponse{}
- mi := &file_volume_server_proto_msgTypes[43]
+ mi := &file_volume_server_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2302,7 +2520,7 @@ func (x *WriteNeedleBlobResponse) String() string {
func (*WriteNeedleBlobResponse) ProtoMessage() {}
func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[43]
+ mi := &file_volume_server_proto_msgTypes[46]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2315,7 +2533,7 @@ func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead.
func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{43}
+ return file_volume_server_proto_rawDescGZIP(), []int{46}
}
type ReadAllNeedlesRequest struct {
@@ -2327,7 +2545,7 @@ type ReadAllNeedlesRequest struct {
func (x *ReadAllNeedlesRequest) Reset() {
*x = ReadAllNeedlesRequest{}
- mi := &file_volume_server_proto_msgTypes[44]
+ mi := &file_volume_server_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2339,7 +2557,7 @@ func (x *ReadAllNeedlesRequest) String() string {
func (*ReadAllNeedlesRequest) ProtoMessage() {}
func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[44]
+ mi := &file_volume_server_proto_msgTypes[47]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2352,7 +2570,7 @@ func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadAllNeedlesRequest.ProtoReflect.Descriptor instead.
func (*ReadAllNeedlesRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{44}
+ return file_volume_server_proto_rawDescGZIP(), []int{47}
}
func (x *ReadAllNeedlesRequest) GetVolumeIds() []uint32 {
@@ -2379,7 +2597,7 @@ type ReadAllNeedlesResponse struct {
func (x *ReadAllNeedlesResponse) Reset() {
*x = ReadAllNeedlesResponse{}
- mi := &file_volume_server_proto_msgTypes[45]
+ mi := &file_volume_server_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2391,7 +2609,7 @@ func (x *ReadAllNeedlesResponse) String() string {
func (*ReadAllNeedlesResponse) ProtoMessage() {}
func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[45]
+ mi := &file_volume_server_proto_msgTypes[48]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2404,7 +2622,7 @@ func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadAllNeedlesResponse.ProtoReflect.Descriptor instead.
func (*ReadAllNeedlesResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{45}
+ return file_volume_server_proto_rawDescGZIP(), []int{48}
}
func (x *ReadAllNeedlesResponse) GetVolumeId() uint32 {
@@ -2481,7 +2699,7 @@ type VolumeTailSenderRequest struct {
func (x *VolumeTailSenderRequest) Reset() {
*x = VolumeTailSenderRequest{}
- mi := &file_volume_server_proto_msgTypes[46]
+ mi := &file_volume_server_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2493,7 +2711,7 @@ func (x *VolumeTailSenderRequest) String() string {
func (*VolumeTailSenderRequest) ProtoMessage() {}
func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[46]
+ mi := &file_volume_server_proto_msgTypes[49]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2506,7 +2724,7 @@ func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead.
func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{46}
+ return file_volume_server_proto_rawDescGZIP(), []int{49}
}
func (x *VolumeTailSenderRequest) GetVolumeId() uint32 {
@@ -2542,7 +2760,7 @@ type VolumeTailSenderResponse struct {
func (x *VolumeTailSenderResponse) Reset() {
*x = VolumeTailSenderResponse{}
- mi := &file_volume_server_proto_msgTypes[47]
+ mi := &file_volume_server_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2554,7 +2772,7 @@ func (x *VolumeTailSenderResponse) String() string {
func (*VolumeTailSenderResponse) ProtoMessage() {}
func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[47]
+ mi := &file_volume_server_proto_msgTypes[50]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2567,7 +2785,7 @@ func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead.
func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{47}
+ return file_volume_server_proto_rawDescGZIP(), []int{50}
}
func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte {
@@ -2610,7 +2828,7 @@ type VolumeTailReceiverRequest struct {
func (x *VolumeTailReceiverRequest) Reset() {
*x = VolumeTailReceiverRequest{}
- mi := &file_volume_server_proto_msgTypes[48]
+ mi := &file_volume_server_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2622,7 +2840,7 @@ func (x *VolumeTailReceiverRequest) String() string {
func (*VolumeTailReceiverRequest) ProtoMessage() {}
func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[48]
+ mi := &file_volume_server_proto_msgTypes[51]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2635,7 +2853,7 @@ func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead.
func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{48}
+ return file_volume_server_proto_rawDescGZIP(), []int{51}
}
func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 {
@@ -2674,7 +2892,7 @@ type VolumeTailReceiverResponse struct {
func (x *VolumeTailReceiverResponse) Reset() {
*x = VolumeTailReceiverResponse{}
- mi := &file_volume_server_proto_msgTypes[49]
+ mi := &file_volume_server_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2686,7 +2904,7 @@ func (x *VolumeTailReceiverResponse) String() string {
func (*VolumeTailReceiverResponse) ProtoMessage() {}
func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[49]
+ mi := &file_volume_server_proto_msgTypes[52]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2699,7 +2917,7 @@ func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead.
func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{49}
+ return file_volume_server_proto_rawDescGZIP(), []int{52}
}
type VolumeEcShardsGenerateRequest struct {
@@ -2712,7 +2930,7 @@ type VolumeEcShardsGenerateRequest struct {
func (x *VolumeEcShardsGenerateRequest) Reset() {
*x = VolumeEcShardsGenerateRequest{}
- mi := &file_volume_server_proto_msgTypes[50]
+ mi := &file_volume_server_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2724,7 +2942,7 @@ func (x *VolumeEcShardsGenerateRequest) String() string {
func (*VolumeEcShardsGenerateRequest) ProtoMessage() {}
func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[50]
+ mi := &file_volume_server_proto_msgTypes[53]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2737,7 +2955,7 @@ func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{50}
+ return file_volume_server_proto_rawDescGZIP(), []int{53}
}
func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 {
@@ -2762,7 +2980,7 @@ type VolumeEcShardsGenerateResponse struct {
func (x *VolumeEcShardsGenerateResponse) Reset() {
*x = VolumeEcShardsGenerateResponse{}
- mi := &file_volume_server_proto_msgTypes[51]
+ mi := &file_volume_server_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2774,7 +2992,7 @@ func (x *VolumeEcShardsGenerateResponse) String() string {
func (*VolumeEcShardsGenerateResponse) ProtoMessage() {}
func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[51]
+ mi := &file_volume_server_proto_msgTypes[54]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2787,7 +3005,7 @@ func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{51}
+ return file_volume_server_proto_rawDescGZIP(), []int{54}
}
type VolumeEcShardsRebuildRequest struct {
@@ -2800,7 +3018,7 @@ type VolumeEcShardsRebuildRequest struct {
func (x *VolumeEcShardsRebuildRequest) Reset() {
*x = VolumeEcShardsRebuildRequest{}
- mi := &file_volume_server_proto_msgTypes[52]
+ mi := &file_volume_server_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2812,7 +3030,7 @@ func (x *VolumeEcShardsRebuildRequest) String() string {
func (*VolumeEcShardsRebuildRequest) ProtoMessage() {}
func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[52]
+ mi := &file_volume_server_proto_msgTypes[55]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2825,7 +3043,7 @@ func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{52}
+ return file_volume_server_proto_rawDescGZIP(), []int{55}
}
func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 {
@@ -2851,7 +3069,7 @@ type VolumeEcShardsRebuildResponse struct {
func (x *VolumeEcShardsRebuildResponse) Reset() {
*x = VolumeEcShardsRebuildResponse{}
- mi := &file_volume_server_proto_msgTypes[53]
+ mi := &file_volume_server_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2863,7 +3081,7 @@ func (x *VolumeEcShardsRebuildResponse) String() string {
func (*VolumeEcShardsRebuildResponse) ProtoMessage() {}
func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[53]
+ mi := &file_volume_server_proto_msgTypes[56]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2876,7 +3094,7 @@ func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{53}
+ return file_volume_server_proto_rawDescGZIP(), []int{56}
}
func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 {
@@ -2895,13 +3113,14 @@ type VolumeEcShardsCopyRequest struct {
SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"`
CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"`
+ DiskId uint32 `protobuf:"varint,8,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID for storing EC shards
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *VolumeEcShardsCopyRequest) Reset() {
*x = VolumeEcShardsCopyRequest{}
- mi := &file_volume_server_proto_msgTypes[54]
+ mi := &file_volume_server_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2913,7 +3132,7 @@ func (x *VolumeEcShardsCopyRequest) String() string {
func (*VolumeEcShardsCopyRequest) ProtoMessage() {}
func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[54]
+ mi := &file_volume_server_proto_msgTypes[57]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2926,7 +3145,7 @@ func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{54}
+ return file_volume_server_proto_rawDescGZIP(), []int{57}
}
func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 {
@@ -2978,6 +3197,13 @@ func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool {
return false
}
+func (x *VolumeEcShardsCopyRequest) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
type VolumeEcShardsCopyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
@@ -2986,7 +3212,7 @@ type VolumeEcShardsCopyResponse struct {
func (x *VolumeEcShardsCopyResponse) Reset() {
*x = VolumeEcShardsCopyResponse{}
- mi := &file_volume_server_proto_msgTypes[55]
+ mi := &file_volume_server_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2998,7 +3224,7 @@ func (x *VolumeEcShardsCopyResponse) String() string {
func (*VolumeEcShardsCopyResponse) ProtoMessage() {}
func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[55]
+ mi := &file_volume_server_proto_msgTypes[58]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3011,7 +3237,7 @@ func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{55}
+ return file_volume_server_proto_rawDescGZIP(), []int{58}
}
type VolumeEcShardsDeleteRequest struct {
@@ -3025,7 +3251,7 @@ type VolumeEcShardsDeleteRequest struct {
func (x *VolumeEcShardsDeleteRequest) Reset() {
*x = VolumeEcShardsDeleteRequest{}
- mi := &file_volume_server_proto_msgTypes[56]
+ mi := &file_volume_server_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3037,7 +3263,7 @@ func (x *VolumeEcShardsDeleteRequest) String() string {
func (*VolumeEcShardsDeleteRequest) ProtoMessage() {}
func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[56]
+ mi := &file_volume_server_proto_msgTypes[59]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3050,7 +3276,7 @@ func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{56}
+ return file_volume_server_proto_rawDescGZIP(), []int{59}
}
func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 {
@@ -3082,7 +3308,7 @@ type VolumeEcShardsDeleteResponse struct {
func (x *VolumeEcShardsDeleteResponse) Reset() {
*x = VolumeEcShardsDeleteResponse{}
- mi := &file_volume_server_proto_msgTypes[57]
+ mi := &file_volume_server_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3094,7 +3320,7 @@ func (x *VolumeEcShardsDeleteResponse) String() string {
func (*VolumeEcShardsDeleteResponse) ProtoMessage() {}
func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[57]
+ mi := &file_volume_server_proto_msgTypes[60]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3107,7 +3333,7 @@ func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{57}
+ return file_volume_server_proto_rawDescGZIP(), []int{60}
}
type VolumeEcShardsMountRequest struct {
@@ -3121,7 +3347,7 @@ type VolumeEcShardsMountRequest struct {
func (x *VolumeEcShardsMountRequest) Reset() {
*x = VolumeEcShardsMountRequest{}
- mi := &file_volume_server_proto_msgTypes[58]
+ mi := &file_volume_server_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3133,7 +3359,7 @@ func (x *VolumeEcShardsMountRequest) String() string {
func (*VolumeEcShardsMountRequest) ProtoMessage() {}
func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[58]
+ mi := &file_volume_server_proto_msgTypes[61]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3146,7 +3372,7 @@ func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{58}
+ return file_volume_server_proto_rawDescGZIP(), []int{61}
}
func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 {
@@ -3178,7 +3404,7 @@ type VolumeEcShardsMountResponse struct {
func (x *VolumeEcShardsMountResponse) Reset() {
*x = VolumeEcShardsMountResponse{}
- mi := &file_volume_server_proto_msgTypes[59]
+ mi := &file_volume_server_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3190,7 +3416,7 @@ func (x *VolumeEcShardsMountResponse) String() string {
func (*VolumeEcShardsMountResponse) ProtoMessage() {}
func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[59]
+ mi := &file_volume_server_proto_msgTypes[62]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3203,7 +3429,7 @@ func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{59}
+ return file_volume_server_proto_rawDescGZIP(), []int{62}
}
type VolumeEcShardsUnmountRequest struct {
@@ -3216,7 +3442,7 @@ type VolumeEcShardsUnmountRequest struct {
func (x *VolumeEcShardsUnmountRequest) Reset() {
*x = VolumeEcShardsUnmountRequest{}
- mi := &file_volume_server_proto_msgTypes[60]
+ mi := &file_volume_server_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3228,7 +3454,7 @@ func (x *VolumeEcShardsUnmountRequest) String() string {
func (*VolumeEcShardsUnmountRequest) ProtoMessage() {}
func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[60]
+ mi := &file_volume_server_proto_msgTypes[63]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3241,7 +3467,7 @@ func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{60}
+ return file_volume_server_proto_rawDescGZIP(), []int{63}
}
func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 {
@@ -3266,7 +3492,7 @@ type VolumeEcShardsUnmountResponse struct {
func (x *VolumeEcShardsUnmountResponse) Reset() {
*x = VolumeEcShardsUnmountResponse{}
- mi := &file_volume_server_proto_msgTypes[61]
+ mi := &file_volume_server_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3278,7 +3504,7 @@ func (x *VolumeEcShardsUnmountResponse) String() string {
func (*VolumeEcShardsUnmountResponse) ProtoMessage() {}
func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[61]
+ mi := &file_volume_server_proto_msgTypes[64]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3291,7 +3517,7 @@ func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{61}
+ return file_volume_server_proto_rawDescGZIP(), []int{64}
}
type VolumeEcShardReadRequest struct {
@@ -3307,7 +3533,7 @@ type VolumeEcShardReadRequest struct {
func (x *VolumeEcShardReadRequest) Reset() {
*x = VolumeEcShardReadRequest{}
- mi := &file_volume_server_proto_msgTypes[62]
+ mi := &file_volume_server_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3319,7 +3545,7 @@ func (x *VolumeEcShardReadRequest) String() string {
func (*VolumeEcShardReadRequest) ProtoMessage() {}
func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[62]
+ mi := &file_volume_server_proto_msgTypes[65]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3332,7 +3558,7 @@ func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{62}
+ return file_volume_server_proto_rawDescGZIP(), []int{65}
}
func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 {
@@ -3380,7 +3606,7 @@ type VolumeEcShardReadResponse struct {
func (x *VolumeEcShardReadResponse) Reset() {
*x = VolumeEcShardReadResponse{}
- mi := &file_volume_server_proto_msgTypes[63]
+ mi := &file_volume_server_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3392,7 +3618,7 @@ func (x *VolumeEcShardReadResponse) String() string {
func (*VolumeEcShardReadResponse) ProtoMessage() {}
func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[63]
+ mi := &file_volume_server_proto_msgTypes[66]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3405,7 +3631,7 @@ func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{63}
+ return file_volume_server_proto_rawDescGZIP(), []int{66}
}
func (x *VolumeEcShardReadResponse) GetData() []byte {
@@ -3434,7 +3660,7 @@ type VolumeEcBlobDeleteRequest struct {
func (x *VolumeEcBlobDeleteRequest) Reset() {
*x = VolumeEcBlobDeleteRequest{}
- mi := &file_volume_server_proto_msgTypes[64]
+ mi := &file_volume_server_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3446,7 +3672,7 @@ func (x *VolumeEcBlobDeleteRequest) String() string {
func (*VolumeEcBlobDeleteRequest) ProtoMessage() {}
func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[64]
+ mi := &file_volume_server_proto_msgTypes[67]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3459,7 +3685,7 @@ func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{64}
+ return file_volume_server_proto_rawDescGZIP(), []int{67}
}
func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 {
@@ -3498,7 +3724,7 @@ type VolumeEcBlobDeleteResponse struct {
func (x *VolumeEcBlobDeleteResponse) Reset() {
*x = VolumeEcBlobDeleteResponse{}
- mi := &file_volume_server_proto_msgTypes[65]
+ mi := &file_volume_server_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3510,7 +3736,7 @@ func (x *VolumeEcBlobDeleteResponse) String() string {
func (*VolumeEcBlobDeleteResponse) ProtoMessage() {}
func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[65]
+ mi := &file_volume_server_proto_msgTypes[68]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3523,7 +3749,7 @@ func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{65}
+ return file_volume_server_proto_rawDescGZIP(), []int{68}
}
type VolumeEcShardsToVolumeRequest struct {
@@ -3536,7 +3762,7 @@ type VolumeEcShardsToVolumeRequest struct {
func (x *VolumeEcShardsToVolumeRequest) Reset() {
*x = VolumeEcShardsToVolumeRequest{}
- mi := &file_volume_server_proto_msgTypes[66]
+ mi := &file_volume_server_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3548,7 +3774,7 @@ func (x *VolumeEcShardsToVolumeRequest) String() string {
func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {}
func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[66]
+ mi := &file_volume_server_proto_msgTypes[69]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3561,7 +3787,7 @@ func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{66}
+ return file_volume_server_proto_rawDescGZIP(), []int{69}
}
func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 {
@@ -3586,7 +3812,7 @@ type VolumeEcShardsToVolumeResponse struct {
func (x *VolumeEcShardsToVolumeResponse) Reset() {
*x = VolumeEcShardsToVolumeResponse{}
- mi := &file_volume_server_proto_msgTypes[67]
+ mi := &file_volume_server_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3598,7 +3824,7 @@ func (x *VolumeEcShardsToVolumeResponse) String() string {
func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {}
func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[67]
+ mi := &file_volume_server_proto_msgTypes[70]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3611,7 +3837,155 @@ func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{67}
+ return file_volume_server_proto_rawDescGZIP(), []int{70}
+}
+
+type VolumeEcShardsInfoRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *VolumeEcShardsInfoRequest) Reset() {
+ *x = VolumeEcShardsInfoRequest{}
+ mi := &file_volume_server_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VolumeEcShardsInfoRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsInfoRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsInfoRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[71]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsInfoRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsInfoRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{71}
+}
+
+func (x *VolumeEcShardsInfoRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeEcShardsInfoResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ EcShardInfos []*EcShardInfo `protobuf:"bytes,1,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *VolumeEcShardsInfoResponse) Reset() {
+ *x = VolumeEcShardsInfoResponse{}
+ mi := &file_volume_server_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VolumeEcShardsInfoResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsInfoResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsInfoResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[72]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsInfoResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsInfoResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{72}
+}
+
+func (x *VolumeEcShardsInfoResponse) GetEcShardInfos() []*EcShardInfo {
+ if x != nil {
+ return x.EcShardInfos
+ }
+ return nil
+}
+
+type EcShardInfo struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EcShardInfo) Reset() {
+ *x = EcShardInfo{}
+ mi := &file_volume_server_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EcShardInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcShardInfo) ProtoMessage() {}
+
+func (x *EcShardInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[73]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcShardInfo.ProtoReflect.Descriptor instead.
+func (*EcShardInfo) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{73}
+}
+
+func (x *EcShardInfo) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
+ }
+ return 0
+}
+
+func (x *EcShardInfo) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *EcShardInfo) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
}
type ReadVolumeFileStatusRequest struct {
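The hunk above adds three generated message types for a new EC shard inventory call: VolumeEcShardsInfoRequest (volume_id), VolumeEcShardsInfoResponse (repeated ec_shard_infos), and EcShardInfo (shard_id, size, collection). As a hedged illustration only — it is not part of the generated file — the sketch below shows how a caller might use these types, assuming the usual SeaweedFS import path and that the companion gRPC stub gains a matching unary VolumeEcShardsInfo method; only the getters visible in this diff are used on the response.

// Hypothetical usage sketch (not part of the generated code): the import path and
// the NewVolumeServerClient / VolumeEcShardsInfo stub are assumed from the usual
// SeaweedFS layout; the message getters below are the ones defined in this diff.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// Dial a volume server gRPC port (placeholder address).
	conn, err := grpc.Dial("localhost:18080",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	// Ask the server which EC shards it holds for a given volume.
	resp, err := client.VolumeEcShardsInfo(context.Background(),
		&volume_server_pb.VolumeEcShardsInfoRequest{VolumeId: 7})
	if err != nil {
		log.Fatal(err)
	}

	// Each EcShardInfo carries the shard id, its on-disk size, and the collection.
	var total int64
	for _, shard := range resp.GetEcShardInfos() {
		fmt.Printf("shard %d (collection %q): %d bytes\n",
			shard.GetShardId(), shard.GetCollection(), shard.GetSize())
		total += shard.GetSize()
	}
	fmt.Printf("total EC shard bytes: %d\n", total)
}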
@@ -3623,7 +3997,7 @@ type ReadVolumeFileStatusRequest struct {
func (x *ReadVolumeFileStatusRequest) Reset() {
*x = ReadVolumeFileStatusRequest{}
- mi := &file_volume_server_proto_msgTypes[68]
+ mi := &file_volume_server_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3635,7 +4009,7 @@ func (x *ReadVolumeFileStatusRequest) String() string {
func (*ReadVolumeFileStatusRequest) ProtoMessage() {}
func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[68]
+ mi := &file_volume_server_proto_msgTypes[74]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3648,7 +4022,7 @@ func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead.
func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{68}
+ return file_volume_server_proto_rawDescGZIP(), []int{74}
}
func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 {
@@ -3677,7 +4051,7 @@ type ReadVolumeFileStatusResponse struct {
func (x *ReadVolumeFileStatusResponse) Reset() {
*x = ReadVolumeFileStatusResponse{}
- mi := &file_volume_server_proto_msgTypes[69]
+ mi := &file_volume_server_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3689,7 +4063,7 @@ func (x *ReadVolumeFileStatusResponse) String() string {
func (*ReadVolumeFileStatusResponse) ProtoMessage() {}
func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[69]
+ mi := &file_volume_server_proto_msgTypes[75]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3702,7 +4076,7 @@ func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead.
func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{69}
+ return file_volume_server_proto_rawDescGZIP(), []int{75}
}
func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 {
@@ -3797,7 +4171,7 @@ type DiskStatus struct {
func (x *DiskStatus) Reset() {
*x = DiskStatus{}
- mi := &file_volume_server_proto_msgTypes[70]
+ mi := &file_volume_server_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3809,7 +4183,7 @@ func (x *DiskStatus) String() string {
func (*DiskStatus) ProtoMessage() {}
func (x *DiskStatus) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[70]
+ mi := &file_volume_server_proto_msgTypes[76]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3822,7 +4196,7 @@ func (x *DiskStatus) ProtoReflect() protoreflect.Message {
// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead.
func (*DiskStatus) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{70}
+ return file_volume_server_proto_rawDescGZIP(), []int{76}
}
func (x *DiskStatus) GetDir() string {
@@ -3889,7 +4263,7 @@ type MemStatus struct {
func (x *MemStatus) Reset() {
*x = MemStatus{}
- mi := &file_volume_server_proto_msgTypes[71]
+ mi := &file_volume_server_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3901,7 +4275,7 @@ func (x *MemStatus) String() string {
func (*MemStatus) ProtoMessage() {}
func (x *MemStatus) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[71]
+ mi := &file_volume_server_proto_msgTypes[77]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3914,7 +4288,7 @@ func (x *MemStatus) ProtoReflect() protoreflect.Message {
// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead.
func (*MemStatus) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{71}
+ return file_volume_server_proto_rawDescGZIP(), []int{77}
}
func (x *MemStatus) GetGoroutines() int32 {
@@ -3982,7 +4356,7 @@ type RemoteFile struct {
func (x *RemoteFile) Reset() {
*x = RemoteFile{}
- mi := &file_volume_server_proto_msgTypes[72]
+ mi := &file_volume_server_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3994,7 +4368,7 @@ func (x *RemoteFile) String() string {
func (*RemoteFile) ProtoMessage() {}
func (x *RemoteFile) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[72]
+ mi := &file_volume_server_proto_msgTypes[78]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4007,7 +4381,7 @@ func (x *RemoteFile) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead.
func (*RemoteFile) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{72}
+ return file_volume_server_proto_rawDescGZIP(), []int{78}
}
func (x *RemoteFile) GetBackendType() string {
@@ -4074,7 +4448,7 @@ type VolumeInfo struct {
func (x *VolumeInfo) Reset() {
*x = VolumeInfo{}
- mi := &file_volume_server_proto_msgTypes[73]
+ mi := &file_volume_server_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4086,7 +4460,7 @@ func (x *VolumeInfo) String() string {
func (*VolumeInfo) ProtoMessage() {}
func (x *VolumeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[73]
+ mi := &file_volume_server_proto_msgTypes[79]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4099,7 +4473,7 @@ func (x *VolumeInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead.
func (*VolumeInfo) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{73}
+ return file_volume_server_proto_rawDescGZIP(), []int{79}
}
func (x *VolumeInfo) GetFiles() []*RemoteFile {
@@ -4166,7 +4540,7 @@ type OldVersionVolumeInfo struct {
func (x *OldVersionVolumeInfo) Reset() {
*x = OldVersionVolumeInfo{}
- mi := &file_volume_server_proto_msgTypes[74]
+ mi := &file_volume_server_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4178,7 +4552,7 @@ func (x *OldVersionVolumeInfo) String() string {
func (*OldVersionVolumeInfo) ProtoMessage() {}
func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[74]
+ mi := &file_volume_server_proto_msgTypes[80]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4191,7 +4565,7 @@ func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use OldVersionVolumeInfo.ProtoReflect.Descriptor instead.
func (*OldVersionVolumeInfo) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{74}
+ return file_volume_server_proto_rawDescGZIP(), []int{80}
}
func (x *OldVersionVolumeInfo) GetFiles() []*RemoteFile {
@@ -4256,7 +4630,7 @@ type VolumeTierMoveDatToRemoteRequest struct {
func (x *VolumeTierMoveDatToRemoteRequest) Reset() {
*x = VolumeTierMoveDatToRemoteRequest{}
- mi := &file_volume_server_proto_msgTypes[75]
+ mi := &file_volume_server_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4268,7 +4642,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string {
func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[75]
+ mi := &file_volume_server_proto_msgTypes[81]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4281,7 +4655,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead.
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{75}
+ return file_volume_server_proto_rawDescGZIP(), []int{81}
}
func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
@@ -4322,7 +4696,7 @@ type VolumeTierMoveDatToRemoteResponse struct {
func (x *VolumeTierMoveDatToRemoteResponse) Reset() {
*x = VolumeTierMoveDatToRemoteResponse{}
- mi := &file_volume_server_proto_msgTypes[76]
+ mi := &file_volume_server_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4334,7 +4708,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string {
func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[76]
+ mi := &file_volume_server_proto_msgTypes[82]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4347,7 +4721,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message
// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead.
func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{76}
+ return file_volume_server_proto_rawDescGZIP(), []int{82}
}
func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
@@ -4375,7 +4749,7 @@ type VolumeTierMoveDatFromRemoteRequest struct {
func (x *VolumeTierMoveDatFromRemoteRequest) Reset() {
*x = VolumeTierMoveDatFromRemoteRequest{}
- mi := &file_volume_server_proto_msgTypes[77]
+ mi := &file_volume_server_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4387,7 +4761,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string {
func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[77]
+ mi := &file_volume_server_proto_msgTypes[83]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4400,7 +4774,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead.
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{77}
+ return file_volume_server_proto_rawDescGZIP(), []int{83}
}
func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
@@ -4434,7 +4808,7 @@ type VolumeTierMoveDatFromRemoteResponse struct {
func (x *VolumeTierMoveDatFromRemoteResponse) Reset() {
*x = VolumeTierMoveDatFromRemoteResponse{}
- mi := &file_volume_server_proto_msgTypes[78]
+ mi := &file_volume_server_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4446,7 +4820,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string {
func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[78]
+ mi := &file_volume_server_proto_msgTypes[84]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4459,7 +4833,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag
// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead.
func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{78}
+ return file_volume_server_proto_rawDescGZIP(), []int{84}
}
func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
@@ -4484,7 +4858,7 @@ type VolumeServerStatusRequest struct {
func (x *VolumeServerStatusRequest) Reset() {
*x = VolumeServerStatusRequest{}
- mi := &file_volume_server_proto_msgTypes[79]
+ mi := &file_volume_server_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4496,7 +4870,7 @@ func (x *VolumeServerStatusRequest) String() string {
func (*VolumeServerStatusRequest) ProtoMessage() {}
func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[79]
+ mi := &file_volume_server_proto_msgTypes[85]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4509,7 +4883,7 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead.
func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{79}
+ return file_volume_server_proto_rawDescGZIP(), []int{85}
}
type VolumeServerStatusResponse struct {
@@ -4525,7 +4899,7 @@ type VolumeServerStatusResponse struct {
func (x *VolumeServerStatusResponse) Reset() {
*x = VolumeServerStatusResponse{}
- mi := &file_volume_server_proto_msgTypes[80]
+ mi := &file_volume_server_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4537,7 +4911,7 @@ func (x *VolumeServerStatusResponse) String() string {
func (*VolumeServerStatusResponse) ProtoMessage() {}
func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[80]
+ mi := &file_volume_server_proto_msgTypes[86]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4550,7 +4924,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead.
func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{80}
+ return file_volume_server_proto_rawDescGZIP(), []int{86}
}
func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus {
@@ -4596,7 +4970,7 @@ type VolumeServerLeaveRequest struct {
func (x *VolumeServerLeaveRequest) Reset() {
*x = VolumeServerLeaveRequest{}
- mi := &file_volume_server_proto_msgTypes[81]
+ mi := &file_volume_server_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4608,7 +4982,7 @@ func (x *VolumeServerLeaveRequest) String() string {
func (*VolumeServerLeaveRequest) ProtoMessage() {}
func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[81]
+ mi := &file_volume_server_proto_msgTypes[87]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4621,7 +4995,7 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead.
func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{81}
+ return file_volume_server_proto_rawDescGZIP(), []int{87}
}
type VolumeServerLeaveResponse struct {
@@ -4632,7 +5006,7 @@ type VolumeServerLeaveResponse struct {
func (x *VolumeServerLeaveResponse) Reset() {
*x = VolumeServerLeaveResponse{}
- mi := &file_volume_server_proto_msgTypes[82]
+ mi := &file_volume_server_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4644,7 +5018,7 @@ func (x *VolumeServerLeaveResponse) String() string {
func (*VolumeServerLeaveResponse) ProtoMessage() {}
func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[82]
+ mi := &file_volume_server_proto_msgTypes[88]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4657,7 +5031,7 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead.
func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{82}
+ return file_volume_server_proto_rawDescGZIP(), []int{88}
}
// remote storage
@@ -4679,7 +5053,7 @@ type FetchAndWriteNeedleRequest struct {
func (x *FetchAndWriteNeedleRequest) Reset() {
*x = FetchAndWriteNeedleRequest{}
- mi := &file_volume_server_proto_msgTypes[83]
+ mi := &file_volume_server_proto_msgTypes[89]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4691,7 +5065,7 @@ func (x *FetchAndWriteNeedleRequest) String() string {
func (*FetchAndWriteNeedleRequest) ProtoMessage() {}
func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[83]
+ mi := &file_volume_server_proto_msgTypes[89]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4704,7 +5078,7 @@ func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead.
func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{83}
+ return file_volume_server_proto_rawDescGZIP(), []int{89}
}
func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 {
@@ -4779,7 +5153,7 @@ type FetchAndWriteNeedleResponse struct {
func (x *FetchAndWriteNeedleResponse) Reset() {
*x = FetchAndWriteNeedleResponse{}
- mi := &file_volume_server_proto_msgTypes[84]
+ mi := &file_volume_server_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4791,7 +5165,7 @@ func (x *FetchAndWriteNeedleResponse) String() string {
func (*FetchAndWriteNeedleResponse) ProtoMessage() {}
func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[84]
+ mi := &file_volume_server_proto_msgTypes[90]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4804,7 +5178,7 @@ func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead.
func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{84}
+ return file_volume_server_proto_rawDescGZIP(), []int{90}
}
func (x *FetchAndWriteNeedleResponse) GetETag() string {
@@ -4828,7 +5202,7 @@ type QueryRequest struct {
func (x *QueryRequest) Reset() {
*x = QueryRequest{}
- mi := &file_volume_server_proto_msgTypes[85]
+ mi := &file_volume_server_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4840,7 +5214,7 @@ func (x *QueryRequest) String() string {
func (*QueryRequest) ProtoMessage() {}
func (x *QueryRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[85]
+ mi := &file_volume_server_proto_msgTypes[91]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4853,7 +5227,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead.
func (*QueryRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85}
+ return file_volume_server_proto_rawDescGZIP(), []int{91}
}
func (x *QueryRequest) GetSelections() []string {
@@ -4900,7 +5274,7 @@ type QueriedStripe struct {
func (x *QueriedStripe) Reset() {
*x = QueriedStripe{}
- mi := &file_volume_server_proto_msgTypes[86]
+ mi := &file_volume_server_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4912,7 +5286,7 @@ func (x *QueriedStripe) String() string {
func (*QueriedStripe) ProtoMessage() {}
func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[86]
+ mi := &file_volume_server_proto_msgTypes[92]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4925,7 +5299,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead.
func (*QueriedStripe) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{86}
+ return file_volume_server_proto_rawDescGZIP(), []int{92}
}
func (x *QueriedStripe) GetRecords() []byte {
@@ -4945,7 +5319,7 @@ type VolumeNeedleStatusRequest struct {
func (x *VolumeNeedleStatusRequest) Reset() {
*x = VolumeNeedleStatusRequest{}
- mi := &file_volume_server_proto_msgTypes[87]
+ mi := &file_volume_server_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4957,7 +5331,7 @@ func (x *VolumeNeedleStatusRequest) String() string {
func (*VolumeNeedleStatusRequest) ProtoMessage() {}
func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[87]
+ mi := &file_volume_server_proto_msgTypes[93]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4970,7 +5344,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead.
func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{87}
+ return file_volume_server_proto_rawDescGZIP(), []int{93}
}
func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 {
@@ -5001,7 +5375,7 @@ type VolumeNeedleStatusResponse struct {
func (x *VolumeNeedleStatusResponse) Reset() {
*x = VolumeNeedleStatusResponse{}
- mi := &file_volume_server_proto_msgTypes[88]
+ mi := &file_volume_server_proto_msgTypes[94]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5013,7 +5387,7 @@ func (x *VolumeNeedleStatusResponse) String() string {
func (*VolumeNeedleStatusResponse) ProtoMessage() {}
func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[88]
+ mi := &file_volume_server_proto_msgTypes[94]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5026,7 +5400,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead.
func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{88}
+ return file_volume_server_proto_rawDescGZIP(), []int{94}
}
func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 {
@@ -5081,7 +5455,7 @@ type PingRequest struct {
func (x *PingRequest) Reset() {
*x = PingRequest{}
- mi := &file_volume_server_proto_msgTypes[89]
+ mi := &file_volume_server_proto_msgTypes[95]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5093,7 +5467,7 @@ func (x *PingRequest) String() string {
func (*PingRequest) ProtoMessage() {}
func (x *PingRequest) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[89]
+ mi := &file_volume_server_proto_msgTypes[95]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5106,7 +5480,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead.
func (*PingRequest) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{89}
+ return file_volume_server_proto_rawDescGZIP(), []int{95}
}
func (x *PingRequest) GetTarget() string {
@@ -5134,7 +5508,7 @@ type PingResponse struct {
func (x *PingResponse) Reset() {
*x = PingResponse{}
- mi := &file_volume_server_proto_msgTypes[90]
+ mi := &file_volume_server_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5146,7 +5520,7 @@ func (x *PingResponse) String() string {
func (*PingResponse) ProtoMessage() {}
func (x *PingResponse) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[90]
+ mi := &file_volume_server_proto_msgTypes[96]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5159,7 +5533,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead.
func (*PingResponse) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{90}
+ return file_volume_server_proto_rawDescGZIP(), []int{96}
}
func (x *PingResponse) GetStartTimeNs() int64 {
@@ -5194,7 +5568,7 @@ type FetchAndWriteNeedleRequest_Replica struct {
func (x *FetchAndWriteNeedleRequest_Replica) Reset() {
*x = FetchAndWriteNeedleRequest_Replica{}
- mi := &file_volume_server_proto_msgTypes[91]
+ mi := &file_volume_server_proto_msgTypes[97]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5206,7 +5580,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) String() string {
func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {}
func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[91]
+ mi := &file_volume_server_proto_msgTypes[97]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5219,7 +5593,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message
// Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead.
func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{83, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{89, 0}
}
func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string {
@@ -5254,7 +5628,7 @@ type QueryRequest_Filter struct {
func (x *QueryRequest_Filter) Reset() {
*x = QueryRequest_Filter{}
- mi := &file_volume_server_proto_msgTypes[92]
+ mi := &file_volume_server_proto_msgTypes[98]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5266,7 +5640,7 @@ func (x *QueryRequest_Filter) String() string {
func (*QueryRequest_Filter) ProtoMessage() {}
func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[92]
+ mi := &file_volume_server_proto_msgTypes[98]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5279,7 +5653,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead.
func (*QueryRequest_Filter) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 0}
}
func (x *QueryRequest_Filter) GetField() string {
@@ -5316,7 +5690,7 @@ type QueryRequest_InputSerialization struct {
func (x *QueryRequest_InputSerialization) Reset() {
*x = QueryRequest_InputSerialization{}
- mi := &file_volume_server_proto_msgTypes[93]
+ mi := &file_volume_server_proto_msgTypes[99]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5328,7 +5702,7 @@ func (x *QueryRequest_InputSerialization) String() string {
func (*QueryRequest_InputSerialization) ProtoMessage() {}
func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[93]
+ mi := &file_volume_server_proto_msgTypes[99]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5341,7 +5715,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 1}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 1}
}
func (x *QueryRequest_InputSerialization) GetCompressionType() string {
@@ -5382,7 +5756,7 @@ type QueryRequest_OutputSerialization struct {
func (x *QueryRequest_OutputSerialization) Reset() {
*x = QueryRequest_OutputSerialization{}
- mi := &file_volume_server_proto_msgTypes[94]
+ mi := &file_volume_server_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5394,7 +5768,7 @@ func (x *QueryRequest_OutputSerialization) String() string {
func (*QueryRequest_OutputSerialization) ProtoMessage() {}
func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[94]
+ mi := &file_volume_server_proto_msgTypes[100]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5407,7 +5781,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead.
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 2}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 2}
}
func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
@@ -5440,7 +5814,7 @@ type QueryRequest_InputSerialization_CSVInput struct {
func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
*x = QueryRequest_InputSerialization_CSVInput{}
- mi := &file_volume_server_proto_msgTypes[95]
+ mi := &file_volume_server_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5452,7 +5826,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string {
func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[95]
+ mi := &file_volume_server_proto_msgTypes[101]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5465,7 +5839,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M
// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 1, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 0}
}
func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
@@ -5526,7 +5900,7 @@ type QueryRequest_InputSerialization_JSONInput struct {
func (x *QueryRequest_InputSerialization_JSONInput) Reset() {
*x = QueryRequest_InputSerialization_JSONInput{}
- mi := &file_volume_server_proto_msgTypes[96]
+ mi := &file_volume_server_proto_msgTypes[102]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5538,7 +5912,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string {
func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[96]
+ mi := &file_volume_server_proto_msgTypes[102]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5551,7 +5925,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.
// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 1, 1}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 1}
}
func (x *QueryRequest_InputSerialization_JSONInput) GetType() string {
@@ -5569,7 +5943,7 @@ type QueryRequest_InputSerialization_ParquetInput struct {
func (x *QueryRequest_InputSerialization_ParquetInput) Reset() {
*x = QueryRequest_InputSerialization_ParquetInput{}
- mi := &file_volume_server_proto_msgTypes[97]
+ mi := &file_volume_server_proto_msgTypes[103]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5581,7 +5955,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string {
func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[97]
+ mi := &file_volume_server_proto_msgTypes[103]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5594,7 +5968,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle
// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 1, 2}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 2}
}
type QueryRequest_OutputSerialization_CSVOutput struct {
@@ -5610,7 +5984,7 @@ type QueryRequest_OutputSerialization_CSVOutput struct {
func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() {
*x = QueryRequest_OutputSerialization_CSVOutput{}
- mi := &file_volume_server_proto_msgTypes[98]
+ mi := &file_volume_server_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5622,7 +5996,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string {
func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[98]
+ mi := &file_volume_server_proto_msgTypes[104]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5635,7 +6009,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect
// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead.
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 2, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 0}
}
func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
@@ -5682,7 +6056,7 @@ type QueryRequest_OutputSerialization_JSONOutput struct {
func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() {
*x = QueryRequest_OutputSerialization_JSONOutput{}
- mi := &file_volume_server_proto_msgTypes[99]
+ mi := &file_volume_server_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5694,7 +6068,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string {
func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message {
- mi := &file_volume_server_proto_msgTypes[99]
+ mi := &file_volume_server_proto_msgTypes[105]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5707,7 +6081,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec
// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead.
func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
- return file_volume_server_proto_rawDescGZIP(), []int{85, 2, 1}
+ return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 1}
}
func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
@@ -5850,7 +6224,24 @@ const file_volume_server_proto_rawDesc = "" +
"\x1cignore_source_file_not_found\x18\a \x01(\bR\x18ignoreSourceFileNotFound\"[\n" +
"\x10CopyFileResponse\x12!\n" +
"\ffile_content\x18\x01 \x01(\fR\vfileContent\x12$\n" +
- "\x0emodified_ts_ns\x18\x02 \x01(\x03R\fmodifiedTsNs\"`\n" +
+ "\x0emodified_ts_ns\x18\x02 \x01(\x03R\fmodifiedTsNs\"z\n" +
+ "\x12ReceiveFileRequest\x127\n" +
+ "\x04info\x18\x01 \x01(\v2!.volume_server_pb.ReceiveFileInfoH\x00R\x04info\x12#\n" +
+ "\ffile_content\x18\x02 \x01(\fH\x00R\vfileContentB\x06\n" +
+ "\x04data\"\xba\x01\n" +
+ "\x0fReceiveFileInfo\x12\x1b\n" +
+ "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x10\n" +
+ "\x03ext\x18\x02 \x01(\tR\x03ext\x12\x1e\n" +
+ "\n" +
+ "collection\x18\x03 \x01(\tR\n" +
+ "collection\x12 \n" +
+ "\fis_ec_volume\x18\x04 \x01(\bR\n" +
+ "isEcVolume\x12\x19\n" +
+ "\bshard_id\x18\x05 \x01(\rR\ashardId\x12\x1b\n" +
+ "\tfile_size\x18\x06 \x01(\x04R\bfileSize\"P\n" +
+ "\x13ReceiveFileResponse\x12#\n" +
+ "\rbytes_written\x18\x01 \x01(\x04R\fbytesWritten\x12\x14\n" +
+ "\x05error\x18\x02 \x01(\tR\x05error\"`\n" +
"\x15ReadNeedleBlobRequest\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x16\n" +
"\x06offset\x18\x03 \x01(\x03R\x06offset\x12\x12\n" +
@@ -5920,7 +6311,7 @@ const file_volume_server_proto_rawDesc = "" +
"collection\x18\x02 \x01(\tR\n" +
"collection\"K\n" +
"\x1dVolumeEcShardsRebuildResponse\x12*\n" +
- "\x11rebuilt_shard_ids\x18\x01 \x03(\rR\x0frebuiltShardIds\"\x8b\x02\n" +
+ "\x11rebuilt_shard_ids\x18\x01 \x03(\rR\x0frebuiltShardIds\"\xa4\x02\n" +
"\x19VolumeEcShardsCopyRequest\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" +
"\n" +
@@ -5930,7 +6321,8 @@ const file_volume_server_proto_rawDesc = "" +
"\rcopy_ecx_file\x18\x04 \x01(\bR\vcopyEcxFile\x12(\n" +
"\x10source_data_node\x18\x05 \x01(\tR\x0esourceDataNode\x12\"\n" +
"\rcopy_ecj_file\x18\x06 \x01(\bR\vcopyEcjFile\x12\"\n" +
- "\rcopy_vif_file\x18\a \x01(\bR\vcopyVifFile\"\x1c\n" +
+ "\rcopy_vif_file\x18\a \x01(\bR\vcopyVifFile\x12\x17\n" +
+ "\adisk_id\x18\b \x01(\rR\x06diskId\"\x1c\n" +
"\x1aVolumeEcShardsCopyResponse\"w\n" +
"\x1bVolumeEcShardsDeleteRequest\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" +
@@ -5973,7 +6365,17 @@ const file_volume_server_proto_rawDesc = "" +
"\n" +
"collection\x18\x02 \x01(\tR\n" +
"collection\" \n" +
- "\x1eVolumeEcShardsToVolumeResponse\":\n" +
+ "\x1eVolumeEcShardsToVolumeResponse\"8\n" +
+ "\x19VolumeEcShardsInfoRequest\x12\x1b\n" +
+ "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"a\n" +
+ "\x1aVolumeEcShardsInfoResponse\x12C\n" +
+ "\x0eec_shard_infos\x18\x01 \x03(\v2\x1d.volume_server_pb.EcShardInfoR\fecShardInfos\"\\\n" +
+ "\vEcShardInfo\x12\x19\n" +
+ "\bshard_id\x18\x01 \x01(\rR\ashardId\x12\x12\n" +
+ "\x04size\x18\x02 \x01(\x03R\x04size\x12\x1e\n" +
+ "\n" +
+ "collection\x18\x03 \x01(\tR\n" +
+ "collection\":\n" +
"\x1bReadVolumeFileStatusRequest\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\xe3\x03\n" +
"\x1cReadVolumeFileStatusResponse\x12\x1b\n" +
@@ -6149,7 +6551,7 @@ const file_volume_server_proto_rawDesc = "" +
"\rstart_time_ns\x18\x01 \x01(\x03R\vstartTimeNs\x12$\n" +
"\x0eremote_time_ns\x18\x02 \x01(\x03R\fremoteTimeNs\x12 \n" +
"\fstop_time_ns\x18\x03 \x01(\x03R\n" +
- "stopTimeNs2\xbc$\n" +
+ "stopTimeNs2\x8f&\n" +
"\fVolumeServer\x12\\\n" +
"\vBatchDelete\x12$.volume_server_pb.BatchDeleteRequest\x1a%.volume_server_pb.BatchDeleteResponse\"\x00\x12n\n" +
"\x11VacuumVolumeCheck\x12*.volume_server_pb.VacuumVolumeCheckRequest\x1a+.volume_server_pb.VacuumVolumeCheckResponse\"\x00\x12v\n" +
@@ -6170,7 +6572,8 @@ const file_volume_server_proto_rawDesc = "" +
"\n" +
"VolumeCopy\x12#.volume_server_pb.VolumeCopyRequest\x1a$.volume_server_pb.VolumeCopyResponse\"\x000\x01\x12w\n" +
"\x14ReadVolumeFileStatus\x12-.volume_server_pb.ReadVolumeFileStatusRequest\x1a..volume_server_pb.ReadVolumeFileStatusResponse\"\x00\x12U\n" +
- "\bCopyFile\x12!.volume_server_pb.CopyFileRequest\x1a\".volume_server_pb.CopyFileResponse\"\x000\x01\x12e\n" +
+ "\bCopyFile\x12!.volume_server_pb.CopyFileRequest\x1a\".volume_server_pb.CopyFileResponse\"\x000\x01\x12^\n" +
+ "\vReceiveFile\x12$.volume_server_pb.ReceiveFileRequest\x1a%.volume_server_pb.ReceiveFileResponse\"\x00(\x01\x12e\n" +
"\x0eReadNeedleBlob\x12'.volume_server_pb.ReadNeedleBlobRequest\x1a(.volume_server_pb.ReadNeedleBlobResponse\"\x00\x12e\n" +
"\x0eReadNeedleMeta\x12'.volume_server_pb.ReadNeedleMetaRequest\x1a(.volume_server_pb.ReadNeedleMetaResponse\"\x00\x12h\n" +
"\x0fWriteNeedleBlob\x12(.volume_server_pb.WriteNeedleBlobRequest\x1a).volume_server_pb.WriteNeedleBlobResponse\"\x00\x12g\n" +
@@ -6185,7 +6588,8 @@ const file_volume_server_proto_rawDesc = "" +
"\x15VolumeEcShardsUnmount\x12..volume_server_pb.VolumeEcShardsUnmountRequest\x1a/.volume_server_pb.VolumeEcShardsUnmountResponse\"\x00\x12p\n" +
"\x11VolumeEcShardRead\x12*.volume_server_pb.VolumeEcShardReadRequest\x1a+.volume_server_pb.VolumeEcShardReadResponse\"\x000\x01\x12q\n" +
"\x12VolumeEcBlobDelete\x12+.volume_server_pb.VolumeEcBlobDeleteRequest\x1a,.volume_server_pb.VolumeEcBlobDeleteResponse\"\x00\x12}\n" +
- "\x16VolumeEcShardsToVolume\x12/.volume_server_pb.VolumeEcShardsToVolumeRequest\x1a0.volume_server_pb.VolumeEcShardsToVolumeResponse\"\x00\x12\x88\x01\n" +
+ "\x16VolumeEcShardsToVolume\x12/.volume_server_pb.VolumeEcShardsToVolumeRequest\x1a0.volume_server_pb.VolumeEcShardsToVolumeResponse\"\x00\x12q\n" +
+ "\x12VolumeEcShardsInfo\x12+.volume_server_pb.VolumeEcShardsInfoRequest\x1a,.volume_server_pb.VolumeEcShardsInfoResponse\"\x00\x12\x88\x01\n" +
"\x19VolumeTierMoveDatToRemote\x122.volume_server_pb.VolumeTierMoveDatToRemoteRequest\x1a3.volume_server_pb.VolumeTierMoveDatToRemoteResponse\"\x000\x01\x12\x8e\x01\n" +
"\x1bVolumeTierMoveDatFromRemote\x124.volume_server_pb.VolumeTierMoveDatFromRemoteRequest\x1a5.volume_server_pb.VolumeTierMoveDatFromRemoteResponse\"\x000\x01\x12q\n" +
"\x12VolumeServerStatus\x12+.volume_server_pb.VolumeServerStatusRequest\x1a,.volume_server_pb.VolumeServerStatusResponse\"\x00\x12n\n" +
@@ -6207,7 +6611,7 @@ func file_volume_server_proto_rawDescGZIP() []byte {
return file_volume_server_proto_rawDescData
}
-var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 100)
+var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 106)
var file_volume_server_proto_goTypes = []any{
(*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest
(*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse
@@ -6247,178 +6651,190 @@ var file_volume_server_proto_goTypes = []any{
(*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse
(*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest
(*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse
- (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest
- (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse
- (*ReadNeedleMetaRequest)(nil), // 40: volume_server_pb.ReadNeedleMetaRequest
- (*ReadNeedleMetaResponse)(nil), // 41: volume_server_pb.ReadNeedleMetaResponse
- (*WriteNeedleBlobRequest)(nil), // 42: volume_server_pb.WriteNeedleBlobRequest
- (*WriteNeedleBlobResponse)(nil), // 43: volume_server_pb.WriteNeedleBlobResponse
- (*ReadAllNeedlesRequest)(nil), // 44: volume_server_pb.ReadAllNeedlesRequest
- (*ReadAllNeedlesResponse)(nil), // 45: volume_server_pb.ReadAllNeedlesResponse
- (*VolumeTailSenderRequest)(nil), // 46: volume_server_pb.VolumeTailSenderRequest
- (*VolumeTailSenderResponse)(nil), // 47: volume_server_pb.VolumeTailSenderResponse
- (*VolumeTailReceiverRequest)(nil), // 48: volume_server_pb.VolumeTailReceiverRequest
- (*VolumeTailReceiverResponse)(nil), // 49: volume_server_pb.VolumeTailReceiverResponse
- (*VolumeEcShardsGenerateRequest)(nil), // 50: volume_server_pb.VolumeEcShardsGenerateRequest
- (*VolumeEcShardsGenerateResponse)(nil), // 51: volume_server_pb.VolumeEcShardsGenerateResponse
- (*VolumeEcShardsRebuildRequest)(nil), // 52: volume_server_pb.VolumeEcShardsRebuildRequest
- (*VolumeEcShardsRebuildResponse)(nil), // 53: volume_server_pb.VolumeEcShardsRebuildResponse
- (*VolumeEcShardsCopyRequest)(nil), // 54: volume_server_pb.VolumeEcShardsCopyRequest
- (*VolumeEcShardsCopyResponse)(nil), // 55: volume_server_pb.VolumeEcShardsCopyResponse
- (*VolumeEcShardsDeleteRequest)(nil), // 56: volume_server_pb.VolumeEcShardsDeleteRequest
- (*VolumeEcShardsDeleteResponse)(nil), // 57: volume_server_pb.VolumeEcShardsDeleteResponse
- (*VolumeEcShardsMountRequest)(nil), // 58: volume_server_pb.VolumeEcShardsMountRequest
- (*VolumeEcShardsMountResponse)(nil), // 59: volume_server_pb.VolumeEcShardsMountResponse
- (*VolumeEcShardsUnmountRequest)(nil), // 60: volume_server_pb.VolumeEcShardsUnmountRequest
- (*VolumeEcShardsUnmountResponse)(nil), // 61: volume_server_pb.VolumeEcShardsUnmountResponse
- (*VolumeEcShardReadRequest)(nil), // 62: volume_server_pb.VolumeEcShardReadRequest
- (*VolumeEcShardReadResponse)(nil), // 63: volume_server_pb.VolumeEcShardReadResponse
- (*VolumeEcBlobDeleteRequest)(nil), // 64: volume_server_pb.VolumeEcBlobDeleteRequest
- (*VolumeEcBlobDeleteResponse)(nil), // 65: volume_server_pb.VolumeEcBlobDeleteResponse
- (*VolumeEcShardsToVolumeRequest)(nil), // 66: volume_server_pb.VolumeEcShardsToVolumeRequest
- (*VolumeEcShardsToVolumeResponse)(nil), // 67: volume_server_pb.VolumeEcShardsToVolumeResponse
- (*ReadVolumeFileStatusRequest)(nil), // 68: volume_server_pb.ReadVolumeFileStatusRequest
- (*ReadVolumeFileStatusResponse)(nil), // 69: volume_server_pb.ReadVolumeFileStatusResponse
- (*DiskStatus)(nil), // 70: volume_server_pb.DiskStatus
- (*MemStatus)(nil), // 71: volume_server_pb.MemStatus
- (*RemoteFile)(nil), // 72: volume_server_pb.RemoteFile
- (*VolumeInfo)(nil), // 73: volume_server_pb.VolumeInfo
- (*OldVersionVolumeInfo)(nil), // 74: volume_server_pb.OldVersionVolumeInfo
- (*VolumeTierMoveDatToRemoteRequest)(nil), // 75: volume_server_pb.VolumeTierMoveDatToRemoteRequest
- (*VolumeTierMoveDatToRemoteResponse)(nil), // 76: volume_server_pb.VolumeTierMoveDatToRemoteResponse
- (*VolumeTierMoveDatFromRemoteRequest)(nil), // 77: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
- (*VolumeTierMoveDatFromRemoteResponse)(nil), // 78: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
- (*VolumeServerStatusRequest)(nil), // 79: volume_server_pb.VolumeServerStatusRequest
- (*VolumeServerStatusResponse)(nil), // 80: volume_server_pb.VolumeServerStatusResponse
- (*VolumeServerLeaveRequest)(nil), // 81: volume_server_pb.VolumeServerLeaveRequest
- (*VolumeServerLeaveResponse)(nil), // 82: volume_server_pb.VolumeServerLeaveResponse
- (*FetchAndWriteNeedleRequest)(nil), // 83: volume_server_pb.FetchAndWriteNeedleRequest
- (*FetchAndWriteNeedleResponse)(nil), // 84: volume_server_pb.FetchAndWriteNeedleResponse
- (*QueryRequest)(nil), // 85: volume_server_pb.QueryRequest
- (*QueriedStripe)(nil), // 86: volume_server_pb.QueriedStripe
- (*VolumeNeedleStatusRequest)(nil), // 87: volume_server_pb.VolumeNeedleStatusRequest
- (*VolumeNeedleStatusResponse)(nil), // 88: volume_server_pb.VolumeNeedleStatusResponse
- (*PingRequest)(nil), // 89: volume_server_pb.PingRequest
- (*PingResponse)(nil), // 90: volume_server_pb.PingResponse
- (*FetchAndWriteNeedleRequest_Replica)(nil), // 91: volume_server_pb.FetchAndWriteNeedleRequest.Replica
- (*QueryRequest_Filter)(nil), // 92: volume_server_pb.QueryRequest.Filter
- (*QueryRequest_InputSerialization)(nil), // 93: volume_server_pb.QueryRequest.InputSerialization
- (*QueryRequest_OutputSerialization)(nil), // 94: volume_server_pb.QueryRequest.OutputSerialization
- (*QueryRequest_InputSerialization_CSVInput)(nil), // 95: volume_server_pb.QueryRequest.InputSerialization.CSVInput
- (*QueryRequest_InputSerialization_JSONInput)(nil), // 96: volume_server_pb.QueryRequest.InputSerialization.JSONInput
- (*QueryRequest_InputSerialization_ParquetInput)(nil), // 97: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
- (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 98: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
- (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 99: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
- (*remote_pb.RemoteConf)(nil), // 100: remote_pb.RemoteConf
- (*remote_pb.RemoteStorageLocation)(nil), // 101: remote_pb.RemoteStorageLocation
+ (*ReceiveFileRequest)(nil), // 38: volume_server_pb.ReceiveFileRequest
+ (*ReceiveFileInfo)(nil), // 39: volume_server_pb.ReceiveFileInfo
+ (*ReceiveFileResponse)(nil), // 40: volume_server_pb.ReceiveFileResponse
+ (*ReadNeedleBlobRequest)(nil), // 41: volume_server_pb.ReadNeedleBlobRequest
+ (*ReadNeedleBlobResponse)(nil), // 42: volume_server_pb.ReadNeedleBlobResponse
+ (*ReadNeedleMetaRequest)(nil), // 43: volume_server_pb.ReadNeedleMetaRequest
+ (*ReadNeedleMetaResponse)(nil), // 44: volume_server_pb.ReadNeedleMetaResponse
+ (*WriteNeedleBlobRequest)(nil), // 45: volume_server_pb.WriteNeedleBlobRequest
+ (*WriteNeedleBlobResponse)(nil), // 46: volume_server_pb.WriteNeedleBlobResponse
+ (*ReadAllNeedlesRequest)(nil), // 47: volume_server_pb.ReadAllNeedlesRequest
+ (*ReadAllNeedlesResponse)(nil), // 48: volume_server_pb.ReadAllNeedlesResponse
+ (*VolumeTailSenderRequest)(nil), // 49: volume_server_pb.VolumeTailSenderRequest
+ (*VolumeTailSenderResponse)(nil), // 50: volume_server_pb.VolumeTailSenderResponse
+ (*VolumeTailReceiverRequest)(nil), // 51: volume_server_pb.VolumeTailReceiverRequest
+ (*VolumeTailReceiverResponse)(nil), // 52: volume_server_pb.VolumeTailReceiverResponse
+ (*VolumeEcShardsGenerateRequest)(nil), // 53: volume_server_pb.VolumeEcShardsGenerateRequest
+ (*VolumeEcShardsGenerateResponse)(nil), // 54: volume_server_pb.VolumeEcShardsGenerateResponse
+ (*VolumeEcShardsRebuildRequest)(nil), // 55: volume_server_pb.VolumeEcShardsRebuildRequest
+ (*VolumeEcShardsRebuildResponse)(nil), // 56: volume_server_pb.VolumeEcShardsRebuildResponse
+ (*VolumeEcShardsCopyRequest)(nil), // 57: volume_server_pb.VolumeEcShardsCopyRequest
+ (*VolumeEcShardsCopyResponse)(nil), // 58: volume_server_pb.VolumeEcShardsCopyResponse
+ (*VolumeEcShardsDeleteRequest)(nil), // 59: volume_server_pb.VolumeEcShardsDeleteRequest
+ (*VolumeEcShardsDeleteResponse)(nil), // 60: volume_server_pb.VolumeEcShardsDeleteResponse
+ (*VolumeEcShardsMountRequest)(nil), // 61: volume_server_pb.VolumeEcShardsMountRequest
+ (*VolumeEcShardsMountResponse)(nil), // 62: volume_server_pb.VolumeEcShardsMountResponse
+ (*VolumeEcShardsUnmountRequest)(nil), // 63: volume_server_pb.VolumeEcShardsUnmountRequest
+ (*VolumeEcShardsUnmountResponse)(nil), // 64: volume_server_pb.VolumeEcShardsUnmountResponse
+ (*VolumeEcShardReadRequest)(nil), // 65: volume_server_pb.VolumeEcShardReadRequest
+ (*VolumeEcShardReadResponse)(nil), // 66: volume_server_pb.VolumeEcShardReadResponse
+ (*VolumeEcBlobDeleteRequest)(nil), // 67: volume_server_pb.VolumeEcBlobDeleteRequest
+ (*VolumeEcBlobDeleteResponse)(nil), // 68: volume_server_pb.VolumeEcBlobDeleteResponse
+ (*VolumeEcShardsToVolumeRequest)(nil), // 69: volume_server_pb.VolumeEcShardsToVolumeRequest
+ (*VolumeEcShardsToVolumeResponse)(nil), // 70: volume_server_pb.VolumeEcShardsToVolumeResponse
+ (*VolumeEcShardsInfoRequest)(nil), // 71: volume_server_pb.VolumeEcShardsInfoRequest
+ (*VolumeEcShardsInfoResponse)(nil), // 72: volume_server_pb.VolumeEcShardsInfoResponse
+ (*EcShardInfo)(nil), // 73: volume_server_pb.EcShardInfo
+ (*ReadVolumeFileStatusRequest)(nil), // 74: volume_server_pb.ReadVolumeFileStatusRequest
+ (*ReadVolumeFileStatusResponse)(nil), // 75: volume_server_pb.ReadVolumeFileStatusResponse
+ (*DiskStatus)(nil), // 76: volume_server_pb.DiskStatus
+ (*MemStatus)(nil), // 77: volume_server_pb.MemStatus
+ (*RemoteFile)(nil), // 78: volume_server_pb.RemoteFile
+ (*VolumeInfo)(nil), // 79: volume_server_pb.VolumeInfo
+ (*OldVersionVolumeInfo)(nil), // 80: volume_server_pb.OldVersionVolumeInfo
+ (*VolumeTierMoveDatToRemoteRequest)(nil), // 81: volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ (*VolumeTierMoveDatToRemoteResponse)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ (*VolumeTierMoveDatFromRemoteRequest)(nil), // 83: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ (*VolumeTierMoveDatFromRemoteResponse)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ (*VolumeServerStatusRequest)(nil), // 85: volume_server_pb.VolumeServerStatusRequest
+ (*VolumeServerStatusResponse)(nil), // 86: volume_server_pb.VolumeServerStatusResponse
+ (*VolumeServerLeaveRequest)(nil), // 87: volume_server_pb.VolumeServerLeaveRequest
+ (*VolumeServerLeaveResponse)(nil), // 88: volume_server_pb.VolumeServerLeaveResponse
+ (*FetchAndWriteNeedleRequest)(nil), // 89: volume_server_pb.FetchAndWriteNeedleRequest
+ (*FetchAndWriteNeedleResponse)(nil), // 90: volume_server_pb.FetchAndWriteNeedleResponse
+ (*QueryRequest)(nil), // 91: volume_server_pb.QueryRequest
+ (*QueriedStripe)(nil), // 92: volume_server_pb.QueriedStripe
+ (*VolumeNeedleStatusRequest)(nil), // 93: volume_server_pb.VolumeNeedleStatusRequest
+ (*VolumeNeedleStatusResponse)(nil), // 94: volume_server_pb.VolumeNeedleStatusResponse
+ (*PingRequest)(nil), // 95: volume_server_pb.PingRequest
+ (*PingResponse)(nil), // 96: volume_server_pb.PingResponse
+ (*FetchAndWriteNeedleRequest_Replica)(nil), // 97: volume_server_pb.FetchAndWriteNeedleRequest.Replica
+ (*QueryRequest_Filter)(nil), // 98: volume_server_pb.QueryRequest.Filter
+ (*QueryRequest_InputSerialization)(nil), // 99: volume_server_pb.QueryRequest.InputSerialization
+ (*QueryRequest_OutputSerialization)(nil), // 100: volume_server_pb.QueryRequest.OutputSerialization
+ (*QueryRequest_InputSerialization_CSVInput)(nil), // 101: volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ (*QueryRequest_InputSerialization_JSONInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ (*QueryRequest_InputSerialization_ParquetInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 104: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+ (*remote_pb.RemoteConf)(nil), // 106: remote_pb.RemoteConf
+ (*remote_pb.RemoteStorageLocation)(nil), // 107: remote_pb.RemoteStorageLocation
}
var file_volume_server_proto_depIdxs = []int32{
2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult
- 73, // 1: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo
- 72, // 2: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile
- 72, // 3: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile
- 70, // 4: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
- 71, // 5: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
- 91, // 6: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica
- 100, // 7: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf
- 101, // 8: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation
- 92, // 9: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
- 93, // 10: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
- 94, // 11: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
- 95, // 12: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
- 96, // 13: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
- 97, // 14: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
- 98, // 15: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
- 99, // 16: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
- 0, // 17: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
- 4, // 18: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
- 6, // 19: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
- 8, // 20: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
- 10, // 21: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
- 12, // 22: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
- 14, // 23: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
- 16, // 24: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
- 18, // 25: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
- 20, // 26: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
- 22, // 27: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
- 24, // 28: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
- 26, // 29: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
- 28, // 30: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest
- 30, // 31: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
- 32, // 32: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest
- 34, // 33: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
- 68, // 34: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
- 36, // 35: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
- 38, // 36: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest
- 40, // 37: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest
- 42, // 38: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest
- 44, // 39: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest
- 46, // 40: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
- 48, // 41: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
- 50, // 42: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
- 52, // 43: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
- 54, // 44: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
- 56, // 45: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
- 58, // 46: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
- 60, // 47: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
- 62, // 48: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
- 64, // 49: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
- 66, // 50: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
- 75, // 51: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
- 77, // 52: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
- 79, // 53: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
- 81, // 54: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest
- 83, // 55: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest
- 85, // 56: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest
- 87, // 57: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest
- 89, // 58: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest
- 1, // 59: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse
- 5, // 60: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse
- 7, // 61: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse
- 9, // 62: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse
- 11, // 63: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse
- 13, // 64: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse
- 15, // 65: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse
- 17, // 66: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse
- 19, // 67: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse
- 21, // 68: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse
- 23, // 69: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse
- 25, // 70: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse
- 27, // 71: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse
- 29, // 72: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse
- 31, // 73: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse
- 33, // 74: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse
- 35, // 75: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse
- 69, // 76: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse
- 37, // 77: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse
- 39, // 78: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse
- 41, // 79: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse
- 43, // 80: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse
- 45, // 81: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse
- 47, // 82: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse
- 49, // 83: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse
- 51, // 84: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse
- 53, // 85: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse
- 55, // 86: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
- 57, // 87: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
- 59, // 88: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
- 61, // 89: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
- 63, // 90: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
- 65, // 91: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
- 67, // 92: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
- 76, // 93: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
- 78, // 94: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
- 80, // 95: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
- 82, // 96: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse
- 84, // 97: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse
- 86, // 98: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
- 88, // 99: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse
- 90, // 100: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse
- 59, // [59:101] is the sub-list for method output_type
- 17, // [17:59] is the sub-list for method input_type
- 17, // [17:17] is the sub-list for extension type_name
- 17, // [17:17] is the sub-list for extension extendee
- 0, // [0:17] is the sub-list for field type_name
+ 39, // 1: volume_server_pb.ReceiveFileRequest.info:type_name -> volume_server_pb.ReceiveFileInfo
+ 73, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo
+ 79, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo
+ 78, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile
+ 78, // 5: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile
+ 76, // 6: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
+ 77, // 7: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
+ 97, // 8: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica
+ 106, // 9: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf
+ 107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation
+ 98, // 11: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
+ 99, // 12: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
+ 100, // 13: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
+ 101, // 14: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ 102, // 15: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ 103, // 16: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ 104, // 17: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ 105, // 18: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+ 0, // 19: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
+ 4, // 20: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
+ 6, // 21: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
+ 8, // 22: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
+ 10, // 23: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
+ 12, // 24: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
+ 14, // 25: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
+ 16, // 26: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
+ 18, // 27: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
+ 20, // 28: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
+ 22, // 29: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
+ 24, // 30: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
+ 26, // 31: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
+ 28, // 32: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest
+ 30, // 33: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
+ 32, // 34: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest
+ 34, // 35: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
+ 74, // 36: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
+ 36, // 37: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
+ 38, // 38: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest
+ 41, // 39: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest
+ 43, // 40: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest
+ 45, // 41: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest
+ 47, // 42: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest
+ 49, // 43: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
+ 51, // 44: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
+ 53, // 45: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
+ 55, // 46: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
+ 57, // 47: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
+ 59, // 48: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
+ 61, // 49: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
+ 63, // 50: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
+ 65, // 51: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
+ 67, // 52: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
+ 69, // 53: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
+ 71, // 54: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest
+ 81, // 55: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ 83, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ 85, // 57: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
+ 87, // 58: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest
+ 89, // 59: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest
+ 91, // 60: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest
+ 93, // 61: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest
+ 95, // 62: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest
+ 1, // 63: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse
+ 5, // 64: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse
+ 7, // 65: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse
+ 9, // 66: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse
+ 11, // 67: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse
+ 13, // 68: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse
+ 15, // 69: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse
+ 17, // 70: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse
+ 19, // 71: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse
+ 21, // 72: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse
+ 23, // 73: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse
+ 25, // 74: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse
+ 27, // 75: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse
+ 29, // 76: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse
+ 31, // 77: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse
+ 33, // 78: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse
+ 35, // 79: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse
+ 75, // 80: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse
+ 37, // 81: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse
+ 40, // 82: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse
+ 42, // 83: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse
+ 44, // 84: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse
+ 46, // 85: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse
+ 48, // 86: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse
+ 50, // 87: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse
+ 52, // 88: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse
+ 54, // 89: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse
+ 56, // 90: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse
+ 58, // 91: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
+ 60, // 92: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
+ 62, // 93: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
+ 64, // 94: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
+ 66, // 95: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
+ 68, // 96: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
+ 70, // 97: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
+ 72, // 98: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse
+ 82, // 99: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ 84, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ 86, // 101: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
+ 88, // 102: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse
+ 90, // 103: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse
+ 92, // 104: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
+ 94, // 105: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse
+ 96, // 106: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse
+ 63, // [63:107] is the sub-list for method output_type
+ 19, // [19:63] is the sub-list for method input_type
+ 19, // [19:19] is the sub-list for extension type_name
+ 19, // [19:19] is the sub-list for extension extendee
+ 0, // [0:19] is the sub-list for field type_name
}
func init() { file_volume_server_proto_init() }
@@ -6426,13 +6842,17 @@ func file_volume_server_proto_init() {
if File_volume_server_proto != nil {
return
}
+ file_volume_server_proto_msgTypes[38].OneofWrappers = []any{
+ (*ReceiveFileRequest_Info)(nil),
+ (*ReceiveFileRequest_FileContent)(nil),
+ }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), len(file_volume_server_proto_rawDesc)),
NumEnums: 0,
- NumMessages: 100,
+ NumMessages: 106,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/weed/pb/volume_server_pb/volume_server_grpc.pb.go b/weed/pb/volume_server_pb/volume_server_grpc.pb.go
index 48fa3e8ab..f43cff84c 100644
--- a/weed/pb/volume_server_pb/volume_server_grpc.pb.go
+++ b/weed/pb/volume_server_pb/volume_server_grpc.pb.go
@@ -38,6 +38,7 @@ const (
VolumeServer_VolumeCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeCopy"
VolumeServer_ReadVolumeFileStatus_FullMethodName = "/volume_server_pb.VolumeServer/ReadVolumeFileStatus"
VolumeServer_CopyFile_FullMethodName = "/volume_server_pb.VolumeServer/CopyFile"
+ VolumeServer_ReceiveFile_FullMethodName = "/volume_server_pb.VolumeServer/ReceiveFile"
VolumeServer_ReadNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleBlob"
VolumeServer_ReadNeedleMeta_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleMeta"
VolumeServer_WriteNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/WriteNeedleBlob"
@@ -53,6 +54,7 @@ const (
VolumeServer_VolumeEcShardRead_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardRead"
VolumeServer_VolumeEcBlobDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcBlobDelete"
VolumeServer_VolumeEcShardsToVolume_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume"
+ VolumeServer_VolumeEcShardsInfo_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsInfo"
VolumeServer_VolumeTierMoveDatToRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote"
VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote"
VolumeServer_VolumeServerStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeServerStatus"
@@ -88,6 +90,7 @@ type VolumeServerClient interface {
VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error)
ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error)
+ ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error)
ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error)
ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error)
WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error)
@@ -104,6 +107,7 @@ type VolumeServerClient interface {
VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error)
VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error)
VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error)
+ VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error)
// tiered storage
VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error)
VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error)
@@ -351,6 +355,19 @@ func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest,
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_CopyFileClient = grpc.ServerStreamingClient[CopyFileResponse]
+func (c *volumeServerClient) ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReceiveFile_FullMethodName, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &grpc.GenericClientStream[ReceiveFileRequest, ReceiveFileResponse]{ClientStream: stream}
+ return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type VolumeServer_ReceiveFileClient = grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse]
+
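Illustrative sketch (not part of the generated diff): a client-side use of the new client-streaming ReceiveFile RPC, where the first message carries a ReceiveFileInfo and later messages carry raw bytes, with the single ReceiveFileResponse read on CloseAndRecv. Only the method signature and the ReceiveFileRequest_Info / ReceiveFileRequest_FileContent oneof wrappers appear in this diff; the oneof field name ("Data"), the FileContent payload type, the ReceiveFileInfo fields, and the module import path are assumptions based on protoc-gen-go conventions.

package example

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" // import path assumed
)

// sendFile streams metadata followed by chunked file content to a volume server.
func sendFile(ctx context.Context, client volume_server_pb.VolumeServerClient, content []byte) error {
	stream, err := client.ReceiveFile(ctx)
	if err != nil {
		return err
	}
	// First message: file metadata (ReceiveFileInfo fields are not shown in this diff).
	if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{Info: &volume_server_pb.ReceiveFileInfo{}}, // "Data" oneof name is assumed
	}); err != nil {
		return err
	}
	// Following messages: raw bytes in fixed-size chunks.
	const chunkSize = 64 * 1024
	for off := 0; off < len(content); off += chunkSize {
		end := off + chunkSize
		if end > len(content) {
			end = len(content)
		}
		if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
			Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: content[off:end]},
		}); err != nil {
			return err
		}
	}
	_, err = stream.CloseAndRecv() // single ReceiveFileResponse from the server
	return err
}
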
func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ReadNeedleBlobResponse)
@@ -383,7 +400,7 @@ func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedl
func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -402,7 +419,7 @@ type VolumeServer_ReadAllNeedlesClient = grpc.ServerStreamingClient[ReadAllNeedl
func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_VolumeTailSender_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeTailSender_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -491,7 +508,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu
func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -528,9 +545,19 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol
return out, nil
}
+func (c *volumeServerClient) VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(VolumeEcShardsInfoResponse)
+ err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsInfo_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
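Illustrative sketch (not part of the generated diff): calling the new unary VolumeEcShardsInfo RPC to list a volume's EC shard layout, reusing the imports from the previous sketch. The VolumeId request field and the GetEcShardInfos accessor follow standard protoc-gen-go naming and are assumptions; the EcShardInfo fields themselves are not shown in this excerpt.

// listEcShards asks one volume server which EC shards of a volume it holds.
func listEcShards(ctx context.Context, client volume_server_pb.VolumeServerClient, volumeId uint32) ([]*volume_server_pb.EcShardInfo, error) {
	resp, err := client.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: volumeId, // assumed field name; the request body is not shown in this diff
	})
	if err != nil {
		return nil, err
	}
	return resp.GetEcShardInfos(), nil // accessor name follows protoc-gen-go conventions
}
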
func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -549,7 +576,7 @@ type VolumeServer_VolumeTierMoveDatToRemoteClient = grpc.ServerStreamingClient[V
func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -598,7 +625,7 @@ func (c *volumeServerClient) FetchAndWriteNeedle(ctx context.Context, in *FetchA
func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_Query_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[10], VolumeServer_Query_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@@ -660,6 +687,7 @@ type VolumeServerServer interface {
VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error
ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error
+ ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error
ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error)
ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error)
WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error)
@@ -676,6 +704,7 @@ type VolumeServerServer interface {
VolumeEcShardRead(*VolumeEcShardReadRequest, grpc.ServerStreamingServer[VolumeEcShardReadResponse]) error
VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error)
VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error)
+ VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error)
// tiered storage
VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error
VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse]) error
@@ -754,6 +783,9 @@ func (UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *Re
func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error {
return status.Errorf(codes.Unimplemented, "method CopyFile not implemented")
}
+func (UnimplementedVolumeServerServer) ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error {
+ return status.Errorf(codes.Unimplemented, "method ReceiveFile not implemented")
+}
func (UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented")
}
@@ -799,6 +831,9 @@ func (UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *Volu
func (UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented")
}
+func (UnimplementedVolumeServerServer) VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsInfo not implemented")
+}
func (UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error {
return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented")
}
@@ -1158,6 +1193,13 @@ func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) e
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_CopyFileServer = grpc.ServerStreamingServer[CopyFileResponse]
+func _VolumeServer_ReceiveFile_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(VolumeServerServer).ReceiveFile(&grpc.GenericServerStream[ReceiveFileRequest, ReceiveFileResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type VolumeServer_ReceiveFileServer = grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]
+
func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReadNeedleBlobRequest)
if err := dec(in); err != nil {
@@ -1407,6 +1449,24 @@ func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.C
return interceptor(ctx, in, info, handler)
}
+func _VolumeServer_VolumeEcShardsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsInfoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeEcShardsInfo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: VolumeServer_VolumeEcShardsInfo_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeEcShardsInfo(ctx, req.(*VolumeEcShardsInfoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(VolumeTierMoveDatToRemoteRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -1646,6 +1706,10 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{
Handler: _VolumeServer_VolumeEcShardsToVolume_Handler,
},
{
+ MethodName: "VolumeEcShardsInfo",
+ Handler: _VolumeServer_VolumeEcShardsInfo_Handler,
+ },
+ {
MethodName: "VolumeServerStatus",
Handler: _VolumeServer_VolumeServerStatus_Handler,
},
@@ -1688,6 +1752,11 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{
ServerStreams: true,
},
{
+ StreamName: "ReceiveFile",
+ Handler: _VolumeServer_ReceiveFile_Handler,
+ ClientStreams: true,
+ },
+ {
StreamName: "ReadAllNeedles",
Handler: _VolumeServer_ReadAllNeedles_Handler,
ServerStreams: true,
diff --git a/weed/pb/worker.proto b/weed/pb/worker.proto
index d96fce7d0..63eeea4b0 100644
--- a/weed/pb/worker.proto
+++ b/weed/pb/worker.proto
@@ -22,6 +22,7 @@ message WorkerMessage {
TaskUpdate task_update = 6;
TaskComplete task_complete = 7;
WorkerShutdown shutdown = 8;
+ TaskLogResponse task_log_response = 9;
}
}
@@ -36,6 +37,7 @@ message AdminMessage {
TaskAssignment task_assignment = 5;
TaskCancellation task_cancellation = 6;
AdminShutdown admin_shutdown = 7;
+ TaskLogRequest task_log_request = 8;
}
}
@@ -90,7 +92,7 @@ message TaskAssignment {
map<string, string> metadata = 6;
}
-// TaskParams contains task-specific parameters
+// TaskParams contains task-specific parameters with typed variants
message TaskParams {
uint32 volume_id = 1;
string server = 2;
@@ -98,7 +100,75 @@ message TaskParams {
string data_center = 4;
string rack = 5;
repeated string replicas = 6;
- map<string, string> parameters = 7;
+
+ // Typed task parameters
+ oneof task_params {
+ VacuumTaskParams vacuum_params = 7;
+ ErasureCodingTaskParams erasure_coding_params = 8;
+ BalanceTaskParams balance_params = 9;
+ ReplicationTaskParams replication_params = 10;
+ }
+}
+
+// VacuumTaskParams for vacuum operations
+message VacuumTaskParams {
+ double garbage_threshold = 1; // Minimum garbage ratio to trigger vacuum
+ bool force_vacuum = 2; // Force vacuum even if below threshold
+ int32 batch_size = 3; // Number of files to process per batch
+ string working_dir = 4; // Working directory for temporary files
+ bool verify_checksum = 5; // Verify file checksums during vacuum
+}
+
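Illustrative sketch (not part of the proto file): how an admin might populate the typed oneof that replaces the old map<string, string> parameters, here for a vacuum task. The Go identifiers (TaskParams_VacuumParams wrapper, CamelCase field names) and the worker_pb import path (weed/pb/worker_pb) are assumptions from protoc-gen-go conventions; the regenerated Go for these messages is not part of this excerpt.

// newVacuumTaskParams builds typed vacuum parameters for a task assignment.
func newVacuumTaskParams(volumeId uint32, server string) *worker_pb.TaskParams {
	return &worker_pb.TaskParams{
		VolumeId: volumeId,
		Server:   server,
		TaskParams: &worker_pb.TaskParams_VacuumParams{ // oneof wrapper name assumed
			VacuumParams: &worker_pb.VacuumTaskParams{
				GarbageThreshold: 0.3,  // vacuum once ~30% of the volume is garbage
				BatchSize:        1000, // files per batch
				VerifyChecksum:   true,
			},
		},
	}
}
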
+// ErasureCodingTaskParams for EC encoding operations
+message ErasureCodingTaskParams {
+ uint64 estimated_shard_size = 3; // Estimated size per shard
+ int32 data_shards = 4; // Number of data shards (default: 10)
+ int32 parity_shards = 5; // Number of parity shards (default: 4)
+ string working_dir = 6; // Working directory for EC processing
+ string master_client = 7; // Master server address
+ bool cleanup_source = 8; // Whether to cleanup source volume after EC
+ repeated string placement_conflicts = 9; // Any placement rule conflicts
+ repeated ECDestination destinations = 10; // Planned destinations with disk information
+ repeated ExistingECShardLocation existing_shard_locations = 11; // Existing EC shards to cleanup
+}
+
+// ECDestination represents a planned destination for EC shards with disk information
+message ECDestination {
+ string node = 1; // Target server address
+ uint32 disk_id = 2; // Target disk ID
+ string rack = 3; // Target rack for placement tracking
+ string data_center = 4; // Target data center for placement tracking
+ double placement_score = 5; // Quality score of the placement
+}
+
+// ExistingECShardLocation represents existing EC shards that need cleanup
+message ExistingECShardLocation {
+ string node = 1; // Server address with existing shards
+ repeated uint32 shard_ids = 2; // List of shard IDs on this server
+}
+
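Illustrative sketch (not part of the proto file): the erasure-coding variant of the same oneof, pairing the 10+4 shard defaults from the comments above with explicit ECDestination placements. Generated Go names are assumptions; the hostnames, rack, and data center values are placeholders.

// newEcTaskParams sketches the erasure-coding variant with planned destinations.
func newEcTaskParams(volumeId uint32, server string) *worker_pb.TaskParams {
	return &worker_pb.TaskParams{
		VolumeId: volumeId,
		Server:   server,
		TaskParams: &worker_pb.TaskParams_ErasureCodingParams{ // oneof wrapper name assumed
			ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
				DataShards:   10, // defaults noted in the comments above
				ParityShards: 4,
				Destinations: []*worker_pb.ECDestination{
					{Node: "volume-2:8080", DiskId: 0, Rack: "rack-a", DataCenter: "dc1"},
					{Node: "volume-3:8080", DiskId: 1, Rack: "rack-b", DataCenter: "dc1"},
				},
			},
		},
	}
}
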
+// BalanceTaskParams for volume balancing operations
+message BalanceTaskParams {
+ string dest_node = 1; // Planned destination node
+ uint64 estimated_size = 2; // Estimated volume size
+ string dest_rack = 3; // Destination rack for placement rules
+ string dest_dc = 4; // Destination data center
+ double placement_score = 5; // Quality score of the planned placement
+ repeated string placement_conflicts = 6; // Any placement rule conflicts
+ bool force_move = 7; // Force move even with conflicts
+ int32 timeout_seconds = 8; // Operation timeout
+}
+
+// ReplicationTaskParams for adding replicas
+message ReplicationTaskParams {
+ string dest_node = 1; // Planned destination node for new replica
+ uint64 estimated_size = 2; // Estimated replica size
+ string dest_rack = 3; // Destination rack for placement rules
+ string dest_dc = 4; // Destination data center
+ double placement_score = 5; // Quality score of the planned placement
+ repeated string placement_conflicts = 6; // Any placement rule conflicts
+ int32 replica_count = 7; // Target replica count
+ bool verify_consistency = 8; // Verify replica consistency after creation
}
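Illustrative sketch (not part of the proto file): the balance variant follows the same pattern, and replication is analogous via a TaskParams_ReplicationParams wrapper around ReplicationTaskParams. Generated Go names are assumptions.

// newBalanceTaskParams sketches the balance variant of the typed oneof.
func newBalanceTaskParams(volumeId uint32, server, destNode string) *worker_pb.TaskParams {
	return &worker_pb.TaskParams{
		VolumeId: volumeId,
		Server:   server,
		TaskParams: &worker_pb.TaskParams_BalanceParams{ // oneof wrapper name assumed
			BalanceParams: &worker_pb.BalanceTaskParams{
				DestNode:       destNode,
				TimeoutSeconds: 600,
			},
		},
	}
}
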
// TaskUpdate reports task progress
@@ -139,4 +209,122 @@ message WorkerShutdown {
message AdminShutdown {
string reason = 1;
int32 graceful_shutdown_seconds = 2;
+}
+
+// ========== Task Log Messages ==========
+
+// TaskLogRequest requests logs for a specific task
+message TaskLogRequest {
+ string task_id = 1;
+ string worker_id = 2;
+ bool include_metadata = 3; // Include task metadata
+ int32 max_entries = 4; // Maximum number of log entries (0 = all)
+ string log_level = 5; // Filter by log level (INFO, WARNING, ERROR, DEBUG)
+ int64 start_time = 6; // Unix timestamp for start time filter
+ int64 end_time = 7; // Unix timestamp for end time filter
+}
+
+// TaskLogResponse returns task logs and metadata
+message TaskLogResponse {
+ string task_id = 1;
+ string worker_id = 2;
+ bool success = 3;
+ string error_message = 4;
+ TaskLogMetadata metadata = 5;
+ repeated TaskLogEntry log_entries = 6;
+}
+
+// TaskLogMetadata contains metadata about task execution
+message TaskLogMetadata {
+ string task_id = 1;
+ string task_type = 2;
+ string worker_id = 3;
+ int64 start_time = 4;
+ int64 end_time = 5;
+ int64 duration_ms = 6;
+ string status = 7;
+ float progress = 8;
+ uint32 volume_id = 9;
+ string server = 10;
+ string collection = 11;
+ string log_file_path = 12;
+ int64 created_at = 13;
+ map<string, string> custom_data = 14;
+}
+
+// TaskLogEntry represents a single log entry
+message TaskLogEntry {
+ int64 timestamp = 1;
+ string level = 2;
+ string message = 3;
+ map<string, string> fields = 4;
+ float progress = 5;
+ string status = 6;
+}
+
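Illustrative sketch (not part of the proto file): the task-log round trip rides on the existing admin/worker message envelopes. The AdminMessage_TaskLogRequest and WorkerMessage_TaskLogResponse oneof wrappers and the Message field appear in the generated code further down in this diff; the TaskLogRequest/TaskLogResponse field names follow protoc-gen-go conventions and the task/worker ids are placeholders.

// newTaskLogRequest wraps a TaskLogRequest in the admin-to-worker message envelope.
func newTaskLogRequest(taskId, workerId string) *worker_pb.AdminMessage {
	return &worker_pb.AdminMessage{
		Message: &worker_pb.AdminMessage_TaskLogRequest{
			TaskLogRequest: &worker_pb.TaskLogRequest{
				TaskId:     taskId,
				WorkerId:   workerId,
				MaxEntries: 100,     // 0 would return all entries
				LogLevel:   "ERROR", // filter; empty returns every level
			},
		},
	}
}

// answerTaskLogRequest is the worker-side reply carried back in a WorkerMessage.
func answerTaskLogRequest(req *worker_pb.TaskLogRequest, entries []*worker_pb.TaskLogEntry) *worker_pb.WorkerMessage {
	return &worker_pb.WorkerMessage{
		Message: &worker_pb.WorkerMessage_TaskLogResponse{
			TaskLogResponse: &worker_pb.TaskLogResponse{
				TaskId:     req.TaskId,
				WorkerId:   req.WorkerId,
				Success:    true,
				LogEntries: entries,
			},
		},
	}
}
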
+// ========== Maintenance Configuration Messages ==========
+
+// MaintenanceConfig holds configuration for the maintenance system
+message MaintenanceConfig {
+ bool enabled = 1;
+ int32 scan_interval_seconds = 2; // How often to scan for maintenance needs
+ int32 worker_timeout_seconds = 3; // Worker heartbeat timeout
+ int32 task_timeout_seconds = 4; // Individual task timeout
+ int32 retry_delay_seconds = 5; // Delay between retries
+ int32 max_retries = 6; // Default max retries for tasks
+ int32 cleanup_interval_seconds = 7; // How often to clean up old tasks
+ int32 task_retention_seconds = 8; // How long to keep completed/failed tasks
+ MaintenancePolicy policy = 9;
+}
+
+// MaintenancePolicy defines policies for maintenance operations
+message MaintenancePolicy {
+ map<string, TaskPolicy> task_policies = 1; // Task type -> policy mapping
+ int32 global_max_concurrent = 2; // Overall limit across all task types
+ int32 default_repeat_interval_seconds = 3; // Default seconds if task doesn't specify
+ int32 default_check_interval_seconds = 4; // Default seconds for periodic checks
+}
+
+// TaskPolicy represents configuration for a specific task type
+message TaskPolicy {
+ bool enabled = 1;
+ int32 max_concurrent = 2;
+ int32 repeat_interval_seconds = 3; // Seconds to wait before repeating
+ int32 check_interval_seconds = 4; // Seconds between checks
+
+ // Typed task-specific configuration (replaces generic map)
+ oneof task_config {
+ VacuumTaskConfig vacuum_config = 5;
+ ErasureCodingTaskConfig erasure_coding_config = 6;
+ BalanceTaskConfig balance_config = 7;
+ ReplicationTaskConfig replication_config = 8;
+ }
+}
+
+// Task-specific configuration messages
+
+// VacuumTaskConfig contains vacuum-specific configuration
+message VacuumTaskConfig {
+ double garbage_threshold = 1; // Minimum garbage ratio to trigger vacuum (0.0-1.0)
+ int32 min_volume_age_hours = 2; // Minimum age before vacuum is considered
+ int32 min_interval_seconds = 3; // Minimum time between vacuum operations on the same volume
+}
+
+// ErasureCodingTaskConfig contains EC-specific configuration
+message ErasureCodingTaskConfig {
+ double fullness_ratio = 1; // Minimum fullness ratio to trigger EC (0.0-1.0)
+ int32 quiet_for_seconds = 2; // Minimum quiet time before EC
+ int32 min_volume_size_mb = 3; // Minimum volume size for EC
+ string collection_filter = 4; // Only process volumes from specific collections
+}
+
+// BalanceTaskConfig contains balance-specific configuration
+message BalanceTaskConfig {
+ double imbalance_threshold = 1; // Threshold for triggering rebalancing (0.0-1.0)
+ int32 min_server_count = 2; // Minimum number of servers required for balancing
+}
+
+// ReplicationTaskConfig contains replication-specific configuration
+message ReplicationTaskConfig {
+ int32 target_replica_count = 1; // Target number of replicas
}
\ No newline at end of file
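For orientation, a minimal, illustrative Go sketch of how the new typed task_config oneof on TaskPolicy can be populated and read back. The import path assumes the usual SeaweedFS module layout (github.com/seaweedfs/seaweedfs/weed/pb/worker_pb); the threshold and interval values are placeholders, not defaults taken from this change.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	// A vacuum policy expressed with the typed oneof instead of a generic string map.
	policy := &worker_pb.TaskPolicy{
		Enabled:               true,
		MaxConcurrent:         2,
		RepeatIntervalSeconds: 24 * 3600,
		CheckIntervalSeconds:  6 * 3600,
		TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
			VacuumConfig: &worker_pb.VacuumTaskConfig{
				GarbageThreshold:   0.3,
				MinVolumeAgeHours:  24,
				MinIntervalSeconds: 7 * 24 * 3600,
			},
		},
	}

	// Generated getters return nil for variants that are not set,
	// so callers can branch per task type without map lookups.
	if v := policy.GetVacuumConfig(); v != nil {
		fmt.Printf("vacuum at garbage ratio >= %.2f\n", v.GetGarbageThreshold())
	}
}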
diff --git a/weed/pb/worker_pb/worker.pb.go b/weed/pb/worker_pb/worker.pb.go
index 00188937b..210f6feac 100644
--- a/weed/pb/worker_pb/worker.pb.go
+++ b/weed/pb/worker_pb/worker.pb.go
@@ -34,6 +34,7 @@ type WorkerMessage struct {
// *WorkerMessage_TaskUpdate
// *WorkerMessage_TaskComplete
// *WorkerMessage_Shutdown
+ // *WorkerMessage_TaskLogResponse
Message isWorkerMessage_Message `protobuf_oneof:"message"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
@@ -144,6 +145,15 @@ func (x *WorkerMessage) GetShutdown() *WorkerShutdown {
return nil
}
+func (x *WorkerMessage) GetTaskLogResponse() *TaskLogResponse {
+ if x != nil {
+ if x, ok := x.Message.(*WorkerMessage_TaskLogResponse); ok {
+ return x.TaskLogResponse
+ }
+ }
+ return nil
+}
+
type isWorkerMessage_Message interface {
isWorkerMessage_Message()
}
@@ -172,6 +182,10 @@ type WorkerMessage_Shutdown struct {
Shutdown *WorkerShutdown `protobuf:"bytes,8,opt,name=shutdown,proto3,oneof"`
}
+type WorkerMessage_TaskLogResponse struct {
+ TaskLogResponse *TaskLogResponse `protobuf:"bytes,9,opt,name=task_log_response,json=taskLogResponse,proto3,oneof"`
+}
+
func (*WorkerMessage_Registration) isWorkerMessage_Message() {}
func (*WorkerMessage_Heartbeat) isWorkerMessage_Message() {}
@@ -184,6 +198,8 @@ func (*WorkerMessage_TaskComplete) isWorkerMessage_Message() {}
func (*WorkerMessage_Shutdown) isWorkerMessage_Message() {}
+func (*WorkerMessage_TaskLogResponse) isWorkerMessage_Message() {}
+
// AdminMessage represents messages from admin to worker
type AdminMessage struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -196,6 +212,7 @@ type AdminMessage struct {
// *AdminMessage_TaskAssignment
// *AdminMessage_TaskCancellation
// *AdminMessage_AdminShutdown
+ // *AdminMessage_TaskLogRequest
Message isAdminMessage_Message `protobuf_oneof:"message"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
@@ -297,6 +314,15 @@ func (x *AdminMessage) GetAdminShutdown() *AdminShutdown {
return nil
}
+func (x *AdminMessage) GetTaskLogRequest() *TaskLogRequest {
+ if x != nil {
+ if x, ok := x.Message.(*AdminMessage_TaskLogRequest); ok {
+ return x.TaskLogRequest
+ }
+ }
+ return nil
+}
+
type isAdminMessage_Message interface {
isAdminMessage_Message()
}
@@ -321,6 +347,10 @@ type AdminMessage_AdminShutdown struct {
AdminShutdown *AdminShutdown `protobuf:"bytes,7,opt,name=admin_shutdown,json=adminShutdown,proto3,oneof"`
}
+type AdminMessage_TaskLogRequest struct {
+ TaskLogRequest *TaskLogRequest `protobuf:"bytes,8,opt,name=task_log_request,json=taskLogRequest,proto3,oneof"`
+}
+
func (*AdminMessage_RegistrationResponse) isAdminMessage_Message() {}
func (*AdminMessage_HeartbeatResponse) isAdminMessage_Message() {}
@@ -331,6 +361,8 @@ func (*AdminMessage_TaskCancellation) isAdminMessage_Message() {}
func (*AdminMessage_AdminShutdown) isAdminMessage_Message() {}
+func (*AdminMessage_TaskLogRequest) isAdminMessage_Message() {}
+
// WorkerRegistration message when worker connects
type WorkerRegistration struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -769,16 +801,24 @@ func (x *TaskAssignment) GetMetadata() map[string]string {
return nil
}
-// TaskParams contains task-specific parameters
+// TaskParams contains task-specific parameters with typed variants
type TaskParams struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
- Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
- DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
- Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"`
- Replicas []string `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"`
- Parameters map[string]string `protobuf:"bytes,7,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"`
+ Replicas []string `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"`
+ // Typed task parameters
+ //
+ // Types that are valid to be assigned to TaskParams:
+ //
+ // *TaskParams_VacuumParams
+ // *TaskParams_ErasureCodingParams
+ // *TaskParams_BalanceParams
+ // *TaskParams_ReplicationParams
+ TaskParams isTaskParams_TaskParams `protobuf_oneof:"task_params"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -855,13 +895,595 @@ func (x *TaskParams) GetReplicas() []string {
return nil
}
-func (x *TaskParams) GetParameters() map[string]string {
+func (x *TaskParams) GetTaskParams() isTaskParams_TaskParams {
+ if x != nil {
+ return x.TaskParams
+ }
+ return nil
+}
+
+func (x *TaskParams) GetVacuumParams() *VacuumTaskParams {
+ if x != nil {
+ if x, ok := x.TaskParams.(*TaskParams_VacuumParams); ok {
+ return x.VacuumParams
+ }
+ }
+ return nil
+}
+
+func (x *TaskParams) GetErasureCodingParams() *ErasureCodingTaskParams {
+ if x != nil {
+ if x, ok := x.TaskParams.(*TaskParams_ErasureCodingParams); ok {
+ return x.ErasureCodingParams
+ }
+ }
+ return nil
+}
+
+func (x *TaskParams) GetBalanceParams() *BalanceTaskParams {
+ if x != nil {
+ if x, ok := x.TaskParams.(*TaskParams_BalanceParams); ok {
+ return x.BalanceParams
+ }
+ }
+ return nil
+}
+
+func (x *TaskParams) GetReplicationParams() *ReplicationTaskParams {
+ if x != nil {
+ if x, ok := x.TaskParams.(*TaskParams_ReplicationParams); ok {
+ return x.ReplicationParams
+ }
+ }
+ return nil
+}
+
+type isTaskParams_TaskParams interface {
+ isTaskParams_TaskParams()
+}
+
+type TaskParams_VacuumParams struct {
+ VacuumParams *VacuumTaskParams `protobuf:"bytes,7,opt,name=vacuum_params,json=vacuumParams,proto3,oneof"`
+}
+
+type TaskParams_ErasureCodingParams struct {
+ ErasureCodingParams *ErasureCodingTaskParams `protobuf:"bytes,8,opt,name=erasure_coding_params,json=erasureCodingParams,proto3,oneof"`
+}
+
+type TaskParams_BalanceParams struct {
+ BalanceParams *BalanceTaskParams `protobuf:"bytes,9,opt,name=balance_params,json=balanceParams,proto3,oneof"`
+}
+
+type TaskParams_ReplicationParams struct {
+ ReplicationParams *ReplicationTaskParams `protobuf:"bytes,10,opt,name=replication_params,json=replicationParams,proto3,oneof"`
+}
+
+func (*TaskParams_VacuumParams) isTaskParams_TaskParams() {}
+
+func (*TaskParams_ErasureCodingParams) isTaskParams_TaskParams() {}
+
+func (*TaskParams_BalanceParams) isTaskParams_TaskParams() {}
+
+func (*TaskParams_ReplicationParams) isTaskParams_TaskParams() {}
+
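The oneof plumbing above lets callers dispatch on the concrete parameter type instead of parsing a string map. A hedged sketch follows; the helper name and sample values are hypothetical, and the import path is assumed as in the earlier sketch.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

// describeTaskParams is a hypothetical helper showing how a worker can
// switch on the typed oneof wrappers generated above.
func describeTaskParams(p *worker_pb.TaskParams) string {
	switch t := p.GetTaskParams().(type) {
	case *worker_pb.TaskParams_VacuumParams:
		return fmt.Sprintf("vacuum volume %d, threshold %.2f",
			p.GetVolumeId(), t.VacuumParams.GetGarbageThreshold())
	case *worker_pb.TaskParams_ErasureCodingParams:
		return fmt.Sprintf("erasure-code volume %d to %d destinations",
			p.GetVolumeId(), len(t.ErasureCodingParams.GetDestinations()))
	case *worker_pb.TaskParams_BalanceParams:
		return fmt.Sprintf("balance volume %d to %s",
			p.GetVolumeId(), t.BalanceParams.GetDestNode())
	case *worker_pb.TaskParams_ReplicationParams:
		return fmt.Sprintf("replicate volume %d to %s",
			p.GetVolumeId(), t.ReplicationParams.GetDestNode())
	default:
		return "no typed parameters set"
	}
}

func main() {
	params := &worker_pb.TaskParams{
		VolumeId: 42,
		Server:   "volume-server-1:8080",
		TaskParams: &worker_pb.TaskParams_BalanceParams{
			BalanceParams: &worker_pb.BalanceTaskParams{DestNode: "volume-server-2:8080"},
		},
	}
	fmt.Println(describeTaskParams(params))
}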
+// VacuumTaskParams for vacuum operations
+type VacuumTaskParams struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum
+ ForceVacuum bool `protobuf:"varint,2,opt,name=force_vacuum,json=forceVacuum,proto3" json:"force_vacuum,omitempty"` // Force vacuum even if below threshold
+ BatchSize int32 `protobuf:"varint,3,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` // Number of files to process per batch
+ WorkingDir string `protobuf:"bytes,4,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for temporary files
+ VerifyChecksum bool `protobuf:"varint,5,opt,name=verify_checksum,json=verifyChecksum,proto3" json:"verify_checksum,omitempty"` // Verify file checksums during vacuum
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *VacuumTaskParams) Reset() {
+ *x = VacuumTaskParams{}
+ mi := &file_worker_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VacuumTaskParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumTaskParams) ProtoMessage() {}
+
+func (x *VacuumTaskParams) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumTaskParams.ProtoReflect.Descriptor instead.
+func (*VacuumTaskParams) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *VacuumTaskParams) GetGarbageThreshold() float64 {
+ if x != nil {
+ return x.GarbageThreshold
+ }
+ return 0
+}
+
+func (x *VacuumTaskParams) GetForceVacuum() bool {
+ if x != nil {
+ return x.ForceVacuum
+ }
+ return false
+}
+
+func (x *VacuumTaskParams) GetBatchSize() int32 {
+ if x != nil {
+ return x.BatchSize
+ }
+ return 0
+}
+
+func (x *VacuumTaskParams) GetWorkingDir() string {
+ if x != nil {
+ return x.WorkingDir
+ }
+ return ""
+}
+
+func (x *VacuumTaskParams) GetVerifyChecksum() bool {
+ if x != nil {
+ return x.VerifyChecksum
+ }
+ return false
+}
+
+// ErasureCodingTaskParams for EC encoding operations
+type ErasureCodingTaskParams struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ EstimatedShardSize uint64 `protobuf:"varint,3,opt,name=estimated_shard_size,json=estimatedShardSize,proto3" json:"estimated_shard_size,omitempty"` // Estimated size per shard
+ DataShards int32 `protobuf:"varint,4,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (default: 10)
+ ParityShards int32 `protobuf:"varint,5,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (default: 4)
+ WorkingDir string `protobuf:"bytes,6,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for EC processing
+ MasterClient string `protobuf:"bytes,7,opt,name=master_client,json=masterClient,proto3" json:"master_client,omitempty"` // Master server address
+ CleanupSource bool `protobuf:"varint,8,opt,name=cleanup_source,json=cleanupSource,proto3" json:"cleanup_source,omitempty"` // Whether to cleanup source volume after EC
+ PlacementConflicts []string `protobuf:"bytes,9,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
+ Destinations []*ECDestination `protobuf:"bytes,10,rep,name=destinations,proto3" json:"destinations,omitempty"` // Planned destinations with disk information
+ ExistingShardLocations []*ExistingECShardLocation `protobuf:"bytes,11,rep,name=existing_shard_locations,json=existingShardLocations,proto3" json:"existing_shard_locations,omitempty"` // Existing EC shards to cleanup
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErasureCodingTaskParams) Reset() {
+ *x = ErasureCodingTaskParams{}
+ mi := &file_worker_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErasureCodingTaskParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErasureCodingTaskParams) ProtoMessage() {}
+
+func (x *ErasureCodingTaskParams) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErasureCodingTaskParams.ProtoReflect.Descriptor instead.
+func (*ErasureCodingTaskParams) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *ErasureCodingTaskParams) GetEstimatedShardSize() uint64 {
+ if x != nil {
+ return x.EstimatedShardSize
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskParams) GetDataShards() int32 {
+ if x != nil {
+ return x.DataShards
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskParams) GetParityShards() int32 {
+ if x != nil {
+ return x.ParityShards
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskParams) GetWorkingDir() string {
+ if x != nil {
+ return x.WorkingDir
+ }
+ return ""
+}
+
+func (x *ErasureCodingTaskParams) GetMasterClient() string {
+ if x != nil {
+ return x.MasterClient
+ }
+ return ""
+}
+
+func (x *ErasureCodingTaskParams) GetCleanupSource() bool {
+ if x != nil {
+ return x.CleanupSource
+ }
+ return false
+}
+
+func (x *ErasureCodingTaskParams) GetPlacementConflicts() []string {
+ if x != nil {
+ return x.PlacementConflicts
+ }
+ return nil
+}
+
+func (x *ErasureCodingTaskParams) GetDestinations() []*ECDestination {
+ if x != nil {
+ return x.Destinations
+ }
+ return nil
+}
+
+func (x *ErasureCodingTaskParams) GetExistingShardLocations() []*ExistingECShardLocation {
if x != nil {
- return x.Parameters
+ return x.ExistingShardLocations
}
return nil
}
+// ECDestination represents a planned destination for EC shards with disk information
+type ECDestination struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Target server address
+ DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID
+ Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Target rack for placement tracking
+ DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Target data center for placement tracking
+ PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the placement
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ECDestination) Reset() {
+ *x = ECDestination{}
+ mi := &file_worker_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ECDestination) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ECDestination) ProtoMessage() {}
+
+func (x *ECDestination) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ECDestination.ProtoReflect.Descriptor instead.
+func (*ECDestination) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ECDestination) GetNode() string {
+ if x != nil {
+ return x.Node
+ }
+ return ""
+}
+
+func (x *ECDestination) GetDiskId() uint32 {
+ if x != nil {
+ return x.DiskId
+ }
+ return 0
+}
+
+func (x *ECDestination) GetRack() string {
+ if x != nil {
+ return x.Rack
+ }
+ return ""
+}
+
+func (x *ECDestination) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
+ }
+ return ""
+}
+
+func (x *ECDestination) GetPlacementScore() float64 {
+ if x != nil {
+ return x.PlacementScore
+ }
+ return 0
+}
+
+// ExistingECShardLocation represents existing EC shards that need cleanup
+type ExistingECShardLocation struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Server address with existing shards
+ ShardIds []uint32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // List of shard IDs on this server
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExistingECShardLocation) Reset() {
+ *x = ExistingECShardLocation{}
+ mi := &file_worker_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExistingECShardLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExistingECShardLocation) ProtoMessage() {}
+
+func (x *ExistingECShardLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExistingECShardLocation.ProtoReflect.Descriptor instead.
+func (*ExistingECShardLocation) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *ExistingECShardLocation) GetNode() string {
+ if x != nil {
+ return x.Node
+ }
+ return ""
+}
+
+func (x *ExistingECShardLocation) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+// BalanceTaskParams for volume balancing operations
+type BalanceTaskParams struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node
+ EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated volume size
+ DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
+ DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
+ PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
+ PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
+ ForceMove bool `protobuf:"varint,7,opt,name=force_move,json=forceMove,proto3" json:"force_move,omitempty"` // Force move even with conflicts
+ TimeoutSeconds int32 `protobuf:"varint,8,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` // Operation timeout
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *BalanceTaskParams) Reset() {
+ *x = BalanceTaskParams{}
+ mi := &file_worker_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BalanceTaskParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BalanceTaskParams) ProtoMessage() {}
+
+func (x *BalanceTaskParams) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BalanceTaskParams.ProtoReflect.Descriptor instead.
+func (*BalanceTaskParams) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *BalanceTaskParams) GetDestNode() string {
+ if x != nil {
+ return x.DestNode
+ }
+ return ""
+}
+
+func (x *BalanceTaskParams) GetEstimatedSize() uint64 {
+ if x != nil {
+ return x.EstimatedSize
+ }
+ return 0
+}
+
+func (x *BalanceTaskParams) GetDestRack() string {
+ if x != nil {
+ return x.DestRack
+ }
+ return ""
+}
+
+func (x *BalanceTaskParams) GetDestDc() string {
+ if x != nil {
+ return x.DestDc
+ }
+ return ""
+}
+
+func (x *BalanceTaskParams) GetPlacementScore() float64 {
+ if x != nil {
+ return x.PlacementScore
+ }
+ return 0
+}
+
+func (x *BalanceTaskParams) GetPlacementConflicts() []string {
+ if x != nil {
+ return x.PlacementConflicts
+ }
+ return nil
+}
+
+func (x *BalanceTaskParams) GetForceMove() bool {
+ if x != nil {
+ return x.ForceMove
+ }
+ return false
+}
+
+func (x *BalanceTaskParams) GetTimeoutSeconds() int32 {
+ if x != nil {
+ return x.TimeoutSeconds
+ }
+ return 0
+}
+
+// ReplicationTaskParams for adding replicas
+type ReplicationTaskParams struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node for new replica
+ EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated replica size
+ DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
+ DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
+ PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
+ PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
+ ReplicaCount int32 `protobuf:"varint,7,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // Target replica count
+ VerifyConsistency bool `protobuf:"varint,8,opt,name=verify_consistency,json=verifyConsistency,proto3" json:"verify_consistency,omitempty"` // Verify replica consistency after creation
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReplicationTaskParams) Reset() {
+ *x = ReplicationTaskParams{}
+ mi := &file_worker_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReplicationTaskParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReplicationTaskParams) ProtoMessage() {}
+
+func (x *ReplicationTaskParams) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReplicationTaskParams.ProtoReflect.Descriptor instead.
+func (*ReplicationTaskParams) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *ReplicationTaskParams) GetDestNode() string {
+ if x != nil {
+ return x.DestNode
+ }
+ return ""
+}
+
+func (x *ReplicationTaskParams) GetEstimatedSize() uint64 {
+ if x != nil {
+ return x.EstimatedSize
+ }
+ return 0
+}
+
+func (x *ReplicationTaskParams) GetDestRack() string {
+ if x != nil {
+ return x.DestRack
+ }
+ return ""
+}
+
+func (x *ReplicationTaskParams) GetDestDc() string {
+ if x != nil {
+ return x.DestDc
+ }
+ return ""
+}
+
+func (x *ReplicationTaskParams) GetPlacementScore() float64 {
+ if x != nil {
+ return x.PlacementScore
+ }
+ return 0
+}
+
+func (x *ReplicationTaskParams) GetPlacementConflicts() []string {
+ if x != nil {
+ return x.PlacementConflicts
+ }
+ return nil
+}
+
+func (x *ReplicationTaskParams) GetReplicaCount() int32 {
+ if x != nil {
+ return x.ReplicaCount
+ }
+ return 0
+}
+
+func (x *ReplicationTaskParams) GetVerifyConsistency() bool {
+ if x != nil {
+ return x.VerifyConsistency
+ }
+ return false
+}
+
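A short, illustrative sketch of filling in ErasureCodingTaskParams with planned destinations and existing shards to clean up; server names, rack/DC labels, shard IDs, and scores are placeholders, and the import path is assumed as before.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	// Hypothetical planner output: where the 10+4 shards should go and
	// which stale shards must be removed first.
	ecParams := &worker_pb.ErasureCodingTaskParams{
		DataShards:   10,
		ParityShards: 4,
		MasterClient: "master:9333",
		Destinations: []*worker_pb.ECDestination{
			{Node: "volume-server-1:8080", DiskId: 0, Rack: "rack-a", DataCenter: "dc1", PlacementScore: 0.9},
			{Node: "volume-server-2:8080", DiskId: 1, Rack: "rack-b", DataCenter: "dc1", PlacementScore: 0.8},
		},
		ExistingShardLocations: []*worker_pb.ExistingECShardLocation{
			{Node: "volume-server-3:8080", ShardIds: []uint32{0, 5}},
		},
	}

	params := &worker_pb.TaskParams{
		VolumeId:   42,
		Collection: "pictures",
		TaskParams: &worker_pb.TaskParams_ErasureCodingParams{ErasureCodingParams: ecParams},
	}
	fmt.Printf("planned %d destinations, %d cleanup locations\n",
		len(params.GetErasureCodingParams().GetDestinations()),
		len(params.GetErasureCodingParams().GetExistingShardLocations()))
}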
// TaskUpdate reports task progress
type TaskUpdate struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -877,7 +1499,7 @@ type TaskUpdate struct {
func (x *TaskUpdate) Reset() {
*x = TaskUpdate{}
- mi := &file_worker_proto_msgTypes[9]
+ mi := &file_worker_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -889,7 +1511,7 @@ func (x *TaskUpdate) String() string {
func (*TaskUpdate) ProtoMessage() {}
func (x *TaskUpdate) ProtoReflect() protoreflect.Message {
- mi := &file_worker_proto_msgTypes[9]
+ mi := &file_worker_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -902,7 +1524,7 @@ func (x *TaskUpdate) ProtoReflect() protoreflect.Message {
// Deprecated: Use TaskUpdate.ProtoReflect.Descriptor instead.
func (*TaskUpdate) Descriptor() ([]byte, []int) {
- return file_worker_proto_rawDescGZIP(), []int{9}
+ return file_worker_proto_rawDescGZIP(), []int{15}
}
func (x *TaskUpdate) GetTaskId() string {
@@ -962,7 +1584,7 @@ type TaskComplete struct {
func (x *TaskComplete) Reset() {
*x = TaskComplete{}
- mi := &file_worker_proto_msgTypes[10]
+ mi := &file_worker_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -974,7 +1596,7 @@ func (x *TaskComplete) String() string {
func (*TaskComplete) ProtoMessage() {}
func (x *TaskComplete) ProtoReflect() protoreflect.Message {
- mi := &file_worker_proto_msgTypes[10]
+ mi := &file_worker_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -987,7 +1609,7 @@ func (x *TaskComplete) ProtoReflect() protoreflect.Message {
// Deprecated: Use TaskComplete.ProtoReflect.Descriptor instead.
func (*TaskComplete) Descriptor() ([]byte, []int) {
- return file_worker_proto_rawDescGZIP(), []int{10}
+ return file_worker_proto_rawDescGZIP(), []int{16}
}
func (x *TaskComplete) GetTaskId() string {
@@ -1044,7 +1666,7 @@ type TaskCancellation struct {
func (x *TaskCancellation) Reset() {
*x = TaskCancellation{}
- mi := &file_worker_proto_msgTypes[11]
+ mi := &file_worker_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1056,7 +1678,7 @@ func (x *TaskCancellation) String() string {
func (*TaskCancellation) ProtoMessage() {}
func (x *TaskCancellation) ProtoReflect() protoreflect.Message {
- mi := &file_worker_proto_msgTypes[11]
+ mi := &file_worker_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1069,7 +1691,7 @@ func (x *TaskCancellation) ProtoReflect() protoreflect.Message {
// Deprecated: Use TaskCancellation.ProtoReflect.Descriptor instead.
func (*TaskCancellation) Descriptor() ([]byte, []int) {
- return file_worker_proto_rawDescGZIP(), []int{11}
+ return file_worker_proto_rawDescGZIP(), []int{17}
}
func (x *TaskCancellation) GetTaskId() string {
@@ -1105,7 +1727,7 @@ type WorkerShutdown struct {
func (x *WorkerShutdown) Reset() {
*x = WorkerShutdown{}
- mi := &file_worker_proto_msgTypes[12]
+ mi := &file_worker_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1117,7 +1739,7 @@ func (x *WorkerShutdown) String() string {
func (*WorkerShutdown) ProtoMessage() {}
func (x *WorkerShutdown) ProtoReflect() protoreflect.Message {
- mi := &file_worker_proto_msgTypes[12]
+ mi := &file_worker_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1130,7 +1752,7 @@ func (x *WorkerShutdown) ProtoReflect() protoreflect.Message {
// Deprecated: Use WorkerShutdown.ProtoReflect.Descriptor instead.
func (*WorkerShutdown) Descriptor() ([]byte, []int) {
- return file_worker_proto_rawDescGZIP(), []int{12}
+ return file_worker_proto_rawDescGZIP(), []int{18}
}
func (x *WorkerShutdown) GetWorkerId() string {
@@ -1165,7 +1787,7 @@ type AdminShutdown struct {
func (x *AdminShutdown) Reset() {
*x = AdminShutdown{}
- mi := &file_worker_proto_msgTypes[13]
+ mi := &file_worker_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1177,7 +1799,7 @@ func (x *AdminShutdown) String() string {
func (*AdminShutdown) ProtoMessage() {}
func (x *AdminShutdown) ProtoReflect() protoreflect.Message {
- mi := &file_worker_proto_msgTypes[13]
+ mi := &file_worker_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1190,7 +1812,7 @@ func (x *AdminShutdown) ProtoReflect() protoreflect.Message {
// Deprecated: Use AdminShutdown.ProtoReflect.Descriptor instead.
func (*AdminShutdown) Descriptor() ([]byte, []int) {
- return file_worker_proto_rawDescGZIP(), []int{13}
+ return file_worker_proto_rawDescGZIP(), []int{19}
}
func (x *AdminShutdown) GetReason() string {
@@ -1207,11 +1829,978 @@ func (x *AdminShutdown) GetGracefulShutdownSeconds() int32 {
return 0
}
+// TaskLogRequest requests logs for a specific task
+type TaskLogRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+ WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
+ IncludeMetadata bool `protobuf:"varint,3,opt,name=include_metadata,json=includeMetadata,proto3" json:"include_metadata,omitempty"` // Include task metadata
+ MaxEntries int32 `protobuf:"varint,4,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` // Maximum number of log entries (0 = all)
+ LogLevel string `protobuf:"bytes,5,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` // Filter by log level (INFO, WARNING, ERROR, DEBUG)
+ StartTime int64 `protobuf:"varint,6,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Unix timestamp for start time filter
+ EndTime int64 `protobuf:"varint,7,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Unix timestamp for end time filter
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskLogRequest) Reset() {
+ *x = TaskLogRequest{}
+ mi := &file_worker_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskLogRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogRequest) ProtoMessage() {}
+
+func (x *TaskLogRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogRequest.ProtoReflect.Descriptor instead.
+func (*TaskLogRequest) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *TaskLogRequest) GetTaskId() string {
+ if x != nil {
+ return x.TaskId
+ }
+ return ""
+}
+
+func (x *TaskLogRequest) GetWorkerId() string {
+ if x != nil {
+ return x.WorkerId
+ }
+ return ""
+}
+
+func (x *TaskLogRequest) GetIncludeMetadata() bool {
+ if x != nil {
+ return x.IncludeMetadata
+ }
+ return false
+}
+
+func (x *TaskLogRequest) GetMaxEntries() int32 {
+ if x != nil {
+ return x.MaxEntries
+ }
+ return 0
+}
+
+func (x *TaskLogRequest) GetLogLevel() string {
+ if x != nil {
+ return x.LogLevel
+ }
+ return ""
+}
+
+func (x *TaskLogRequest) GetStartTime() int64 {
+ if x != nil {
+ return x.StartTime
+ }
+ return 0
+}
+
+func (x *TaskLogRequest) GetEndTime() int64 {
+ if x != nil {
+ return x.EndTime
+ }
+ return 0
+}
+
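On the admin side, a log request travels inside the AdminMessage oneof added above. A minimal sketch with placeholder IDs; the admin_id/timestamp field names follow the usual protoc-gen-go casing, and the import path is assumed as before.

package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	// Ask a worker for up to 100 ERROR-level entries of one task, plus metadata.
	msg := &worker_pb.AdminMessage{
		AdminId:   "admin-1",
		Timestamp: time.Now().Unix(),
		Message: &worker_pb.AdminMessage_TaskLogRequest{
			TaskLogRequest: &worker_pb.TaskLogRequest{
				TaskId:          "task-123",
				WorkerId:        "worker-7",
				IncludeMetadata: true,
				MaxEntries:      100,
				LogLevel:        "ERROR",
			},
		},
	}
	// In the real system this message would be sent on the admin/worker
	// gRPC stream; here we only show how the oneof is populated.
	fmt.Println(msg.GetTaskLogRequest().GetTaskId())
}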
+// TaskLogResponse returns task logs and metadata
+type TaskLogResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+ WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
+ Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"`
+ ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+ Metadata *TaskLogMetadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ LogEntries []*TaskLogEntry `protobuf:"bytes,6,rep,name=log_entries,json=logEntries,proto3" json:"log_entries,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskLogResponse) Reset() {
+ *x = TaskLogResponse{}
+ mi := &file_worker_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskLogResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogResponse) ProtoMessage() {}
+
+func (x *TaskLogResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[21]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogResponse.ProtoReflect.Descriptor instead.
+func (*TaskLogResponse) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *TaskLogResponse) GetTaskId() string {
+ if x != nil {
+ return x.TaskId
+ }
+ return ""
+}
+
+func (x *TaskLogResponse) GetWorkerId() string {
+ if x != nil {
+ return x.WorkerId
+ }
+ return ""
+}
+
+func (x *TaskLogResponse) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+
+func (x *TaskLogResponse) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+func (x *TaskLogResponse) GetMetadata() *TaskLogMetadata {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *TaskLogResponse) GetLogEntries() []*TaskLogEntry {
+ if x != nil {
+ return x.LogEntries
+ }
+ return nil
+}
+
+// TaskLogMetadata contains metadata about task execution
+type TaskLogMetadata struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+ TaskType string `protobuf:"bytes,2,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
+ WorkerId string `protobuf:"bytes,3,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
+ StartTime int64 `protobuf:"varint,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ EndTime int64 `protobuf:"varint,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
+ Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
+ Progress float32 `protobuf:"fixed32,8,opt,name=progress,proto3" json:"progress,omitempty"`
+ VolumeId uint32 `protobuf:"varint,9,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Server string `protobuf:"bytes,10,opt,name=server,proto3" json:"server,omitempty"`
+ Collection string `protobuf:"bytes,11,opt,name=collection,proto3" json:"collection,omitempty"`
+ LogFilePath string `protobuf:"bytes,12,opt,name=log_file_path,json=logFilePath,proto3" json:"log_file_path,omitempty"`
+ CreatedAt int64 `protobuf:"varint,13,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+ CustomData map[string]string `protobuf:"bytes,14,rep,name=custom_data,json=customData,proto3" json:"custom_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskLogMetadata) Reset() {
+ *x = TaskLogMetadata{}
+ mi := &file_worker_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskLogMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogMetadata) ProtoMessage() {}
+
+func (x *TaskLogMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogMetadata.ProtoReflect.Descriptor instead.
+func (*TaskLogMetadata) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *TaskLogMetadata) GetTaskId() string {
+ if x != nil {
+ return x.TaskId
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetTaskType() string {
+ if x != nil {
+ return x.TaskType
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetWorkerId() string {
+ if x != nil {
+ return x.WorkerId
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetStartTime() int64 {
+ if x != nil {
+ return x.StartTime
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetEndTime() int64 {
+ if x != nil {
+ return x.EndTime
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetDurationMs() int64 {
+ if x != nil {
+ return x.DurationMs
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetProgress() float32 {
+ if x != nil {
+ return x.Progress
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetServer() string {
+ if x != nil {
+ return x.Server
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetLogFilePath() string {
+ if x != nil {
+ return x.LogFilePath
+ }
+ return ""
+}
+
+func (x *TaskLogMetadata) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *TaskLogMetadata) GetCustomData() map[string]string {
+ if x != nil {
+ return x.CustomData
+ }
+ return nil
+}
+
+// TaskLogEntry represents a single log entry
+type TaskLogEntry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Level string `protobuf:"bytes,2,opt,name=level,proto3" json:"level,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ Fields map[string]string `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Progress float32 `protobuf:"fixed32,5,opt,name=progress,proto3" json:"progress,omitempty"`
+ Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskLogEntry) Reset() {
+ *x = TaskLogEntry{}
+ mi := &file_worker_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogEntry) ProtoMessage() {}
+
+func (x *TaskLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[23]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogEntry.ProtoReflect.Descriptor instead.
+func (*TaskLogEntry) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *TaskLogEntry) GetTimestamp() int64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *TaskLogEntry) GetLevel() string {
+ if x != nil {
+ return x.Level
+ }
+ return ""
+}
+
+func (x *TaskLogEntry) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *TaskLogEntry) GetFields() map[string]string {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+func (x *TaskLogEntry) GetProgress() float32 {
+ if x != nil {
+ return x.Progress
+ }
+ return 0
+}
+
+func (x *TaskLogEntry) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
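The matching worker-side reply wraps a TaskLogResponse in the WorkerMessage oneof added above. An illustrative sketch with placeholder values, import path assumed as before.

package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	// A worker-side reply carrying task metadata plus a couple of log entries.
	now := time.Now().Unix()
	reply := &worker_pb.WorkerMessage{
		WorkerId:  "worker-7",
		Timestamp: now,
		Message: &worker_pb.WorkerMessage_TaskLogResponse{
			TaskLogResponse: &worker_pb.TaskLogResponse{
				TaskId:   "task-123",
				WorkerId: "worker-7",
				Success:  true,
				Metadata: &worker_pb.TaskLogMetadata{
					TaskId:   "task-123",
					TaskType: "vacuum",
					Status:   "completed",
					Progress: 100,
				},
				LogEntries: []*worker_pb.TaskLogEntry{
					{Timestamp: now, Level: "INFO", Message: "vacuum started", Progress: 0},
					{Timestamp: now, Level: "INFO", Message: "vacuum finished", Progress: 100, Status: "completed"},
				},
			},
		},
	}
	fmt.Printf("returning %d log entries\n", len(reply.GetTaskLogResponse().GetLogEntries()))
}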
+// MaintenanceConfig holds configuration for the maintenance system
+type MaintenanceConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ ScanIntervalSeconds int32 `protobuf:"varint,2,opt,name=scan_interval_seconds,json=scanIntervalSeconds,proto3" json:"scan_interval_seconds,omitempty"` // How often to scan for maintenance needs
+ WorkerTimeoutSeconds int32 `protobuf:"varint,3,opt,name=worker_timeout_seconds,json=workerTimeoutSeconds,proto3" json:"worker_timeout_seconds,omitempty"` // Worker heartbeat timeout
+ TaskTimeoutSeconds int32 `protobuf:"varint,4,opt,name=task_timeout_seconds,json=taskTimeoutSeconds,proto3" json:"task_timeout_seconds,omitempty"` // Individual task timeout
+ RetryDelaySeconds int32 `protobuf:"varint,5,opt,name=retry_delay_seconds,json=retryDelaySeconds,proto3" json:"retry_delay_seconds,omitempty"` // Delay between retries
+ MaxRetries int32 `protobuf:"varint,6,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` // Default max retries for tasks
+ CleanupIntervalSeconds int32 `protobuf:"varint,7,opt,name=cleanup_interval_seconds,json=cleanupIntervalSeconds,proto3" json:"cleanup_interval_seconds,omitempty"` // How often to clean up old tasks
+ TaskRetentionSeconds int32 `protobuf:"varint,8,opt,name=task_retention_seconds,json=taskRetentionSeconds,proto3" json:"task_retention_seconds,omitempty"` // How long to keep completed/failed tasks
+ Policy *MaintenancePolicy `protobuf:"bytes,9,opt,name=policy,proto3" json:"policy,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MaintenanceConfig) Reset() {
+ *x = MaintenanceConfig{}
+ mi := &file_worker_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MaintenanceConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MaintenanceConfig) ProtoMessage() {}
+
+func (x *MaintenanceConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[24]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MaintenanceConfig.ProtoReflect.Descriptor instead.
+func (*MaintenanceConfig) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *MaintenanceConfig) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *MaintenanceConfig) GetScanIntervalSeconds() int32 {
+ if x != nil {
+ return x.ScanIntervalSeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetWorkerTimeoutSeconds() int32 {
+ if x != nil {
+ return x.WorkerTimeoutSeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetTaskTimeoutSeconds() int32 {
+ if x != nil {
+ return x.TaskTimeoutSeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetRetryDelaySeconds() int32 {
+ if x != nil {
+ return x.RetryDelaySeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetMaxRetries() int32 {
+ if x != nil {
+ return x.MaxRetries
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetCleanupIntervalSeconds() int32 {
+ if x != nil {
+ return x.CleanupIntervalSeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetTaskRetentionSeconds() int32 {
+ if x != nil {
+ return x.TaskRetentionSeconds
+ }
+ return 0
+}
+
+func (x *MaintenanceConfig) GetPolicy() *MaintenancePolicy {
+ if x != nil {
+ return x.Policy
+ }
+ return nil
+}
+
+// MaintenancePolicy defines policies for maintenance operations
+type MaintenancePolicy struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TaskPolicies map[string]*TaskPolicy `protobuf:"bytes,1,rep,name=task_policies,json=taskPolicies,proto3" json:"task_policies,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Task type -> policy mapping
+ GlobalMaxConcurrent int32 `protobuf:"varint,2,opt,name=global_max_concurrent,json=globalMaxConcurrent,proto3" json:"global_max_concurrent,omitempty"` // Overall limit across all task types
+ DefaultRepeatIntervalSeconds int32 `protobuf:"varint,3,opt,name=default_repeat_interval_seconds,json=defaultRepeatIntervalSeconds,proto3" json:"default_repeat_interval_seconds,omitempty"` // Default seconds if task doesn't specify
+ DefaultCheckIntervalSeconds int32 `protobuf:"varint,4,opt,name=default_check_interval_seconds,json=defaultCheckIntervalSeconds,proto3" json:"default_check_interval_seconds,omitempty"` // Default seconds for periodic checks
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MaintenancePolicy) Reset() {
+ *x = MaintenancePolicy{}
+ mi := &file_worker_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MaintenancePolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MaintenancePolicy) ProtoMessage() {}
+
+func (x *MaintenancePolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[25]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MaintenancePolicy.ProtoReflect.Descriptor instead.
+func (*MaintenancePolicy) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *MaintenancePolicy) GetTaskPolicies() map[string]*TaskPolicy {
+ if x != nil {
+ return x.TaskPolicies
+ }
+ return nil
+}
+
+func (x *MaintenancePolicy) GetGlobalMaxConcurrent() int32 {
+ if x != nil {
+ return x.GlobalMaxConcurrent
+ }
+ return 0
+}
+
+func (x *MaintenancePolicy) GetDefaultRepeatIntervalSeconds() int32 {
+ if x != nil {
+ return x.DefaultRepeatIntervalSeconds
+ }
+ return 0
+}
+
+func (x *MaintenancePolicy) GetDefaultCheckIntervalSeconds() int32 {
+ if x != nil {
+ return x.DefaultCheckIntervalSeconds
+ }
+ return 0
+}
+
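Putting the configuration messages together, a hedged sketch of a MaintenanceConfig whose policy maps task-type names to typed per-task policies; the "vacuum" key and all numbers are illustrative, not defaults from this change.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	cfg := &worker_pb.MaintenanceConfig{
		Enabled:                true,
		ScanIntervalSeconds:    30 * 60,
		WorkerTimeoutSeconds:   5 * 60,
		TaskTimeoutSeconds:     2 * 3600,
		RetryDelaySeconds:      15 * 60,
		MaxRetries:             3,
		CleanupIntervalSeconds: 24 * 3600,
		TaskRetentionSeconds:   7 * 24 * 3600,
		Policy: &worker_pb.MaintenancePolicy{
			GlobalMaxConcurrent:          4,
			DefaultRepeatIntervalSeconds: 6 * 3600,
			DefaultCheckIntervalSeconds:  30 * 60,
			TaskPolicies: map[string]*worker_pb.TaskPolicy{
				"vacuum": {
					Enabled:       true,
					MaxConcurrent: 2,
					TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
						VacuumConfig: &worker_pb.VacuumTaskConfig{GarbageThreshold: 0.3},
					},
				},
			},
		},
	}
	fmt.Printf("maintenance enabled=%v, %d task policies\n",
		cfg.GetEnabled(), len(cfg.GetPolicy().GetTaskPolicies()))
}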
+// TaskPolicy represents configuration for a specific task type
+type TaskPolicy struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ MaxConcurrent int32 `protobuf:"varint,2,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"`
+ RepeatIntervalSeconds int32 `protobuf:"varint,3,opt,name=repeat_interval_seconds,json=repeatIntervalSeconds,proto3" json:"repeat_interval_seconds,omitempty"` // Seconds to wait before repeating
+ CheckIntervalSeconds int32 `protobuf:"varint,4,opt,name=check_interval_seconds,json=checkIntervalSeconds,proto3" json:"check_interval_seconds,omitempty"` // Seconds between checks
+ // Typed task-specific configuration (replaces generic map)
+ //
+ // Types that are valid to be assigned to TaskConfig:
+ //
+ // *TaskPolicy_VacuumConfig
+ // *TaskPolicy_ErasureCodingConfig
+ // *TaskPolicy_BalanceConfig
+ // *TaskPolicy_ReplicationConfig
+ TaskConfig isTaskPolicy_TaskConfig `protobuf_oneof:"task_config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskPolicy) Reset() {
+ *x = TaskPolicy{}
+ mi := &file_worker_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskPolicy) ProtoMessage() {}
+
+func (x *TaskPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[26]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskPolicy.ProtoReflect.Descriptor instead.
+func (*TaskPolicy) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *TaskPolicy) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *TaskPolicy) GetMaxConcurrent() int32 {
+ if x != nil {
+ return x.MaxConcurrent
+ }
+ return 0
+}
+
+func (x *TaskPolicy) GetRepeatIntervalSeconds() int32 {
+ if x != nil {
+ return x.RepeatIntervalSeconds
+ }
+ return 0
+}
+
+func (x *TaskPolicy) GetCheckIntervalSeconds() int32 {
+ if x != nil {
+ return x.CheckIntervalSeconds
+ }
+ return 0
+}
+
+func (x *TaskPolicy) GetTaskConfig() isTaskPolicy_TaskConfig {
+ if x != nil {
+ return x.TaskConfig
+ }
+ return nil
+}
+
+func (x *TaskPolicy) GetVacuumConfig() *VacuumTaskConfig {
+ if x != nil {
+ if x, ok := x.TaskConfig.(*TaskPolicy_VacuumConfig); ok {
+ return x.VacuumConfig
+ }
+ }
+ return nil
+}
+
+func (x *TaskPolicy) GetErasureCodingConfig() *ErasureCodingTaskConfig {
+ if x != nil {
+ if x, ok := x.TaskConfig.(*TaskPolicy_ErasureCodingConfig); ok {
+ return x.ErasureCodingConfig
+ }
+ }
+ return nil
+}
+
+func (x *TaskPolicy) GetBalanceConfig() *BalanceTaskConfig {
+ if x != nil {
+ if x, ok := x.TaskConfig.(*TaskPolicy_BalanceConfig); ok {
+ return x.BalanceConfig
+ }
+ }
+ return nil
+}
+
+func (x *TaskPolicy) GetReplicationConfig() *ReplicationTaskConfig {
+ if x != nil {
+ if x, ok := x.TaskConfig.(*TaskPolicy_ReplicationConfig); ok {
+ return x.ReplicationConfig
+ }
+ }
+ return nil
+}
+
+type isTaskPolicy_TaskConfig interface {
+ isTaskPolicy_TaskConfig()
+}
+
+type TaskPolicy_VacuumConfig struct {
+ VacuumConfig *VacuumTaskConfig `protobuf:"bytes,5,opt,name=vacuum_config,json=vacuumConfig,proto3,oneof"`
+}
+
+type TaskPolicy_ErasureCodingConfig struct {
+ ErasureCodingConfig *ErasureCodingTaskConfig `protobuf:"bytes,6,opt,name=erasure_coding_config,json=erasureCodingConfig,proto3,oneof"`
+}
+
+type TaskPolicy_BalanceConfig struct {
+ BalanceConfig *BalanceTaskConfig `protobuf:"bytes,7,opt,name=balance_config,json=balanceConfig,proto3,oneof"`
+}
+
+type TaskPolicy_ReplicationConfig struct {
+ ReplicationConfig *ReplicationTaskConfig `protobuf:"bytes,8,opt,name=replication_config,json=replicationConfig,proto3,oneof"`
+}
+
+func (*TaskPolicy_VacuumConfig) isTaskPolicy_TaskConfig() {}
+
+func (*TaskPolicy_ErasureCodingConfig) isTaskPolicy_TaskConfig() {}
+
+func (*TaskPolicy_BalanceConfig) isTaskPolicy_TaskConfig() {}
+
+func (*TaskPolicy_ReplicationConfig) isTaskPolicy_TaskConfig() {}
+
+// VacuumTaskConfig contains vacuum-specific configuration
+type VacuumTaskConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum (0.0-1.0)
+ MinVolumeAgeHours int32 `protobuf:"varint,2,opt,name=min_volume_age_hours,json=minVolumeAgeHours,proto3" json:"min_volume_age_hours,omitempty"` // Minimum age before vacuum is considered
+ MinIntervalSeconds int32 `protobuf:"varint,3,opt,name=min_interval_seconds,json=minIntervalSeconds,proto3" json:"min_interval_seconds,omitempty"` // Minimum time between vacuum operations on the same volume
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *VacuumTaskConfig) Reset() {
+ *x = VacuumTaskConfig{}
+ mi := &file_worker_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VacuumTaskConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumTaskConfig) ProtoMessage() {}
+
+func (x *VacuumTaskConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumTaskConfig.ProtoReflect.Descriptor instead.
+func (*VacuumTaskConfig) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *VacuumTaskConfig) GetGarbageThreshold() float64 {
+ if x != nil {
+ return x.GarbageThreshold
+ }
+ return 0
+}
+
+func (x *VacuumTaskConfig) GetMinVolumeAgeHours() int32 {
+ if x != nil {
+ return x.MinVolumeAgeHours
+ }
+ return 0
+}
+
+func (x *VacuumTaskConfig) GetMinIntervalSeconds() int32 {
+ if x != nil {
+ return x.MinIntervalSeconds
+ }
+ return 0
+}
+
+// ErasureCodingTaskConfig contains EC-specific configuration
+type ErasureCodingTaskConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FullnessRatio float64 `protobuf:"fixed64,1,opt,name=fullness_ratio,json=fullnessRatio,proto3" json:"fullness_ratio,omitempty"` // Minimum fullness ratio to trigger EC (0.0-1.0)
+ QuietForSeconds int32 `protobuf:"varint,2,opt,name=quiet_for_seconds,json=quietForSeconds,proto3" json:"quiet_for_seconds,omitempty"` // Minimum quiet time before EC
+ MinVolumeSizeMb int32 `protobuf:"varint,3,opt,name=min_volume_size_mb,json=minVolumeSizeMb,proto3" json:"min_volume_size_mb,omitempty"` // Minimum volume size for EC
+ CollectionFilter string `protobuf:"bytes,4,opt,name=collection_filter,json=collectionFilter,proto3" json:"collection_filter,omitempty"` // Only process volumes from specific collections
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErasureCodingTaskConfig) Reset() {
+ *x = ErasureCodingTaskConfig{}
+ mi := &file_worker_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErasureCodingTaskConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErasureCodingTaskConfig) ProtoMessage() {}
+
+func (x *ErasureCodingTaskConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[28]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErasureCodingTaskConfig.ProtoReflect.Descriptor instead.
+func (*ErasureCodingTaskConfig) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *ErasureCodingTaskConfig) GetFullnessRatio() float64 {
+ if x != nil {
+ return x.FullnessRatio
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskConfig) GetQuietForSeconds() int32 {
+ if x != nil {
+ return x.QuietForSeconds
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskConfig) GetMinVolumeSizeMb() int32 {
+ if x != nil {
+ return x.MinVolumeSizeMb
+ }
+ return 0
+}
+
+func (x *ErasureCodingTaskConfig) GetCollectionFilter() string {
+ if x != nil {
+ return x.CollectionFilter
+ }
+ return ""
+}
+
+// BalanceTaskConfig contains balance-specific configuration
+type BalanceTaskConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ImbalanceThreshold float64 `protobuf:"fixed64,1,opt,name=imbalance_threshold,json=imbalanceThreshold,proto3" json:"imbalance_threshold,omitempty"` // Threshold for triggering rebalancing (0.0-1.0)
+ MinServerCount int32 `protobuf:"varint,2,opt,name=min_server_count,json=minServerCount,proto3" json:"min_server_count,omitempty"` // Minimum number of servers required for balancing
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *BalanceTaskConfig) Reset() {
+ *x = BalanceTaskConfig{}
+ mi := &file_worker_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BalanceTaskConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BalanceTaskConfig) ProtoMessage() {}
+
+func (x *BalanceTaskConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[29]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BalanceTaskConfig.ProtoReflect.Descriptor instead.
+func (*BalanceTaskConfig) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *BalanceTaskConfig) GetImbalanceThreshold() float64 {
+ if x != nil {
+ return x.ImbalanceThreshold
+ }
+ return 0
+}
+
+func (x *BalanceTaskConfig) GetMinServerCount() int32 {
+ if x != nil {
+ return x.MinServerCount
+ }
+ return 0
+}
+
+// ReplicationTaskConfig contains replication-specific configuration
+type ReplicationTaskConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TargetReplicaCount int32 `protobuf:"varint,1,opt,name=target_replica_count,json=targetReplicaCount,proto3" json:"target_replica_count,omitempty"` // Target number of replicas
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReplicationTaskConfig) Reset() {
+ *x = ReplicationTaskConfig{}
+ mi := &file_worker_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReplicationTaskConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReplicationTaskConfig) ProtoMessage() {}
+
+func (x *ReplicationTaskConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_worker_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReplicationTaskConfig.ProtoReflect.Descriptor instead.
+func (*ReplicationTaskConfig) Descriptor() ([]byte, []int) {
+ return file_worker_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *ReplicationTaskConfig) GetTargetReplicaCount() int32 {
+ if x != nil {
+ return x.TargetReplicaCount
+ }
+ return 0
+}
+
var File_worker_proto protoreflect.FileDescriptor
const file_worker_proto_rawDesc = "" +
"\n" +
- "\fworker.proto\x12\tworker_pb\"\xc6\x03\n" +
+ "\fworker.proto\x12\tworker_pb\"\x90\x04\n" +
"\rWorkerMessage\x12\x1b\n" +
"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x1c\n" +
"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12C\n" +
@@ -1221,8 +2810,9 @@ const file_worker_proto_rawDesc = "" +
"\vtask_update\x18\x06 \x01(\v2\x15.worker_pb.TaskUpdateH\x00R\n" +
"taskUpdate\x12>\n" +
"\rtask_complete\x18\a \x01(\v2\x17.worker_pb.TaskCompleteH\x00R\ftaskComplete\x127\n" +
- "\bshutdown\x18\b \x01(\v2\x19.worker_pb.WorkerShutdownH\x00R\bshutdownB\t\n" +
- "\amessage\"\xce\x03\n" +
+ "\bshutdown\x18\b \x01(\v2\x19.worker_pb.WorkerShutdownH\x00R\bshutdown\x12H\n" +
+ "\x11task_log_response\x18\t \x01(\v2\x1a.worker_pb.TaskLogResponseH\x00R\x0ftaskLogResponseB\t\n" +
+ "\amessage\"\x95\x04\n" +
"\fAdminMessage\x12\x19\n" +
"\badmin_id\x18\x01 \x01(\tR\aadminId\x12\x1c\n" +
"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12V\n" +
@@ -1230,7 +2820,8 @@ const file_worker_proto_rawDesc = "" +
"\x12heartbeat_response\x18\x04 \x01(\v2\x1c.worker_pb.HeartbeatResponseH\x00R\x11heartbeatResponse\x12D\n" +
"\x0ftask_assignment\x18\x05 \x01(\v2\x19.worker_pb.TaskAssignmentH\x00R\x0etaskAssignment\x12J\n" +
"\x11task_cancellation\x18\x06 \x01(\v2\x1b.worker_pb.TaskCancellationH\x00R\x10taskCancellation\x12A\n" +
- "\x0eadmin_shutdown\x18\a \x01(\v2\x18.worker_pb.AdminShutdownH\x00R\radminShutdownB\t\n" +
+ "\x0eadmin_shutdown\x18\a \x01(\v2\x18.worker_pb.AdminShutdownH\x00R\radminShutdown\x12E\n" +
+ "\x10task_log_request\x18\b \x01(\v2\x19.worker_pb.TaskLogRequestH\x00R\x0etaskLogRequestB\t\n" +
"\amessage\"\x9c\x02\n" +
"\x12WorkerRegistration\x12\x1b\n" +
"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x18\n" +
@@ -1270,7 +2861,7 @@ const file_worker_proto_rawDesc = "" +
"\bmetadata\x18\x06 \x03(\v2'.worker_pb.TaskAssignment.MetadataEntryR\bmetadata\x1a;\n" +
"\rMetadataEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
- "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb8\x02\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf9\x03\n" +
"\n" +
"TaskParams\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x16\n" +
@@ -1281,13 +2872,63 @@ const file_worker_proto_rawDesc = "" +
"\vdata_center\x18\x04 \x01(\tR\n" +
"dataCenter\x12\x12\n" +
"\x04rack\x18\x05 \x01(\tR\x04rack\x12\x1a\n" +
- "\breplicas\x18\x06 \x03(\tR\breplicas\x12E\n" +
+ "\breplicas\x18\x06 \x03(\tR\breplicas\x12B\n" +
+ "\rvacuum_params\x18\a \x01(\v2\x1b.worker_pb.VacuumTaskParamsH\x00R\fvacuumParams\x12X\n" +
+ "\x15erasure_coding_params\x18\b \x01(\v2\".worker_pb.ErasureCodingTaskParamsH\x00R\x13erasureCodingParams\x12E\n" +
+ "\x0ebalance_params\x18\t \x01(\v2\x1c.worker_pb.BalanceTaskParamsH\x00R\rbalanceParams\x12Q\n" +
+ "\x12replication_params\x18\n" +
+ " \x01(\v2 .worker_pb.ReplicationTaskParamsH\x00R\x11replicationParamsB\r\n" +
+ "\vtask_params\"\xcb\x01\n" +
+ "\x10VacuumTaskParams\x12+\n" +
+ "\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12!\n" +
+ "\fforce_vacuum\x18\x02 \x01(\bR\vforceVacuum\x12\x1d\n" +
"\n" +
- "parameters\x18\a \x03(\v2%.worker_pb.TaskParams.ParametersEntryR\n" +
- "parameters\x1a=\n" +
- "\x0fParametersEntry\x12\x10\n" +
- "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
- "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x8e\x02\n" +
+ "batch_size\x18\x03 \x01(\x05R\tbatchSize\x12\x1f\n" +
+ "\vworking_dir\x18\x04 \x01(\tR\n" +
+ "workingDir\x12'\n" +
+ "\x0fverify_checksum\x18\x05 \x01(\bR\x0everifyChecksum\"\xcb\x03\n" +
+ "\x17ErasureCodingTaskParams\x120\n" +
+ "\x14estimated_shard_size\x18\x03 \x01(\x04R\x12estimatedShardSize\x12\x1f\n" +
+ "\vdata_shards\x18\x04 \x01(\x05R\n" +
+ "dataShards\x12#\n" +
+ "\rparity_shards\x18\x05 \x01(\x05R\fparityShards\x12\x1f\n" +
+ "\vworking_dir\x18\x06 \x01(\tR\n" +
+ "workingDir\x12#\n" +
+ "\rmaster_client\x18\a \x01(\tR\fmasterClient\x12%\n" +
+ "\x0ecleanup_source\x18\b \x01(\bR\rcleanupSource\x12/\n" +
+ "\x13placement_conflicts\x18\t \x03(\tR\x12placementConflicts\x12<\n" +
+ "\fdestinations\x18\n" +
+ " \x03(\v2\x18.worker_pb.ECDestinationR\fdestinations\x12\\\n" +
+ "\x18existing_shard_locations\x18\v \x03(\v2\".worker_pb.ExistingECShardLocationR\x16existingShardLocations\"\x9a\x01\n" +
+ "\rECDestination\x12\x12\n" +
+ "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" +
+ "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" +
+ "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" +
+ "\vdata_center\x18\x04 \x01(\tR\n" +
+ "dataCenter\x12'\n" +
+ "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\"J\n" +
+ "\x17ExistingECShardLocation\x12\x12\n" +
+ "\x04node\x18\x01 \x01(\tR\x04node\x12\x1b\n" +
+ "\tshard_ids\x18\x02 \x03(\rR\bshardIds\"\xaf\x02\n" +
+ "\x11BalanceTaskParams\x12\x1b\n" +
+ "\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
+ "\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
+ "\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
+ "\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
+ "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
+ "\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12\x1d\n" +
+ "\n" +
+ "force_move\x18\a \x01(\bR\tforceMove\x12'\n" +
+ "\x0ftimeout_seconds\x18\b \x01(\x05R\x0etimeoutSeconds\"\xbf\x02\n" +
+ "\x15ReplicationTaskParams\x12\x1b\n" +
+ "\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
+ "\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
+ "\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
+ "\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
+ "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
+ "\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12#\n" +
+ "\rreplica_count\x18\a \x01(\x05R\freplicaCount\x12-\n" +
+ "\x12verify_consistency\x18\b \x01(\bR\x11verifyConsistency\"\x8e\x02\n" +
"\n" +
"TaskUpdate\x12\x17\n" +
"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
@@ -1319,7 +2960,104 @@ const file_worker_proto_rawDesc = "" +
"\x10pending_task_ids\x18\x03 \x03(\tR\x0ependingTaskIds\"c\n" +
"\rAdminShutdown\x12\x16\n" +
"\x06reason\x18\x01 \x01(\tR\x06reason\x12:\n" +
- "\x19graceful_shutdown_seconds\x18\x02 \x01(\x05R\x17gracefulShutdownSeconds2V\n" +
+ "\x19graceful_shutdown_seconds\x18\x02 \x01(\x05R\x17gracefulShutdownSeconds\"\xe9\x01\n" +
+ "\x0eTaskLogRequest\x12\x17\n" +
+ "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
+ "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12)\n" +
+ "\x10include_metadata\x18\x03 \x01(\bR\x0fincludeMetadata\x12\x1f\n" +
+ "\vmax_entries\x18\x04 \x01(\x05R\n" +
+ "maxEntries\x12\x1b\n" +
+ "\tlog_level\x18\x05 \x01(\tR\blogLevel\x12\x1d\n" +
+ "\n" +
+ "start_time\x18\x06 \x01(\x03R\tstartTime\x12\x19\n" +
+ "\bend_time\x18\a \x01(\x03R\aendTime\"\xf8\x01\n" +
+ "\x0fTaskLogResponse\x12\x17\n" +
+ "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
+ "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x18\n" +
+ "\asuccess\x18\x03 \x01(\bR\asuccess\x12#\n" +
+ "\rerror_message\x18\x04 \x01(\tR\ferrorMessage\x126\n" +
+ "\bmetadata\x18\x05 \x01(\v2\x1a.worker_pb.TaskLogMetadataR\bmetadata\x128\n" +
+ "\vlog_entries\x18\x06 \x03(\v2\x17.worker_pb.TaskLogEntryR\n" +
+ "logEntries\"\x97\x04\n" +
+ "\x0fTaskLogMetadata\x12\x17\n" +
+ "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
+ "\ttask_type\x18\x02 \x01(\tR\btaskType\x12\x1b\n" +
+ "\tworker_id\x18\x03 \x01(\tR\bworkerId\x12\x1d\n" +
+ "\n" +
+ "start_time\x18\x04 \x01(\x03R\tstartTime\x12\x19\n" +
+ "\bend_time\x18\x05 \x01(\x03R\aendTime\x12\x1f\n" +
+ "\vduration_ms\x18\x06 \x01(\x03R\n" +
+ "durationMs\x12\x16\n" +
+ "\x06status\x18\a \x01(\tR\x06status\x12\x1a\n" +
+ "\bprogress\x18\b \x01(\x02R\bprogress\x12\x1b\n" +
+ "\tvolume_id\x18\t \x01(\rR\bvolumeId\x12\x16\n" +
+ "\x06server\x18\n" +
+ " \x01(\tR\x06server\x12\x1e\n" +
+ "\n" +
+ "collection\x18\v \x01(\tR\n" +
+ "collection\x12\"\n" +
+ "\rlog_file_path\x18\f \x01(\tR\vlogFilePath\x12\x1d\n" +
+ "\n" +
+ "created_at\x18\r \x01(\x03R\tcreatedAt\x12K\n" +
+ "\vcustom_data\x18\x0e \x03(\v2*.worker_pb.TaskLogMetadata.CustomDataEntryR\n" +
+ "customData\x1a=\n" +
+ "\x0fCustomDataEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x88\x02\n" +
+ "\fTaskLogEntry\x12\x1c\n" +
+ "\ttimestamp\x18\x01 \x01(\x03R\ttimestamp\x12\x14\n" +
+ "\x05level\x18\x02 \x01(\tR\x05level\x12\x18\n" +
+ "\amessage\x18\x03 \x01(\tR\amessage\x12;\n" +
+ "\x06fields\x18\x04 \x03(\v2#.worker_pb.TaskLogEntry.FieldsEntryR\x06fields\x12\x1a\n" +
+ "\bprogress\x18\x05 \x01(\x02R\bprogress\x12\x16\n" +
+ "\x06status\x18\x06 \x01(\tR\x06status\x1a9\n" +
+ "\vFieldsEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc0\x03\n" +
+ "\x11MaintenanceConfig\x12\x18\n" +
+ "\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" +
+ "\x15scan_interval_seconds\x18\x02 \x01(\x05R\x13scanIntervalSeconds\x124\n" +
+ "\x16worker_timeout_seconds\x18\x03 \x01(\x05R\x14workerTimeoutSeconds\x120\n" +
+ "\x14task_timeout_seconds\x18\x04 \x01(\x05R\x12taskTimeoutSeconds\x12.\n" +
+ "\x13retry_delay_seconds\x18\x05 \x01(\x05R\x11retryDelaySeconds\x12\x1f\n" +
+ "\vmax_retries\x18\x06 \x01(\x05R\n" +
+ "maxRetries\x128\n" +
+ "\x18cleanup_interval_seconds\x18\a \x01(\x05R\x16cleanupIntervalSeconds\x124\n" +
+ "\x16task_retention_seconds\x18\b \x01(\x05R\x14taskRetentionSeconds\x124\n" +
+ "\x06policy\x18\t \x01(\v2\x1c.worker_pb.MaintenancePolicyR\x06policy\"\x80\x03\n" +
+ "\x11MaintenancePolicy\x12S\n" +
+ "\rtask_policies\x18\x01 \x03(\v2..worker_pb.MaintenancePolicy.TaskPoliciesEntryR\ftaskPolicies\x122\n" +
+ "\x15global_max_concurrent\x18\x02 \x01(\x05R\x13globalMaxConcurrent\x12E\n" +
+ "\x1fdefault_repeat_interval_seconds\x18\x03 \x01(\x05R\x1cdefaultRepeatIntervalSeconds\x12C\n" +
+ "\x1edefault_check_interval_seconds\x18\x04 \x01(\x05R\x1bdefaultCheckIntervalSeconds\x1aV\n" +
+ "\x11TaskPoliciesEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
+ "\x05value\x18\x02 \x01(\v2\x15.worker_pb.TaskPolicyR\x05value:\x028\x01\"\x82\x04\n" +
+ "\n" +
+ "TaskPolicy\x12\x18\n" +
+ "\aenabled\x18\x01 \x01(\bR\aenabled\x12%\n" +
+ "\x0emax_concurrent\x18\x02 \x01(\x05R\rmaxConcurrent\x126\n" +
+ "\x17repeat_interval_seconds\x18\x03 \x01(\x05R\x15repeatIntervalSeconds\x124\n" +
+ "\x16check_interval_seconds\x18\x04 \x01(\x05R\x14checkIntervalSeconds\x12B\n" +
+ "\rvacuum_config\x18\x05 \x01(\v2\x1b.worker_pb.VacuumTaskConfigH\x00R\fvacuumConfig\x12X\n" +
+ "\x15erasure_coding_config\x18\x06 \x01(\v2\".worker_pb.ErasureCodingTaskConfigH\x00R\x13erasureCodingConfig\x12E\n" +
+ "\x0ebalance_config\x18\a \x01(\v2\x1c.worker_pb.BalanceTaskConfigH\x00R\rbalanceConfig\x12Q\n" +
+ "\x12replication_config\x18\b \x01(\v2 .worker_pb.ReplicationTaskConfigH\x00R\x11replicationConfigB\r\n" +
+ "\vtask_config\"\xa2\x01\n" +
+ "\x10VacuumTaskConfig\x12+\n" +
+ "\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12/\n" +
+ "\x14min_volume_age_hours\x18\x02 \x01(\x05R\x11minVolumeAgeHours\x120\n" +
+ "\x14min_interval_seconds\x18\x03 \x01(\x05R\x12minIntervalSeconds\"\xc6\x01\n" +
+ "\x17ErasureCodingTaskConfig\x12%\n" +
+ "\x0efullness_ratio\x18\x01 \x01(\x01R\rfullnessRatio\x12*\n" +
+ "\x11quiet_for_seconds\x18\x02 \x01(\x05R\x0fquietForSeconds\x12+\n" +
+ "\x12min_volume_size_mb\x18\x03 \x01(\x05R\x0fminVolumeSizeMb\x12+\n" +
+ "\x11collection_filter\x18\x04 \x01(\tR\x10collectionFilter\"n\n" +
+ "\x11BalanceTaskConfig\x12/\n" +
+ "\x13imbalance_threshold\x18\x01 \x01(\x01R\x12imbalanceThreshold\x12(\n" +
+ "\x10min_server_count\x18\x02 \x01(\x05R\x0eminServerCount\"I\n" +
+ "\x15ReplicationTaskConfig\x120\n" +
+ "\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount2V\n" +
"\rWorkerService\x12E\n" +
"\fWorkerStream\x12\x18.worker_pb.WorkerMessage\x1a\x17.worker_pb.AdminMessage(\x010\x01B2Z0github.com/seaweedfs/seaweedfs/weed/pb/worker_pbb\x06proto3"
@@ -1335,53 +3073,90 @@ func file_worker_proto_rawDescGZIP() []byte {
return file_worker_proto_rawDescData
}
-var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 38)
var file_worker_proto_goTypes = []any{
- (*WorkerMessage)(nil), // 0: worker_pb.WorkerMessage
- (*AdminMessage)(nil), // 1: worker_pb.AdminMessage
- (*WorkerRegistration)(nil), // 2: worker_pb.WorkerRegistration
- (*RegistrationResponse)(nil), // 3: worker_pb.RegistrationResponse
- (*WorkerHeartbeat)(nil), // 4: worker_pb.WorkerHeartbeat
- (*HeartbeatResponse)(nil), // 5: worker_pb.HeartbeatResponse
- (*TaskRequest)(nil), // 6: worker_pb.TaskRequest
- (*TaskAssignment)(nil), // 7: worker_pb.TaskAssignment
- (*TaskParams)(nil), // 8: worker_pb.TaskParams
- (*TaskUpdate)(nil), // 9: worker_pb.TaskUpdate
- (*TaskComplete)(nil), // 10: worker_pb.TaskComplete
- (*TaskCancellation)(nil), // 11: worker_pb.TaskCancellation
- (*WorkerShutdown)(nil), // 12: worker_pb.WorkerShutdown
- (*AdminShutdown)(nil), // 13: worker_pb.AdminShutdown
- nil, // 14: worker_pb.WorkerRegistration.MetadataEntry
- nil, // 15: worker_pb.TaskAssignment.MetadataEntry
- nil, // 16: worker_pb.TaskParams.ParametersEntry
- nil, // 17: worker_pb.TaskUpdate.MetadataEntry
- nil, // 18: worker_pb.TaskComplete.ResultMetadataEntry
+ (*WorkerMessage)(nil), // 0: worker_pb.WorkerMessage
+ (*AdminMessage)(nil), // 1: worker_pb.AdminMessage
+ (*WorkerRegistration)(nil), // 2: worker_pb.WorkerRegistration
+ (*RegistrationResponse)(nil), // 3: worker_pb.RegistrationResponse
+ (*WorkerHeartbeat)(nil), // 4: worker_pb.WorkerHeartbeat
+ (*HeartbeatResponse)(nil), // 5: worker_pb.HeartbeatResponse
+ (*TaskRequest)(nil), // 6: worker_pb.TaskRequest
+ (*TaskAssignment)(nil), // 7: worker_pb.TaskAssignment
+ (*TaskParams)(nil), // 8: worker_pb.TaskParams
+ (*VacuumTaskParams)(nil), // 9: worker_pb.VacuumTaskParams
+ (*ErasureCodingTaskParams)(nil), // 10: worker_pb.ErasureCodingTaskParams
+ (*ECDestination)(nil), // 11: worker_pb.ECDestination
+ (*ExistingECShardLocation)(nil), // 12: worker_pb.ExistingECShardLocation
+ (*BalanceTaskParams)(nil), // 13: worker_pb.BalanceTaskParams
+ (*ReplicationTaskParams)(nil), // 14: worker_pb.ReplicationTaskParams
+ (*TaskUpdate)(nil), // 15: worker_pb.TaskUpdate
+ (*TaskComplete)(nil), // 16: worker_pb.TaskComplete
+ (*TaskCancellation)(nil), // 17: worker_pb.TaskCancellation
+ (*WorkerShutdown)(nil), // 18: worker_pb.WorkerShutdown
+ (*AdminShutdown)(nil), // 19: worker_pb.AdminShutdown
+ (*TaskLogRequest)(nil), // 20: worker_pb.TaskLogRequest
+ (*TaskLogResponse)(nil), // 21: worker_pb.TaskLogResponse
+ (*TaskLogMetadata)(nil), // 22: worker_pb.TaskLogMetadata
+ (*TaskLogEntry)(nil), // 23: worker_pb.TaskLogEntry
+ (*MaintenanceConfig)(nil), // 24: worker_pb.MaintenanceConfig
+ (*MaintenancePolicy)(nil), // 25: worker_pb.MaintenancePolicy
+ (*TaskPolicy)(nil), // 26: worker_pb.TaskPolicy
+ (*VacuumTaskConfig)(nil), // 27: worker_pb.VacuumTaskConfig
+ (*ErasureCodingTaskConfig)(nil), // 28: worker_pb.ErasureCodingTaskConfig
+ (*BalanceTaskConfig)(nil), // 29: worker_pb.BalanceTaskConfig
+ (*ReplicationTaskConfig)(nil), // 30: worker_pb.ReplicationTaskConfig
+ nil, // 31: worker_pb.WorkerRegistration.MetadataEntry
+ nil, // 32: worker_pb.TaskAssignment.MetadataEntry
+ nil, // 33: worker_pb.TaskUpdate.MetadataEntry
+ nil, // 34: worker_pb.TaskComplete.ResultMetadataEntry
+ nil, // 35: worker_pb.TaskLogMetadata.CustomDataEntry
+ nil, // 36: worker_pb.TaskLogEntry.FieldsEntry
+ nil, // 37: worker_pb.MaintenancePolicy.TaskPoliciesEntry
}
var file_worker_proto_depIdxs = []int32{
2, // 0: worker_pb.WorkerMessage.registration:type_name -> worker_pb.WorkerRegistration
4, // 1: worker_pb.WorkerMessage.heartbeat:type_name -> worker_pb.WorkerHeartbeat
6, // 2: worker_pb.WorkerMessage.task_request:type_name -> worker_pb.TaskRequest
- 9, // 3: worker_pb.WorkerMessage.task_update:type_name -> worker_pb.TaskUpdate
- 10, // 4: worker_pb.WorkerMessage.task_complete:type_name -> worker_pb.TaskComplete
- 12, // 5: worker_pb.WorkerMessage.shutdown:type_name -> worker_pb.WorkerShutdown
- 3, // 6: worker_pb.AdminMessage.registration_response:type_name -> worker_pb.RegistrationResponse
- 5, // 7: worker_pb.AdminMessage.heartbeat_response:type_name -> worker_pb.HeartbeatResponse
- 7, // 8: worker_pb.AdminMessage.task_assignment:type_name -> worker_pb.TaskAssignment
- 11, // 9: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation
- 13, // 10: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown
- 14, // 11: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
- 8, // 12: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams
- 15, // 13: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry
- 16, // 14: worker_pb.TaskParams.parameters:type_name -> worker_pb.TaskParams.ParametersEntry
- 17, // 15: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry
- 18, // 16: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry
- 0, // 17: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage
- 1, // 18: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage
- 18, // [18:19] is the sub-list for method output_type
- 17, // [17:18] is the sub-list for method input_type
- 17, // [17:17] is the sub-list for extension type_name
- 17, // [17:17] is the sub-list for extension extendee
- 0, // [0:17] is the sub-list for field type_name
+ 15, // 3: worker_pb.WorkerMessage.task_update:type_name -> worker_pb.TaskUpdate
+ 16, // 4: worker_pb.WorkerMessage.task_complete:type_name -> worker_pb.TaskComplete
+ 18, // 5: worker_pb.WorkerMessage.shutdown:type_name -> worker_pb.WorkerShutdown
+ 21, // 6: worker_pb.WorkerMessage.task_log_response:type_name -> worker_pb.TaskLogResponse
+ 3, // 7: worker_pb.AdminMessage.registration_response:type_name -> worker_pb.RegistrationResponse
+ 5, // 8: worker_pb.AdminMessage.heartbeat_response:type_name -> worker_pb.HeartbeatResponse
+ 7, // 9: worker_pb.AdminMessage.task_assignment:type_name -> worker_pb.TaskAssignment
+ 17, // 10: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation
+ 19, // 11: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown
+ 20, // 12: worker_pb.AdminMessage.task_log_request:type_name -> worker_pb.TaskLogRequest
+ 31, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
+ 8, // 14: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams
+ 32, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry
+ 9, // 16: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams
+ 10, // 17: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams
+ 13, // 18: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams
+ 14, // 19: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams
+ 11, // 20: worker_pb.ErasureCodingTaskParams.destinations:type_name -> worker_pb.ECDestination
+ 12, // 21: worker_pb.ErasureCodingTaskParams.existing_shard_locations:type_name -> worker_pb.ExistingECShardLocation
+ 33, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry
+ 34, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry
+ 22, // 24: worker_pb.TaskLogResponse.metadata:type_name -> worker_pb.TaskLogMetadata
+ 23, // 25: worker_pb.TaskLogResponse.log_entries:type_name -> worker_pb.TaskLogEntry
+ 35, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry
+ 36, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry
+ 25, // 28: worker_pb.MaintenanceConfig.policy:type_name -> worker_pb.MaintenancePolicy
+ 37, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry
+ 27, // 30: worker_pb.TaskPolicy.vacuum_config:type_name -> worker_pb.VacuumTaskConfig
+ 28, // 31: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig
+ 29, // 32: worker_pb.TaskPolicy.balance_config:type_name -> worker_pb.BalanceTaskConfig
+ 30, // 33: worker_pb.TaskPolicy.replication_config:type_name -> worker_pb.ReplicationTaskConfig
+ 26, // 34: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy
+ 0, // 35: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage
+ 1, // 36: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage
+ 36, // [36:37] is the sub-list for method output_type
+ 35, // [35:36] is the sub-list for method input_type
+ 35, // [35:35] is the sub-list for extension type_name
+ 35, // [35:35] is the sub-list for extension extendee
+ 0, // [0:35] is the sub-list for field type_name
}
func init() { file_worker_proto_init() }
@@ -1396,6 +3171,7 @@ func file_worker_proto_init() {
(*WorkerMessage_TaskUpdate)(nil),
(*WorkerMessage_TaskComplete)(nil),
(*WorkerMessage_Shutdown)(nil),
+ (*WorkerMessage_TaskLogResponse)(nil),
}
file_worker_proto_msgTypes[1].OneofWrappers = []any{
(*AdminMessage_RegistrationResponse)(nil),
@@ -1403,6 +3179,19 @@ func file_worker_proto_init() {
(*AdminMessage_TaskAssignment)(nil),
(*AdminMessage_TaskCancellation)(nil),
(*AdminMessage_AdminShutdown)(nil),
+ (*AdminMessage_TaskLogRequest)(nil),
+ }
+ file_worker_proto_msgTypes[8].OneofWrappers = []any{
+ (*TaskParams_VacuumParams)(nil),
+ (*TaskParams_ErasureCodingParams)(nil),
+ (*TaskParams_BalanceParams)(nil),
+ (*TaskParams_ReplicationParams)(nil),
+ }
+ file_worker_proto_msgTypes[26].OneofWrappers = []any{
+ (*TaskPolicy_VacuumConfig)(nil),
+ (*TaskPolicy_ErasureCodingConfig)(nil),
+ (*TaskPolicy_BalanceConfig)(nil),
+ (*TaskPolicy_ReplicationConfig)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -1410,7 +3199,7 @@ func file_worker_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)),
NumEnums: 0,
- NumMessages: 19,
+ NumMessages: 38,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 0e733fc0a..84a9035ca 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -402,3 +402,120 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v
return nil
}
+
+// ReceiveFile receives a file stream from the client and writes it to storage
+func (vs *VolumeServer) ReceiveFile(stream volume_server_pb.VolumeServer_ReceiveFileServer) error {
+ var fileInfo *volume_server_pb.ReceiveFileInfo
+ var targetFile *os.File
+ var filePath string
+ var bytesWritten uint64
+
+ defer func() {
+ if targetFile != nil {
+ targetFile.Close()
+ }
+ }()
+
+ for {
+ req, err := stream.Recv()
+ if err == io.EOF {
+ // Stream completed successfully
+ if targetFile != nil {
+ targetFile.Sync()
+ glog.V(1).Infof("Successfully received file %s (%d bytes)", filePath, bytesWritten)
+ }
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ BytesWritten: bytesWritten,
+ })
+ }
+ if err != nil {
+ // Clean up on error
+ if targetFile != nil {
+ targetFile.Close()
+ os.Remove(filePath)
+ }
+ glog.Errorf("Failed to receive stream: %v", err)
+ return fmt.Errorf("failed to receive stream: %v", err)
+ }
+
+ switch data := req.Data.(type) {
+ case *volume_server_pb.ReceiveFileRequest_Info:
+ // First message contains file info
+ fileInfo = data.Info
+ glog.V(1).Infof("ReceiveFile: volume %d, ext %s, collection %s, shard %d, size %d",
+ fileInfo.VolumeId, fileInfo.Ext, fileInfo.Collection, fileInfo.ShardId, fileInfo.FileSize)
+
+ // Create file path based on file info
+ if fileInfo.IsEcVolume {
+ // Find storage location for EC shard
+ var targetLocation *storage.DiskLocation
+ for _, location := range vs.store.Locations {
+ if location.DiskType == types.HardDriveType {
+ targetLocation = location
+ break
+ }
+ }
+ if targetLocation == nil && len(vs.store.Locations) > 0 {
+ targetLocation = vs.store.Locations[0] // Fall back to first available location
+ }
+ if targetLocation == nil {
+ glog.Errorf("ReceiveFile: no storage location available")
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: "no storage location available",
+ })
+ }
+
+ // Create EC shard file path
+ baseFileName := erasure_coding.EcShardBaseFileName(fileInfo.Collection, int(fileInfo.VolumeId))
+ filePath = util.Join(targetLocation.Directory, baseFileName+fileInfo.Ext)
+ } else {
+ // Regular volume file
+ v := vs.store.GetVolume(needle.VolumeId(fileInfo.VolumeId))
+ if v == nil {
+ glog.Errorf("ReceiveFile: volume %d not found", fileInfo.VolumeId)
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: fmt.Sprintf("volume %d not found", fileInfo.VolumeId),
+ })
+ }
+ filePath = v.FileName(fileInfo.Ext)
+ }
+
+ // Create target file
+ targetFile, err = os.Create(filePath)
+ if err != nil {
+ glog.Errorf("ReceiveFile: failed to create file %s: %v", filePath, err)
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: fmt.Sprintf("failed to create file: %v", err),
+ })
+ }
+ glog.V(1).Infof("ReceiveFile: created target file %s", filePath)
+
+ case *volume_server_pb.ReceiveFileRequest_FileContent:
+ // Subsequent messages contain file content
+ if targetFile == nil {
+ glog.Errorf("ReceiveFile: file info must be sent first")
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: "file info must be sent first",
+ })
+ }
+
+ n, err := targetFile.Write(data.FileContent)
+ if err != nil {
+ targetFile.Close()
+ os.Remove(filePath)
+ glog.Errorf("ReceiveFile: failed to write to file %s: %v", filePath, err)
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: fmt.Sprintf("failed to write file: %v", err),
+ })
+ }
+ bytesWritten += uint64(n)
+ glog.V(2).Infof("ReceiveFile: wrote %d bytes to %s (total: %d)", n, filePath, bytesWritten)
+
+ default:
+ glog.Errorf("ReceiveFile: unknown message type")
+ return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
+ Error: "unknown message type",
+ })
+ }
+ }
+}
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 23cc29e0d..5981c5efe 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -141,20 +141,31 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
glog.V(0).Infof("VolumeEcShardsCopy: %v", req)
var location *storage.DiskLocation
- if req.CopyEcxFile {
- location = vs.store.FindFreeLocation(func(location *storage.DiskLocation) bool {
- return location.DiskType == types.HardDriveType
- })
+
+ // Use disk_id if provided (disk-aware storage)
+ if req.DiskId > 0 || (req.DiskId == 0 && len(vs.store.Locations) > 0) {
+ // Validate disk ID is within bounds
+ if int(req.DiskId) >= len(vs.store.Locations) {
+ return nil, fmt.Errorf("invalid disk_id %d: only have %d disks", req.DiskId, len(vs.store.Locations))
+ }
+
+ // Use the specific disk location
+ location = vs.store.Locations[req.DiskId]
+ glog.V(1).Infof("Using disk %d for EC shard copy: %s", req.DiskId, location.Directory)
} else {
- location = vs.store.FindFreeLocation(func(location *storage.DiskLocation) bool {
- //(location.FindEcVolume) This method is error, will cause location is nil, redundant judgment
- // _, found := location.FindEcVolume(needle.VolumeId(req.VolumeId))
- // return found
- return true
- })
- }
- if location == nil {
- return nil, fmt.Errorf("no space left")
+ // Fallback to old behavior for backward compatibility
+ if req.CopyEcxFile {
+ location = vs.store.FindFreeLocation(func(location *storage.DiskLocation) bool {
+ return location.DiskType == types.HardDriveType
+ })
+ } else {
+ location = vs.store.FindFreeLocation(func(location *storage.DiskLocation) bool {
+ return true
+ })
+ }
+ if location == nil {
+ return nil, fmt.Errorf("no space left")
+ }
}
dataBaseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
@@ -467,3 +478,30 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_
return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil
}
+
+func (vs *VolumeServer) VolumeEcShardsInfo(ctx context.Context, req *volume_server_pb.VolumeEcShardsInfoRequest) (*volume_server_pb.VolumeEcShardsInfoResponse, error) {
+ glog.V(0).Infof("VolumeEcShardsInfo: volume %d", req.VolumeId)
+
+ var ecShardInfos []*volume_server_pb.EcShardInfo
+
+ // Find the EC volume
+ for _, location := range vs.store.Locations {
+ if v, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found {
+ // Get shard details from the EC volume
+ shardDetails := v.ShardDetails()
+ for _, shardDetail := range shardDetails {
+ ecShardInfo := &volume_server_pb.EcShardInfo{
+ ShardId: uint32(shardDetail.ShardId),
+ Size: shardDetail.Size,
+ Collection: v.Collection,
+ }
+ ecShardInfos = append(ecShardInfos, ecShardInfo)
+ }
+ break
+ }
+ }
+
+ return &volume_server_pb.VolumeEcShardsInfoResponse{
+ EcShardInfos: ecShardInfos,
+ }, nil
+}
diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go
index 27797add3..ec6490662 100644
--- a/weed/server/volume_server_handlers_admin.go
+++ b/weed/server/volume_server_handlers_admin.go
@@ -1,11 +1,12 @@
package weed_server
import (
- "github.com/seaweedfs/seaweedfs/weed/topology"
- "github.com/seaweedfs/seaweedfs/weed/util/version"
"net/http"
"path/filepath"
+ "github.com/seaweedfs/seaweedfs/weed/topology"
+ "github.com/seaweedfs/seaweedfs/weed/util/version"
+
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
)
diff --git a/weed/server/volume_server_ui/volume.html b/weed/server/volume_server_ui/volume.html
index 565d14368..605eb52f0 100644
--- a/weed/server/volume_server_ui/volume.html
+++ b/weed/server/volume_server_ui/volume.html
@@ -175,8 +175,8 @@
<tr>
<th>Id</th>
<th>Collection</th>
- <th>Shard Size</th>
- <th>Shards</th>
+ <th>Total Size</th>
+ <th>Shard Details</th>
<th>CreatedAt</th>
</tr>
</thead>
@@ -185,8 +185,14 @@
<tr>
<td><code>{{ .VolumeId }}</code></td>
<td>{{ .Collection }}</td>
- <td>{{ bytesToHumanReadable .ShardSize }}</td>
- <td>{{ .ShardIdList }}</td>
+ <td>{{ bytesToHumanReadable .Size }}</td>
+ <td>
+ {{ range .ShardDetails }}
+ <span class="label label-info" style="margin-right: 5px;">
+ {{ .ShardId }}: {{ bytesToHumanReadable .Size }}
+ </span>
+ {{ end }}
+ </td>
<td>{{ .CreatedAt.Format "2006-01-02 15:04" }}</td>
</tr>
{{ end }}
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index 8c556dff4..9872736a4 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -4,15 +4,16 @@ import (
"bytes"
"flag"
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
- "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
- "github.com/seaweedfs/seaweedfs/weed/storage/types"
"path/filepath"
"slices"
"sort"
"strings"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/storage/types"
+
"io"
)
@@ -220,13 +221,14 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
return int(a.Id) - int(b.Id)
})
volumeInfosFound := false
+
for _, vi := range t.VolumeInfos {
if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id, int64(vi.Size)) {
continue
}
if !volumeInfosFound {
outNodeInfo()
- output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
+ output(verbosityLevel >= 4, writer, " Disk %s(%s) id:%d\n", diskType, diskInfoToString(t), t.DiskId)
volumeInfosFound = true
}
s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
@@ -238,7 +240,7 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
}
if !volumeInfosFound && !ecShardInfoFound {
outNodeInfo()
- output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
+ output(verbosityLevel >= 4, writer, " Disk %s(%s) id:%d\n", diskType, diskInfoToString(t), t.DiskId)
ecShardInfoFound = true
}
var expireAtString string
@@ -246,7 +248,8 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
if destroyTime > 0 {
expireAtString = fmt.Sprintf("expireAt:%s", time.Unix(int64(destroyTime), 0).Format("2006-01-02 15:04:05"))
}
- output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v %s\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds(), expireAtString)
+ output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v %s\n",
+ ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds(), expireAtString)
}
output((volumeInfosFound || ecShardInfoFound) && verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
return s
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index a3f0b6585..02f5f5923 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -134,7 +134,7 @@ func getValidVolumeName(basename string) string {
return ""
}
-func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
+func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64, diskId uint32) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
@@ -184,15 +184,16 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne
return false
}
+ v.diskId = diskId // Set the disk ID for existing volumes
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
- glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
- l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
+ glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s disk_id=%d",
+ l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String(), diskId)
return true
}
-func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
+func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64, diskId uint32) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
@@ -218,7 +219,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, con
go func() {
defer wg.Done()
for fi := range task_queue {
- _ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
+ _ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout, diskId)
}
}()
}
@@ -227,6 +228,10 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, con
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
+ l.loadExistingVolumesWithId(needleMapKind, ldbTimeout, 0) // Default disk ID for backward compatibility
+}
+
+func (l *DiskLocation) loadExistingVolumesWithId(needleMapKind NeedleMapKind, ldbTimeout int64, diskId uint32) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
@@ -242,11 +247,11 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeo
workerNum = 10
}
}
- l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
- glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
+ l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout, diskId)
+ glog.V(0).Infof("Store started on dir: %s with %d volumes max %d (disk ID: %d)", l.Directory, len(l.volumes), l.MaxVolumeCount, diskId)
l.loadAllEcShards()
- glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
+ glog.V(0).Infof("Store started on dir: %s with %d ec shards (disk ID: %d)", l.Directory, len(l.ecVolumes), diskId)
}
@@ -310,9 +315,9 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (fo
return
}
-func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
+func (l *DiskLocation) LoadVolume(diskId uint32, vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
- return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
+ return l.loadExistingVolume(fileInfo, needleMapKind, false, 0, diskId)
}
return false
}
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
index 228faf640..33bc4ac7d 100644
--- a/weed/storage/erasure_coding/ec_volume.go
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -196,7 +196,22 @@ func (ev *EcVolume) ShardIdList() (shardIds []ShardId) {
return
}
-func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.VolumeEcShardInformationMessage) {
+type ShardInfo struct {
+ ShardId ShardId
+ Size int64
+}
+
+func (ev *EcVolume) ShardDetails() (shards []ShardInfo) {
+ for _, s := range ev.Shards {
+ shards = append(shards, ShardInfo{
+ ShardId: s.ShardId,
+ Size: s.Size(),
+ })
+ }
+ return
+}
+
+func (ev *EcVolume) ToVolumeEcShardInformationMessage(diskId uint32) (messages []*master_pb.VolumeEcShardInformationMessage) {
prevVolumeId := needle.VolumeId(math.MaxUint32)
var m *master_pb.VolumeEcShardInformationMessage
for _, s := range ev.Shards {
@@ -206,6 +221,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
Collection: s.Collection,
DiskType: string(ev.diskType),
ExpireAtSec: ev.ExpireAtSec,
+ DiskId: diskId,
}
messages = append(messages, m)
}
diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go
index f464a6d3d..787910b0c 100644
--- a/weed/storage/erasure_coding/ec_volume_info.go
+++ b/weed/storage/erasure_coding/ec_volume_info.go
@@ -11,15 +11,17 @@ type EcVolumeInfo struct {
Collection string
ShardBits ShardBits
DiskType string
- ExpireAtSec uint64 //ec volume destroy time, calculated from the ec volume was created
+ DiskId uint32 // ID of the disk this EC volume is on
+ ExpireAtSec uint64 // ec volume destroy time, calculated from when the ec volume was created
}
-func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits, expireAtSec uint64) *EcVolumeInfo {
+func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits, expireAtSec uint64, diskId uint32) *EcVolumeInfo {
return &EcVolumeInfo{
Collection: collection,
VolumeId: vid,
ShardBits: shardBits,
DiskType: diskType,
+ DiskId: diskId,
ExpireAtSec: expireAtSec,
}
}
@@ -62,6 +64,7 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.
Collection: ecInfo.Collection,
DiskType: ecInfo.DiskType,
ExpireAtSec: ecInfo.ExpireAtSec,
+ DiskId: ecInfo.DiskId,
}
}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 69bb5bc3b..2d9707571 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -91,11 +91,12 @@ func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int,
s.Locations = append(s.Locations, location)
stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
+ diskId := uint32(i) // Track disk ID
wg.Add(1)
- go func() {
+ go func(id uint32, diskLoc *DiskLocation) {
defer wg.Done()
- location.loadExistingVolumes(needleMapKind, ldbTimeout)
- }()
+ diskLoc.loadExistingVolumesWithId(needleMapKind, ldbTimeout, id)
+ }(diskId, location)
}
wg.Wait()
@@ -163,14 +164,25 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
if s.findVolume(vid) != nil {
return fmt.Errorf("Volume Id %d already exists!", vid)
}
- if location := s.FindFreeLocation(func(location *DiskLocation) bool {
- return location.DiskType == diskType
- }); location != nil {
- glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
- location.Directory, vid, collection, replicaPlacement, ttl)
+
+ // Find location and its index
+ var location *DiskLocation
+ var diskId uint32
+ for i, loc := range s.Locations {
+ if loc.DiskType == diskType && s.hasFreeDiskLocation(loc) {
+ location = loc
+ diskId = uint32(i)
+ break
+ }
+ }
+
+ if location != nil {
+ glog.V(0).Infof("In dir %s (disk ID %d) adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
+ location.Directory, diskId, vid, collection, replicaPlacement, ttl)
if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, ver, memoryMapMaxSizeMb, ldbTimeout); err == nil {
+ volume.diskId = diskId // Set the disk ID
location.SetVolume(vid, volume)
- glog.V(0).Infof("add volume %d", vid)
+ glog.V(0).Infof("add volume %d on disk ID %d", vid, diskId)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(vid),
Collection: collection,
@@ -178,6 +190,7 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
Version: uint32(volume.Version()),
Ttl: ttl.ToUint32(),
DiskType: string(diskType),
+ DiskId: diskId,
}
return nil
} else {
@@ -187,6 +200,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
return fmt.Errorf("No more free space left")
}
+// hasFreeDiskLocation checks if a disk location has a free volume slot available
+func (s *Store) hasFreeDiskLocation(location *DiskLocation) bool {
+ return int64(location.VolumesLen()) < int64(location.MaxVolumeCount)
+}
+
func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
for _, location := range s.Locations {
stats := collectStatsForOneLocation(location)
@@ -218,21 +236,10 @@ func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
Ttl: v.Ttl,
CompactRevision: uint32(v.CompactionRevision),
DiskType: v.DiskType().String(),
+ DiskId: v.diskId,
}
s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
-
- v.dataFileAccessLock.RLock()
- defer v.dataFileAccessLock.RUnlock()
-
- if v.nm == nil {
- return
- }
-
- s.FileCount = v.nm.FileCount()
- s.DeleteCount = v.nm.DeletedCount()
- s.DeletedByteCount = v.nm.DeletedSize()
- s.Size = v.nm.ContentSize()
-
+ s.Size, _, _ = v.FileStat()
return
}
@@ -384,7 +391,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
}
func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeEcShardInformationMessage) {
- for _, location := range s.Locations {
+ for diskId, location := range s.Locations {
// Collect ecVolume to be deleted
var toDeleteEvs []*erasure_coding.EcVolume
location.ecVolumesLock.RLock()
@@ -392,7 +399,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE
if ev.IsTimeToDestroy() {
toDeleteEvs = append(toDeleteEvs, ev)
} else {
- messages := ev.ToVolumeEcShardInformationMessage()
+ messages := ev.ToVolumeEcShardInformationMessage(uint32(diskId))
ecShards = append(ecShards, messages...)
}
}
@@ -400,7 +407,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE
// Delete expired volumes
for _, ev := range toDeleteEvs {
- messages := ev.ToVolumeEcShardInformationMessage()
+ messages := ev.ToVolumeEcShardInformationMessage(uint32(diskId))
// deleteEcVolumeById has its own lock
err := location.deleteEcVolumeById(ev.VolumeId)
if err != nil {
@@ -515,10 +522,11 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
}
func (s *Store) MountVolume(i needle.VolumeId) error {
- for _, location := range s.Locations {
- if found := location.LoadVolume(i, s.NeedleMapKind); found == true {
+ for diskId, location := range s.Locations {
+ if found := location.LoadVolume(uint32(diskId), i, s.NeedleMapKind); found == true {
glog.V(0).Infof("mount volume %d", i)
v := s.findVolume(i)
+ v.diskId = uint32(diskId) // Set disk ID when mounting
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(v.Id),
Collection: v.Collection,
@@ -526,6 +534,7 @@ func (s *Store) MountVolume(i needle.VolumeId) error {
Version: uint32(v.Version()),
Ttl: v.Ttl.ToUint32(),
DiskType: string(v.location.DiskType),
+ DiskId: uint32(diskId),
}
return nil
}
@@ -546,6 +555,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
Version: uint32(v.Version()),
Ttl: v.Ttl.ToUint32(),
DiskType: string(v.location.DiskType),
+ DiskId: v.diskId,
}
for _, location := range s.Locations {
@@ -574,6 +584,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error {
Version: uint32(v.Version()),
Ttl: v.Ttl.ToUint32(),
DiskType: string(v.location.DiskType),
+ DiskId: v.diskId,
}
for _, location := range s.Locations {
err := location.DeleteVolume(i, onlyEmpty)
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index a915e1dbd..0126ad9d4 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -25,10 +25,10 @@ import (
func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
var ecShardMessages []*master_pb.VolumeEcShardInformationMessage
collectionEcShardSize := make(map[string]int64)
- for _, location := range s.Locations {
+ for diskId, location := range s.Locations {
location.ecVolumesLock.RLock()
for _, ecShards := range location.ecVolumes {
- ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage()...)
+ ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage(uint32(diskId))...)
for _, ecShard := range ecShards.Shards {
collectionEcShardSize[ecShards.Collection] += ecShard.Size()
@@ -49,9 +49,9 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
}
func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error {
- for _, location := range s.Locations {
+ for diskId, location := range s.Locations {
if ecVolume, err := location.LoadEcShard(collection, vid, shardId); err == nil {
- glog.V(0).Infof("MountEcShards %d.%d", vid, shardId)
+ glog.V(0).Infof("MountEcShards %d.%d on disk ID %d", vid, shardId, diskId)
var shardBits erasure_coding.ShardBits
@@ -61,6 +61,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
DiskType: string(location.DiskType),
ExpireAtSec: ecVolume.ExpireAtSec,
+ DiskId: uint32(diskId),
}
return nil
} else if err == os.ErrNotExist {
@@ -75,7 +76,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.ShardId) error {
- ecShard, found := s.findEcShard(vid, shardId)
+ diskId, ecShard, found := s.findEcShard(vid, shardId)
if !found {
return nil
}
@@ -86,26 +87,27 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
Collection: ecShard.Collection,
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
DiskType: string(ecShard.DiskType),
+ DiskId: diskId,
}
- for _, location := range s.Locations {
- if deleted := location.UnloadEcShard(vid, shardId); deleted {
- glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
- s.DeletedEcShardsChan <- message
- return nil
- }
+ location := s.Locations[diskId]
+
+ if deleted := location.UnloadEcShard(vid, shardId); deleted {
+ glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
+ s.DeletedEcShardsChan <- message
+ return nil
}
return fmt.Errorf("UnmountEcShards %d.%d not found on disk", vid, shardId)
}
-func (s *Store) findEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (*erasure_coding.EcVolumeShard, bool) {
- for _, location := range s.Locations {
+func (s *Store) findEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (diskId uint32, shard *erasure_coding.EcVolumeShard, found bool) {
+ for diskId, location := range s.Locations {
if v, found := location.FindEcShard(vid, shardId); found {
- return v, found
+ return uint32(diskId), v, found
}
}
- return nil, false
+ return 0, nil, false
}
func (s *Store) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) {
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index b495b379d..dd8ecbdce 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -51,6 +51,7 @@ type Volume struct {
volumeInfoRWLock sync.RWMutex
volumeInfo *volume_server_pb.VolumeInfo
location *DiskLocation
+ diskId uint32 // ID of this volume's disk in Store.Locations array
lastIoError error
}
@@ -337,6 +338,7 @@ func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.Volume
CompactRevision: uint32(v.SuperBlock.CompactionRevision),
ModifiedAtSecond: modTime.Unix(),
DiskType: string(v.location.DiskType),
+ DiskId: v.diskId,
}
volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()
diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go
index 326b8f401..4d0a30658 100644
--- a/weed/storage/volume_info.go
+++ b/weed/storage/volume_info.go
@@ -15,6 +15,7 @@ type VolumeInfo struct {
ReplicaPlacement *super_block.ReplicaPlacement
Ttl *needle.TTL
DiskType string
+ DiskId uint32
Collection string
Version needle.Version
FileCount int
@@ -42,6 +43,7 @@ func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err er
RemoteStorageName: m.RemoteStorageName,
RemoteStorageKey: m.RemoteStorageKey,
DiskType: m.DiskType,
+ DiskId: m.DiskId,
}
rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
if e != nil {
@@ -94,6 +96,7 @@ func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMe
RemoteStorageName: vi.RemoteStorageName,
RemoteStorageKey: vi.RemoteStorageKey,
DiskType: vi.DiskType,
+ DiskId: vi.DiskId,
}
}
diff --git a/weed/topology/disk.go b/weed/topology/disk.go
index 80a4aaa2d..8ca25c244 100644
--- a/weed/topology/disk.go
+++ b/weed/topology/disk.go
@@ -246,6 +246,17 @@ func (d *Disk) FreeSpace() int64 {
func (d *Disk) ToDiskInfo() *master_pb.DiskInfo {
diskUsage := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
+
+ // Get disk ID from first volume or EC shard
+ var diskId uint32
+ volumes := d.GetVolumes()
+ ecShards := d.GetEcShards()
+ if len(volumes) > 0 {
+ diskId = volumes[0].DiskId
+ } else if len(ecShards) > 0 {
+ diskId = ecShards[0].DiskId
+ }
+
m := &master_pb.DiskInfo{
Type: string(d.Id()),
VolumeCount: diskUsage.volumeCount,
@@ -253,11 +264,12 @@ func (d *Disk) ToDiskInfo() *master_pb.DiskInfo {
FreeVolumeCount: diskUsage.maxVolumeCount - (diskUsage.volumeCount - diskUsage.remoteVolumeCount) - (diskUsage.ecShardCount+1)/erasure_coding.DataShardsCount,
ActiveVolumeCount: diskUsage.activeVolumeCount,
RemoteVolumeCount: diskUsage.remoteVolumeCount,
+ DiskId: diskId,
}
- for _, v := range d.GetVolumes() {
+ for _, v := range volumes {
m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
}
- for _, ecv := range d.GetEcShards() {
+ for _, ecv := range ecShards {
m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
}
return m
diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go
index 53762b49a..0ad028625 100644
--- a/weed/topology/topology_ec.go
+++ b/weed/topology/topology_ec.go
@@ -23,7 +23,8 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits),
- shardInfo.ExpireAtSec))
+ shardInfo.ExpireAtSec,
+ shardInfo.DiskId))
}
// find out the delta volumes
newShards, deletedShards = dn.UpdateEcShards(shards)
@@ -45,7 +46,9 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
- erasure_coding.ShardBits(shardInfo.EcIndexBits), shardInfo.ExpireAtSec))
+ erasure_coding.ShardBits(shardInfo.EcIndexBits),
+ shardInfo.ExpireAtSec,
+ shardInfo.DiskId))
}
for _, shardInfo := range deletedEcShards {
deletedShards = append(deletedShards,
@@ -53,7 +56,9 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
- erasure_coding.ShardBits(shardInfo.EcIndexBits), shardInfo.ExpireAtSec))
+ erasure_coding.ShardBits(shardInfo.EcIndexBits),
+ shardInfo.ExpireAtSec,
+ shardInfo.DiskId))
}
dn.DeltaUpdateEcShards(newShards, deletedShards)
diff --git a/weed/worker/client.go b/weed/worker/client.go
index 60b33fb31..53854c6e3 100644
--- a/weed/worker/client.go
+++ b/weed/worker/client.go
@@ -80,6 +80,21 @@ func (c *GrpcAdminClient) Connect() error {
return fmt.Errorf("already connected")
}
+ // Always start the reconnection loop, even if initial connection fails
+ go c.reconnectionLoop()
+
+ // Attempt initial connection
+ err := c.attemptConnection()
+ if err != nil {
+ glog.V(1).Infof("Initial connection failed, reconnection loop will retry: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+// attemptConnection tries to establish the connection without managing the reconnection loop
+func (c *GrpcAdminClient) attemptConnection() error {
// Detect TLS support and create appropriate connection
conn, err := c.createConnection()
if err != nil {
@@ -100,10 +115,34 @@ func (c *GrpcAdminClient) Connect() error {
c.stream = stream
c.connected = true
- // Start stream handlers and reconnection loop
- go c.handleOutgoing()
- go c.handleIncoming()
- go c.reconnectionLoop()
+ // Always check for worker info and send registration immediately as the very first message
+ c.mutex.RLock()
+ workerInfo := c.lastWorkerInfo
+ c.mutex.RUnlock()
+
+ if workerInfo != nil {
+ // Send registration synchronously as the very first message
+ if err := c.sendRegistrationSync(workerInfo); err != nil {
+ c.conn.Close()
+ c.connected = false
+ return fmt.Errorf("failed to register worker: %w", err)
+ }
+ glog.Infof("Worker registered successfully with admin server")
+ } else {
+ // No worker info yet - stream will wait for registration
+ glog.V(1).Infof("Connected to admin server, waiting for worker registration info")
+ }
+
+ // Start stream handlers with synchronization
+ outgoingReady := make(chan struct{})
+ incomingReady := make(chan struct{})
+
+ go c.handleOutgoingWithReady(outgoingReady)
+ go c.handleIncomingWithReady(incomingReady)
+
+ // Wait for both handlers to be ready
+ <-outgoingReady
+ <-incomingReady
glog.Infof("Connected to admin server at %s", c.adminAddress)
return nil
@@ -268,53 +307,16 @@ func (c *GrpcAdminClient) reconnect() error {
if c.conn != nil {
c.conn.Close()
}
+ c.connected = false
c.mutex.Unlock()
- // Create new connection
- conn, err := c.createConnection()
+ // Attempt to re-establish connection using the same logic as initial connection
+ err := c.attemptConnection()
if err != nil {
- return fmt.Errorf("failed to create connection: %w", err)
- }
-
- client := worker_pb.NewWorkerServiceClient(conn)
-
- // Create new stream
- streamCtx, streamCancel := context.WithCancel(context.Background())
- stream, err := client.WorkerStream(streamCtx)
- if err != nil {
- conn.Close()
- streamCancel()
- return fmt.Errorf("failed to create stream: %w", err)
- }
-
- // Update client state
- c.mutex.Lock()
- c.conn = conn
- c.client = client
- c.stream = stream
- c.streamCtx = streamCtx
- c.streamCancel = streamCancel
- c.connected = true
- c.mutex.Unlock()
-
- // Restart stream handlers
- go c.handleOutgoing()
- go c.handleIncoming()
-
- // Re-register worker if we have previous registration info
- c.mutex.RLock()
- workerInfo := c.lastWorkerInfo
- c.mutex.RUnlock()
-
- if workerInfo != nil {
- glog.Infof("Re-registering worker after reconnection...")
- if err := c.sendRegistration(workerInfo); err != nil {
- glog.Errorf("Failed to re-register worker: %v", err)
- // Don't fail the reconnection because of registration failure
- // The registration will be retried on next heartbeat or operation
- }
+ return fmt.Errorf("failed to reconnect: %w", err)
}
+ // Registration is now handled in attemptConnection if worker info is available
return nil
}
@@ -340,8 +342,19 @@ func (c *GrpcAdminClient) handleOutgoing() {
}
}
+// handleOutgoingWithReady processes outgoing messages and signals when ready
+func (c *GrpcAdminClient) handleOutgoingWithReady(ready chan struct{}) {
+ // Signal that this handler is ready to process messages
+ close(ready)
+
+ // Now process messages normally
+ c.handleOutgoing()
+}
+
// handleIncoming processes incoming messages from admin
func (c *GrpcAdminClient) handleIncoming() {
+ glog.V(1).Infof("📡 INCOMING HANDLER STARTED: Worker %s incoming message handler started", c.workerID)
+
for {
c.mutex.RLock()
connected := c.connected
@@ -349,15 +362,17 @@ func (c *GrpcAdminClient) handleIncoming() {
c.mutex.RUnlock()
if !connected {
+ glog.V(1).Infof("🔌 INCOMING HANDLER STOPPED: Worker %s stopping incoming handler - not connected", c.workerID)
break
}
+ glog.V(4).Infof("👂 LISTENING: Worker %s waiting for message from admin server", c.workerID)
msg, err := stream.Recv()
if err != nil {
if err == io.EOF {
- glog.Infof("Admin server closed the stream")
+ glog.Infof("🔚 STREAM CLOSED: Worker %s admin server closed the stream", c.workerID)
} else {
- glog.Errorf("Failed to receive message from admin: %v", err)
+ glog.Errorf("❌ RECEIVE ERROR: Worker %s failed to receive message from admin: %v", c.workerID, err)
}
c.mutex.Lock()
c.connected = false
@@ -365,26 +380,42 @@ func (c *GrpcAdminClient) handleIncoming() {
break
}
+ glog.V(4).Infof("📨 MESSAGE RECEIVED: Worker %s received message from admin server: %T", c.workerID, msg.Message)
+
// Route message to waiting goroutines or general handler
select {
case c.incoming <- msg:
+ glog.V(3).Infof("✅ MESSAGE ROUTED: Worker %s successfully routed message to handler", c.workerID)
case <-time.After(time.Second):
- glog.Warningf("Incoming message buffer full, dropping message")
+ glog.Warningf("🚫 MESSAGE DROPPED: Worker %s incoming message buffer full, dropping message: %T", c.workerID, msg.Message)
}
}
+
+ glog.V(1).Infof("🏁 INCOMING HANDLER FINISHED: Worker %s incoming message handler finished", c.workerID)
+}
+
+// handleIncomingWithReady processes incoming messages and signals when ready
+func (c *GrpcAdminClient) handleIncomingWithReady(ready chan struct{}) {
+ // Signal that this handler is ready to process messages
+ close(ready)
+
+ // Now process messages normally
+ c.handleIncoming()
}
// RegisterWorker registers the worker with the admin server
func (c *GrpcAdminClient) RegisterWorker(worker *types.Worker) error {
- if !c.connected {
- return fmt.Errorf("not connected to admin server")
- }
-
// Store worker info for re-registration after reconnection
c.mutex.Lock()
c.lastWorkerInfo = worker
c.mutex.Unlock()
+ // If not connected, registration will happen when connection is established
+ if !c.connected {
+ glog.V(1).Infof("Not connected yet, worker info stored for registration upon connection")
+ return nil
+ }
+
return c.sendRegistration(worker)
}
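
Note: with this hunk, RegisterWorker merely stores the worker info when the stream is down, and attemptConnection sends that stored registration as the very first message once a stream comes up. A minimal usage sketch of the resulting ordering; the worker field values and dial option are illustrative, and only fields visible in this commit are set:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/seaweedfs/seaweedfs/weed/worker"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

func main() {
	dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
	client := worker.NewGrpcAdminClient("localhost:23646", "worker-1", dialOption)

	// Safe to call before Connect(): the info is stored and will be sent
	// as the first message on the stream by attemptConnection.
	_ = client.RegisterWorker(&types.Worker{
		Address:       "worker-1:8080",
		Capabilities:  []types.TaskType{types.TaskTypeBalance},
		MaxConcurrent: 2,
	})

	if err := client.Connect(); err != nil {
		log.Printf("initial connect failed, reconnection loop will retry: %v", err)
	}
	defer client.Disconnect()
}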
@@ -435,9 +466,88 @@ func (c *GrpcAdminClient) sendRegistration(worker *types.Worker) error {
}
}
+// sendRegistrationSync sends the registration message synchronously
+func (c *GrpcAdminClient) sendRegistrationSync(worker *types.Worker) error {
+ capabilities := make([]string, len(worker.Capabilities))
+ for i, cap := range worker.Capabilities {
+ capabilities[i] = string(cap)
+ }
+
+ msg := &worker_pb.WorkerMessage{
+ WorkerId: c.workerID,
+ Timestamp: time.Now().Unix(),
+ Message: &worker_pb.WorkerMessage_Registration{
+ Registration: &worker_pb.WorkerRegistration{
+ WorkerId: c.workerID,
+ Address: worker.Address,
+ Capabilities: capabilities,
+ MaxConcurrent: int32(worker.MaxConcurrent),
+ Metadata: make(map[string]string),
+ },
+ },
+ }
+
+ // Send directly to stream to ensure it's the first message
+ if err := c.stream.Send(msg); err != nil {
+ return fmt.Errorf("failed to send registration message: %w", err)
+ }
+
+ // Create a channel to receive the response
+ responseChan := make(chan *worker_pb.AdminMessage, 1)
+ errChan := make(chan error, 1)
+
+ // Start a goroutine to listen for the response
+ go func() {
+ for {
+ response, err := c.stream.Recv()
+ if err != nil {
+ errChan <- fmt.Errorf("failed to receive registration response: %w", err)
+ return
+ }
+
+ if regResp := response.GetRegistrationResponse(); regResp != nil {
+ responseChan <- response
+ return
+ }
+ // Continue waiting if it's not a registration response
+ }
+ }()
+
+ // Wait for registration response with timeout
+ timeout := time.NewTimer(10 * time.Second)
+ defer timeout.Stop()
+
+ select {
+ case response := <-responseChan:
+ if regResp := response.GetRegistrationResponse(); regResp != nil {
+ if regResp.Success {
+ glog.V(1).Infof("Worker registered successfully: %s", regResp.Message)
+ return nil
+ }
+ return fmt.Errorf("registration failed: %s", regResp.Message)
+ }
+ return fmt.Errorf("unexpected response type")
+ case err := <-errChan:
+ return err
+ case <-timeout.C:
+ return fmt.Errorf("registration timeout")
+ }
+}
+
// SendHeartbeat sends heartbeat to admin server
func (c *GrpcAdminClient) SendHeartbeat(workerID string, status *types.WorkerStatus) error {
if !c.connected {
+ // If we're currently reconnecting, don't wait - just skip the heartbeat
+ c.mutex.RLock()
+ reconnecting := c.reconnecting
+ c.mutex.RUnlock()
+
+ if reconnecting {
+ // Don't treat as an error - reconnection is in progress
+ glog.V(2).Infof("Skipping heartbeat during reconnection")
+ return nil
+ }
+
// Wait for reconnection for a short time
if err := c.waitForConnection(10 * time.Second); err != nil {
return fmt.Errorf("not connected to admin server: %w", err)
@@ -477,6 +587,17 @@ func (c *GrpcAdminClient) SendHeartbeat(workerID string, status *types.WorkerSta
// RequestTask requests a new task from admin server
func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.TaskType) (*types.Task, error) {
if !c.connected {
+ // If we're currently reconnecting, don't wait - just return no task
+ c.mutex.RLock()
+ reconnecting := c.reconnecting
+ c.mutex.RUnlock()
+
+ if reconnecting {
+ // Don't treat as an error - reconnection is in progress
+ glog.V(2).Infof("🔄 RECONNECTING: Worker %s skipping task request during reconnection", workerID)
+ return nil, nil
+ }
+
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
return nil, fmt.Errorf("not connected to admin server: %w", err)
@@ -488,6 +609,9 @@ func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.Task
caps[i] = string(cap)
}
+ glog.V(3).Infof("📤 SENDING TASK REQUEST: Worker %s sending task request to admin server with capabilities: %v",
+ workerID, capabilities)
+
msg := &worker_pb.WorkerMessage{
WorkerId: c.workerID,
Timestamp: time.Now().Unix(),
@@ -502,23 +626,24 @@ func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.Task
select {
case c.outgoing <- msg:
+ glog.V(3).Infof("✅ TASK REQUEST SENT: Worker %s successfully sent task request to admin server", workerID)
case <-time.After(time.Second):
+ glog.Errorf("❌ TASK REQUEST TIMEOUT: Worker %s failed to send task request: timeout", workerID)
return nil, fmt.Errorf("failed to send task request: timeout")
}
// Wait for task assignment
+ glog.V(3).Infof("⏳ WAITING FOR RESPONSE: Worker %s waiting for task assignment response (5s timeout)", workerID)
timeout := time.NewTimer(5 * time.Second)
defer timeout.Stop()
for {
select {
case response := <-c.incoming:
+ glog.V(3).Infof("📨 RESPONSE RECEIVED: Worker %s received response from admin server: %T", workerID, response.Message)
if taskAssign := response.GetTaskAssignment(); taskAssign != nil {
- // Convert parameters map[string]string to map[string]interface{}
- parameters := make(map[string]interface{})
- for k, v := range taskAssign.Params.Parameters {
- parameters[k] = v
- }
+ glog.V(1).Infof("Worker %s received task assignment in response: %s (type: %s, volume: %d)",
+ workerID, taskAssign.TaskId, taskAssign.TaskType, taskAssign.Params.VolumeId)
// Convert to our task type
task := &types.Task{
@@ -530,11 +655,15 @@ func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.Task
Collection: taskAssign.Params.Collection,
Priority: types.TaskPriority(taskAssign.Priority),
CreatedAt: time.Unix(taskAssign.CreatedTime, 0),
- Parameters: parameters,
+ // Use typed protobuf parameters directly
+ TypedParams: taskAssign.Params,
}
return task, nil
+ } else {
+ glog.V(3).Infof("📭 NON-TASK RESPONSE: Worker %s received non-task response: %T", workerID, response.Message)
}
case <-timeout.C:
+ glog.V(3).Infof("⏰ TASK REQUEST TIMEOUT: Worker %s - no task assignment received within 5 seconds", workerID)
return nil, nil // No task available
}
}
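
Note: task assignments now carry the protobuf parameters verbatim (TypedParams) instead of a flattened map[string]interface{}. Reading the typed payload on the receiving side looks roughly like this, using only accessors that appear elsewhere in this commit (task is the *types.Task returned by RequestTask):

if p := task.TypedParams; p != nil {
	if bp := p.GetBalanceParams(); bp != nil {
		glog.V(1).Infof("volume %d has a planned destination: %s", p.VolumeId, bp.DestNode)
	}
}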
@@ -542,24 +671,47 @@ func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.Task
// CompleteTask reports task completion to admin server
func (c *GrpcAdminClient) CompleteTask(taskID string, success bool, errorMsg string) error {
+ return c.CompleteTaskWithMetadata(taskID, success, errorMsg, nil)
+}
+
+// CompleteTaskWithMetadata reports task completion with additional metadata
+func (c *GrpcAdminClient) CompleteTaskWithMetadata(taskID string, success bool, errorMsg string, metadata map[string]string) error {
if !c.connected {
+ // If we're currently reconnecting, don't wait - just skip the completion report
+ c.mutex.RLock()
+ reconnecting := c.reconnecting
+ c.mutex.RUnlock()
+
+ if reconnecting {
+ // Don't treat as an error - reconnection is in progress
+ glog.V(2).Infof("Skipping task completion report during reconnection for task %s", taskID)
+ return nil
+ }
+
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
return fmt.Errorf("not connected to admin server: %w", err)
}
}
+ taskComplete := &worker_pb.TaskComplete{
+ TaskId: taskID,
+ WorkerId: c.workerID,
+ Success: success,
+ ErrorMessage: errorMsg,
+ CompletionTime: time.Now().Unix(),
+ }
+
+ // Add metadata if provided
+ if metadata != nil {
+ taskComplete.ResultMetadata = metadata
+ }
+
msg := &worker_pb.WorkerMessage{
WorkerId: c.workerID,
Timestamp: time.Now().Unix(),
Message: &worker_pb.WorkerMessage_TaskComplete{
- TaskComplete: &worker_pb.TaskComplete{
- TaskId: taskID,
- WorkerId: c.workerID,
- Success: success,
- ErrorMessage: errorMsg,
- CompletionTime: time.Now().Unix(),
- },
+ TaskComplete: taskComplete,
},
}
@@ -574,6 +726,17 @@ func (c *GrpcAdminClient) CompleteTask(taskID string, success bool, errorMsg str
// UpdateTaskProgress updates task progress to admin server
func (c *GrpcAdminClient) UpdateTaskProgress(taskID string, progress float64) error {
if !c.connected {
+ // If we're currently reconnecting, don't wait - just skip the progress update
+ c.mutex.RLock()
+ reconnecting := c.reconnecting
+ c.mutex.RUnlock()
+
+ if reconnecting {
+ // Don't treat as an error - reconnection is in progress
+ glog.V(2).Infof("Skipping task progress update during reconnection for task %s", taskID)
+ return nil
+ }
+
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
return fmt.Errorf("not connected to admin server: %w", err)
@@ -663,6 +826,12 @@ func (c *GrpcAdminClient) waitForConnection(timeout time.Duration) error {
return fmt.Errorf("timeout waiting for connection")
}
+// GetIncomingChannel returns the incoming message channel for message processing
+// This allows the worker to process admin messages directly
+func (c *GrpcAdminClient) GetIncomingChannel() <-chan *worker_pb.AdminMessage {
+ return c.incoming
+}
+
// MockAdminClient provides a mock implementation for testing
type MockAdminClient struct {
workerID string
@@ -741,6 +910,12 @@ func (m *MockAdminClient) UpdateTaskProgress(taskID string, progress float64) er
return nil
}
+// CompleteTaskWithMetadata mock implementation
+func (m *MockAdminClient) CompleteTaskWithMetadata(taskID string, success bool, errorMsg string, metadata map[string]string) error {
+ glog.Infof("Mock: Task %s completed: success=%v, error=%s, metadata=%v", taskID, success, errorMsg, metadata)
+ return nil
+}
+
// IsConnected mock implementation
func (m *MockAdminClient) IsConnected() bool {
m.mutex.RLock()
diff --git a/weed/worker/client_test.go b/weed/worker/client_test.go
deleted file mode 100644
index c57ea0240..000000000
--- a/weed/worker/client_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package worker
-
-import (
- "context"
- "testing"
-
- "github.com/seaweedfs/seaweedfs/weed/pb"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func TestGrpcConnection(t *testing.T) {
- // Test that we can create a gRPC connection with insecure credentials
- // This tests the connection setup without requiring a running server
- adminAddress := "localhost:33646" // gRPC port for admin server on port 23646
-
- // This should not fail with transport security errors
- conn, err := pb.GrpcDial(context.Background(), adminAddress, false, grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- // Connection failure is expected when no server is running
- // But it should NOT be a transport security error
- if err.Error() == "grpc: no transport security set" {
- t.Fatalf("Transport security error should not occur with insecure credentials: %v", err)
- }
- t.Logf("Connection failed as expected (no server running): %v", err)
- } else {
- // If connection succeeds, clean up
- conn.Close()
- t.Log("Connection succeeded")
- }
-}
-
-func TestGrpcAdminClient_Connect(t *testing.T) {
- // Test that the GrpcAdminClient can be created and attempt connection
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:23646", "test-worker", dialOption)
-
- // This should not fail with transport security errors
- err := client.Connect()
- if err != nil {
- // Connection failure is expected when no server is running
- // But it should NOT be a transport security error
- if err.Error() == "grpc: no transport security set" {
- t.Fatalf("Transport security error should not occur with insecure credentials: %v", err)
- }
- t.Logf("Connection failed as expected (no server running): %v", err)
- } else {
- // If connection succeeds, clean up
- client.Disconnect()
- t.Log("Connection succeeded")
- }
-}
-
-func TestAdminAddressToGrpcAddress(t *testing.T) {
- tests := []struct {
- adminAddress string
- expected string
- }{
- {"localhost:9333", "localhost:19333"},
- {"localhost:23646", "localhost:33646"},
- {"admin.example.com:9333", "admin.example.com:19333"},
- {"127.0.0.1:8080", "127.0.0.1:18080"},
- }
-
- for _, test := range tests {
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient(test.adminAddress, "test-worker", dialOption)
- result := client.adminAddress
- if result != test.expected {
- t.Errorf("For admin address %s, expected gRPC address %s, got %s",
- test.adminAddress, test.expected, result)
- }
- }
-}
-
-func TestMockAdminClient(t *testing.T) {
- // Test that the mock client works correctly
- client := NewMockAdminClient()
-
- // Should be able to connect/disconnect without errors
- err := client.Connect()
- if err != nil {
- t.Fatalf("Mock client connect failed: %v", err)
- }
-
- if !client.IsConnected() {
- t.Error("Mock client should be connected")
- }
-
- err = client.Disconnect()
- if err != nil {
- t.Fatalf("Mock client disconnect failed: %v", err)
- }
-
- if client.IsConnected() {
- t.Error("Mock client should be disconnected")
- }
-}
-
-func TestCreateAdminClient(t *testing.T) {
- // Test client creation
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client, err := CreateAdminClient("localhost:9333", "test-worker", dialOption)
- if err != nil {
- t.Fatalf("Failed to create admin client: %v", err)
- }
-
- if client == nil {
- t.Fatal("Client should not be nil")
- }
-}
diff --git a/weed/worker/client_tls_test.go b/weed/worker/client_tls_test.go
deleted file mode 100644
index d95d5f4f5..000000000
--- a/weed/worker/client_tls_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package worker
-
-import (
- "strings"
- "testing"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func TestGrpcClientTLSDetection(t *testing.T) {
- // Test that the client can be created with a dial option
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:33646", "test-worker", dialOption)
-
- // Test that the client has the correct dial option
- if client.dialOption == nil {
- t.Error("Client should have a dial option")
- }
-
- t.Logf("Client created successfully with dial option")
-}
-
-func TestCreateAdminClientGrpc(t *testing.T) {
- // Test client creation - admin server port gets transformed to gRPC port
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client, err := CreateAdminClient("localhost:23646", "test-worker", dialOption)
- if err != nil {
- t.Fatalf("Failed to create admin client: %v", err)
- }
-
- if client == nil {
- t.Fatal("Client should not be nil")
- }
-
- // Verify it's the correct type
- grpcClient, ok := client.(*GrpcAdminClient)
- if !ok {
- t.Fatal("Client should be GrpcAdminClient type")
- }
-
- // The admin address should be transformed to the gRPC port (HTTP + 10000)
- expectedAddress := "localhost:33646" // 23646 + 10000
- if grpcClient.adminAddress != expectedAddress {
- t.Errorf("Expected admin address %s, got %s", expectedAddress, grpcClient.adminAddress)
- }
-
- if grpcClient.workerID != "test-worker" {
- t.Errorf("Expected worker ID test-worker, got %s", grpcClient.workerID)
- }
-}
-
-func TestConnectionTimeouts(t *testing.T) {
- // Test that connections have proper timeouts
- // Use localhost with a port that's definitely closed
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:1", "test-worker", dialOption) // Port 1 is reserved and won't be open
-
- // Test that the connection creation fails when actually trying to use it
- start := time.Now()
- err := client.Connect() // This should fail when trying to establish the stream
- duration := time.Since(start)
-
- if err == nil {
- t.Error("Expected connection to closed port to fail")
- } else {
- t.Logf("Connection failed as expected: %v", err)
- }
-
- // Should fail quickly but not too quickly
- if duration > 10*time.Second {
- t.Errorf("Connection attempt took too long: %v", duration)
- }
-}
-
-func TestConnectionWithDialOption(t *testing.T) {
- // Test that the connection uses the provided dial option
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:1", "test-worker", dialOption) // Port 1 is reserved and won't be open
-
- // Test the actual connection
- err := client.Connect()
- if err == nil {
- t.Error("Expected connection to closed port to fail")
- client.Disconnect() // Clean up if it somehow succeeded
- } else {
- t.Logf("Connection failed as expected: %v", err)
- }
-
- // The error should indicate a connection failure
- if err != nil && err.Error() != "" {
- t.Logf("Connection error message: %s", err.Error())
- // The error should contain connection-related terms
- if !strings.Contains(err.Error(), "connection") && !strings.Contains(err.Error(), "dial") {
- t.Logf("Error message doesn't indicate connection issues: %s", err.Error())
- }
- }
-}
-
-func TestClientWithSecureDialOption(t *testing.T) {
- // Test that the client correctly uses a secure dial option
- // This would normally use LoadClientTLS, but for testing we'll use insecure
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:33646", "test-worker", dialOption)
-
- if client.dialOption == nil {
- t.Error("Client should have a dial option")
- }
-
- t.Logf("Client created successfully with dial option")
-}
-
-func TestConnectionWithRealAddress(t *testing.T) {
- // Test connection behavior with a real address that doesn't support gRPC
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("www.google.com:80", "test-worker", dialOption) // HTTP port, not gRPC
-
- err := client.Connect()
- if err == nil {
- t.Log("Connection succeeded unexpectedly")
- client.Disconnect()
- } else {
- t.Logf("Connection failed as expected: %v", err)
- }
-}
-
-func TestDialOptionUsage(t *testing.T) {
- // Test that the provided dial option is used for connections
- dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
- client := NewGrpcAdminClient("localhost:1", "test-worker", dialOption) // Port 1 won't support gRPC at all
-
- // Verify the dial option is stored
- if client.dialOption == nil {
- t.Error("Dial option should be stored in client")
- }
-
- // Test connection fails appropriately
- err := client.Connect()
- if err == nil {
- t.Error("Connection should fail to non-gRPC port")
- client.Disconnect()
- } else {
- t.Logf("Connection failed as expected: %v", err)
- }
-}
diff --git a/weed/worker/tasks/balance/balance.go b/weed/worker/tasks/balance/balance.go
index ea867d950..0becb3415 100644
--- a/weed/worker/tasks/balance/balance.go
+++ b/weed/worker/tasks/balance/balance.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"fmt"
"time"
@@ -15,6 +16,9 @@ type Task struct {
server string
volumeID uint32
collection string
+
+ // Task parameters for accessing planned destinations
+ taskParams types.TaskParams
}
// NewTask creates a new balance task instance
@@ -30,7 +34,31 @@ func NewTask(server string, volumeID uint32, collection string) *Task {
// Execute executes the balance task
func (t *Task) Execute(params types.TaskParams) error {
- glog.Infof("Starting balance task for volume %d on server %s (collection: %s)", t.volumeID, t.server, t.collection)
+ // Use BaseTask.ExecuteTask to handle logging initialization
+ return t.ExecuteTask(context.Background(), params, t.executeImpl)
+}
+
+// executeImpl is the actual balance implementation
+func (t *Task) executeImpl(ctx context.Context, params types.TaskParams) error {
+ // Store task parameters for accessing planned destinations
+ t.taskParams = params
+
+ // Get planned destination
+ destNode := t.getPlannedDestination()
+ if destNode != "" {
+ t.LogWithFields("INFO", "Starting balance task with planned destination", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "source": t.server,
+ "destination": destNode,
+ "collection": t.collection,
+ })
+ } else {
+ t.LogWithFields("INFO", "Starting balance task without specific destination", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "server": t.server,
+ "collection": t.collection,
+ })
+ }
// Simulate balance operation with progress updates
steps := []struct {
@@ -46,18 +74,36 @@ func (t *Task) Execute(params types.TaskParams) error {
}
for _, step := range steps {
+ select {
+ case <-ctx.Done():
+ t.LogWarning("Balance task cancelled during step: %s", step.name)
+ return ctx.Err()
+ default:
+ }
+
if t.IsCancelled() {
+ t.LogWarning("Balance task cancelled by request during step: %s", step.name)
return fmt.Errorf("balance task cancelled")
}
- glog.V(1).Infof("Balance task step: %s", step.name)
+ t.LogWithFields("INFO", "Executing balance step", map[string]interface{}{
+ "step": step.name,
+ "progress": step.progress,
+ "duration": step.duration.String(),
+ "volume_id": t.volumeID,
+ })
t.SetProgress(step.progress)
// Simulate work
time.Sleep(step.duration)
}
- glog.Infof("Balance task completed for volume %d on server %s", t.volumeID, t.server)
+ t.LogWithFields("INFO", "Balance task completed successfully", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "server": t.server,
+ "collection": t.collection,
+ "final_progress": 100.0,
+ })
return nil
}
@@ -72,6 +118,19 @@ func (t *Task) Validate(params types.TaskParams) error {
return nil
}
+// getPlannedDestination extracts the planned destination node from task parameters
+func (t *Task) getPlannedDestination() string {
+ if t.taskParams.TypedParams != nil {
+ if balanceParams := t.taskParams.TypedParams.GetBalanceParams(); balanceParams != nil {
+ if balanceParams.DestNode != "" {
+ glog.V(2).Infof("Found planned destination for volume %d: %s", t.volumeID, balanceParams.DestNode)
+ return balanceParams.DestNode
+ }
+ }
+ }
+ return ""
+}
+
// EstimateTime estimates the time needed for the task
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
// Base time for balance operation
diff --git a/weed/worker/tasks/balance/balance_detector.go b/weed/worker/tasks/balance/balance_detector.go
deleted file mode 100644
index f082b7a77..000000000
--- a/weed/worker/tasks/balance/balance_detector.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package balance
-
-import (
- "fmt"
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// BalanceDetector implements TaskDetector for balance tasks
-type BalanceDetector struct {
- enabled bool
- threshold float64 // Imbalance threshold (0.1 = 10%)
- minCheckInterval time.Duration
- minVolumeCount int
- lastCheck time.Time
-}
-
-// Compile-time interface assertions
-var (
- _ types.TaskDetector = (*BalanceDetector)(nil)
-)
-
-// NewBalanceDetector creates a new balance detector
-func NewBalanceDetector() *BalanceDetector {
- return &BalanceDetector{
- enabled: true,
- threshold: 0.1, // 10% imbalance threshold
- minCheckInterval: 1 * time.Hour,
- minVolumeCount: 10, // Don't balance small clusters
- lastCheck: time.Time{},
- }
-}
-
-// GetTaskType returns the task type
-func (d *BalanceDetector) GetTaskType() types.TaskType {
- return types.TaskTypeBalance
-}
-
-// ScanForTasks checks if cluster balance is needed
-func (d *BalanceDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
- if !d.enabled {
- return nil, nil
- }
-
- glog.V(2).Infof("Scanning for balance tasks...")
-
- // Don't check too frequently
- if time.Since(d.lastCheck) < d.minCheckInterval {
- return nil, nil
- }
- d.lastCheck = time.Now()
-
- // Skip if cluster is too small
- if len(volumeMetrics) < d.minVolumeCount {
- glog.V(2).Infof("Cluster too small for balance (%d volumes < %d minimum)", len(volumeMetrics), d.minVolumeCount)
- return nil, nil
- }
-
- // Analyze volume distribution across servers
- serverVolumeCounts := make(map[string]int)
- for _, metric := range volumeMetrics {
- serverVolumeCounts[metric.Server]++
- }
-
- if len(serverVolumeCounts) < 2 {
- glog.V(2).Infof("Not enough servers for balance (%d servers)", len(serverVolumeCounts))
- return nil, nil
- }
-
- // Calculate balance metrics
- totalVolumes := len(volumeMetrics)
- avgVolumesPerServer := float64(totalVolumes) / float64(len(serverVolumeCounts))
-
- maxVolumes := 0
- minVolumes := totalVolumes
- maxServer := ""
- minServer := ""
-
- for server, count := range serverVolumeCounts {
- if count > maxVolumes {
- maxVolumes = count
- maxServer = server
- }
- if count < minVolumes {
- minVolumes = count
- minServer = server
- }
- }
-
- // Check if imbalance exceeds threshold
- imbalanceRatio := float64(maxVolumes-minVolumes) / avgVolumesPerServer
- if imbalanceRatio <= d.threshold {
- glog.V(2).Infof("Cluster is balanced (imbalance ratio: %.2f <= %.2f)", imbalanceRatio, d.threshold)
- return nil, nil
- }
-
- // Create balance task
- reason := fmt.Sprintf("Cluster imbalance detected: %.1f%% (max: %d on %s, min: %d on %s, avg: %.1f)",
- imbalanceRatio*100, maxVolumes, maxServer, minVolumes, minServer, avgVolumesPerServer)
-
- task := &types.TaskDetectionResult{
- TaskType: types.TaskTypeBalance,
- Priority: types.TaskPriorityNormal,
- Reason: reason,
- ScheduleAt: time.Now(),
- Parameters: map[string]interface{}{
- "imbalance_ratio": imbalanceRatio,
- "threshold": d.threshold,
- "max_volumes": maxVolumes,
- "min_volumes": minVolumes,
- "avg_volumes_per_server": avgVolumesPerServer,
- "max_server": maxServer,
- "min_server": minServer,
- "total_servers": len(serverVolumeCounts),
- },
- }
-
- glog.V(1).Infof("🔄 Found balance task: %s", reason)
- return []*types.TaskDetectionResult{task}, nil
-}
-
-// ScanInterval returns how often to scan
-func (d *BalanceDetector) ScanInterval() time.Duration {
- return d.minCheckInterval
-}
-
-// IsEnabled returns whether the detector is enabled
-func (d *BalanceDetector) IsEnabled() bool {
- return d.enabled
-}
-
-// SetEnabled sets whether the detector is enabled
-func (d *BalanceDetector) SetEnabled(enabled bool) {
- d.enabled = enabled
- glog.V(1).Infof("🔄 Balance detector enabled: %v", enabled)
-}
-
-// SetThreshold sets the imbalance threshold
-func (d *BalanceDetector) SetThreshold(threshold float64) {
- d.threshold = threshold
- glog.V(1).Infof("🔄 Balance threshold set to: %.1f%%", threshold*100)
-}
-
-// SetMinCheckInterval sets the minimum time between balance checks
-func (d *BalanceDetector) SetMinCheckInterval(interval time.Duration) {
- d.minCheckInterval = interval
- glog.V(1).Infof("🔄 Balance check interval set to: %v", interval)
-}
-
-// SetMinVolumeCount sets the minimum volume count for balance operations
-func (d *BalanceDetector) SetMinVolumeCount(count int) {
- d.minVolumeCount = count
- glog.V(1).Infof("🔄 Balance minimum volume count set to: %d", count)
-}
-
-// GetThreshold returns the current imbalance threshold
-func (d *BalanceDetector) GetThreshold() float64 {
- return d.threshold
-}
-
-// GetMinCheckInterval returns the minimum check interval
-func (d *BalanceDetector) GetMinCheckInterval() time.Duration {
- return d.minCheckInterval
-}
-
-// GetMinVolumeCount returns the minimum volume count
-func (d *BalanceDetector) GetMinVolumeCount() int {
- return d.minVolumeCount
-}
diff --git a/weed/worker/tasks/balance/balance_register.go b/weed/worker/tasks/balance/balance_register.go
index 7c2d5a520..b26a40782 100644
--- a/weed/worker/tasks/balance/balance_register.go
+++ b/weed/worker/tasks/balance/balance_register.go
@@ -2,80 +2,71 @@ package balance
import (
"fmt"
+ "time"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
-// Factory creates balance task instances
-type Factory struct {
- *tasks.BaseTaskFactory
-}
+// Global variable to hold the task definition for configuration updates
+var globalTaskDef *base.TaskDefinition
-// NewFactory creates a new balance task factory
-func NewFactory() *Factory {
- return &Factory{
- BaseTaskFactory: tasks.NewBaseTaskFactory(
- types.TaskTypeBalance,
- []string{"balance", "storage", "optimization"},
- "Balance data across volume servers for optimal performance",
- ),
- }
-}
-
-// Create creates a new balance task instance
-func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
- // Validate parameters
- if params.VolumeID == 0 {
- return nil, fmt.Errorf("volume_id is required")
- }
- if params.Server == "" {
- return nil, fmt.Errorf("server is required")
- }
-
- task := NewTask(params.Server, params.VolumeID, params.Collection)
- task.SetEstimatedDuration(task.EstimateTime(params))
+// Auto-register this task when the package is imported
+func init() {
+ RegisterBalanceTask()
- return task, nil
+ // Register config updater
+ tasks.AutoRegisterConfigUpdater(types.TaskTypeBalance, UpdateConfigFromPersistence)
}
-// Shared detector and scheduler instances
-var (
- sharedDetector *BalanceDetector
- sharedScheduler *BalanceScheduler
-)
+// RegisterBalanceTask registers the balance task with the new architecture
+func RegisterBalanceTask() {
+ // Create configuration instance
+ config := NewDefaultConfig()
-// getSharedInstances returns the shared detector and scheduler instances
-func getSharedInstances() (*BalanceDetector, *BalanceScheduler) {
- if sharedDetector == nil {
- sharedDetector = NewBalanceDetector()
- }
- if sharedScheduler == nil {
- sharedScheduler = NewBalanceScheduler()
+ // Create complete task definition
+ taskDef := &base.TaskDefinition{
+ Type: types.TaskTypeBalance,
+ Name: "balance",
+ DisplayName: "Volume Balance",
+ Description: "Balances volume distribution across servers",
+ Icon: "fas fa-balance-scale text-warning",
+ Capabilities: []string{"balance", "distribution"},
+
+ Config: config,
+ ConfigSpec: GetConfigSpec(),
+ CreateTask: CreateTask,
+ DetectionFunc: Detection,
+ ScanInterval: 30 * time.Minute,
+ SchedulingFunc: Scheduling,
+ MaxConcurrent: 1,
+ RepeatInterval: 2 * time.Hour,
}
- return sharedDetector, sharedScheduler
-}
-// GetSharedInstances returns the shared detector and scheduler instances (public access)
-func GetSharedInstances() (*BalanceDetector, *BalanceScheduler) {
- return getSharedInstances()
+ // Store task definition globally for configuration updates
+ globalTaskDef = taskDef
+
+ // Register everything with a single function call!
+ base.RegisterTask(taskDef)
}
-// Auto-register this task when the package is imported
-func init() {
- factory := NewFactory()
- tasks.AutoRegister(types.TaskTypeBalance, factory)
+// UpdateConfigFromPersistence updates the balance configuration from persistence
+func UpdateConfigFromPersistence(configPersistence interface{}) error {
+ if globalTaskDef == nil {
+ return fmt.Errorf("balance task not registered")
+ }
- // Get shared instances for all registrations
- detector, scheduler := getSharedInstances()
+ // Load configuration from persistence
+ newConfig := LoadConfigFromPersistence(configPersistence)
+ if newConfig == nil {
+ return fmt.Errorf("failed to load configuration from persistence")
+ }
- // Register with types registry
- tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
- registry.RegisterTask(detector, scheduler)
- })
+ // Update the task definition's config
+ globalTaskDef.Config = newConfig
- // Register with UI registry using the same instances
- tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
- RegisterUI(uiRegistry, detector, scheduler)
- })
+ glog.V(1).Infof("Updated balance task configuration from persistence")
+ return nil
}
diff --git a/weed/worker/tasks/balance/balance_scheduler.go b/weed/worker/tasks/balance/balance_scheduler.go
deleted file mode 100644
index a8fefe465..000000000
--- a/weed/worker/tasks/balance/balance_scheduler.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package balance
-
-import (
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// BalanceScheduler implements TaskScheduler for balance tasks
-type BalanceScheduler struct {
- enabled bool
- maxConcurrent int
- minInterval time.Duration
- lastScheduled map[string]time.Time // track when we last scheduled a balance for each task type
- minServerCount int
- moveDuringOffHours bool
- offHoursStart string
- offHoursEnd string
-}
-
-// Compile-time interface assertions
-var (
- _ types.TaskScheduler = (*BalanceScheduler)(nil)
-)
-
-// NewBalanceScheduler creates a new balance scheduler
-func NewBalanceScheduler() *BalanceScheduler {
- return &BalanceScheduler{
- enabled: true,
- maxConcurrent: 1, // Only run one balance at a time
- minInterval: 6 * time.Hour,
- lastScheduled: make(map[string]time.Time),
- minServerCount: 3,
- moveDuringOffHours: true,
- offHoursStart: "23:00",
- offHoursEnd: "06:00",
- }
-}
-
-// GetTaskType returns the task type
-func (s *BalanceScheduler) GetTaskType() types.TaskType {
- return types.TaskTypeBalance
-}
-
-// CanScheduleNow determines if a balance task can be scheduled
-func (s *BalanceScheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
- if !s.enabled {
- return false
- }
-
- // Count running balance tasks
- runningBalanceCount := 0
- for _, runningTask := range runningTasks {
- if runningTask.Type == types.TaskTypeBalance {
- runningBalanceCount++
- }
- }
-
- // Check concurrency limit
- if runningBalanceCount >= s.maxConcurrent {
- glog.V(3).Infof("⏸️ Balance task blocked: too many running (%d >= %d)", runningBalanceCount, s.maxConcurrent)
- return false
- }
-
- // Check minimum interval between balance operations
- if lastTime, exists := s.lastScheduled["balance"]; exists {
- if time.Since(lastTime) < s.minInterval {
- timeLeft := s.minInterval - time.Since(lastTime)
- glog.V(3).Infof("⏸️ Balance task blocked: too soon (wait %v)", timeLeft)
- return false
- }
- }
-
- // Check if we have available workers
- availableWorkerCount := 0
- for _, worker := range availableWorkers {
- for _, capability := range worker.Capabilities {
- if capability == types.TaskTypeBalance {
- availableWorkerCount++
- break
- }
- }
- }
-
- if availableWorkerCount == 0 {
- glog.V(3).Infof("⏸️ Balance task blocked: no available workers")
- return false
- }
-
- // All checks passed - can schedule
- s.lastScheduled["balance"] = time.Now()
- glog.V(2).Infof("✅ Balance task can be scheduled (running: %d/%d, workers: %d)",
- runningBalanceCount, s.maxConcurrent, availableWorkerCount)
- return true
-}
-
-// GetPriority returns the priority for balance tasks
-func (s *BalanceScheduler) GetPriority(task *types.Task) types.TaskPriority {
- // Balance is typically normal priority - not urgent but important for optimization
- return types.TaskPriorityNormal
-}
-
-// GetMaxConcurrent returns the maximum concurrent balance tasks
-func (s *BalanceScheduler) GetMaxConcurrent() int {
- return s.maxConcurrent
-}
-
-// GetDefaultRepeatInterval returns the default interval to wait before repeating balance tasks
-func (s *BalanceScheduler) GetDefaultRepeatInterval() time.Duration {
- return s.minInterval
-}
-
-// IsEnabled returns whether the scheduler is enabled
-func (s *BalanceScheduler) IsEnabled() bool {
- return s.enabled
-}
-
-// SetEnabled sets whether the scheduler is enabled
-func (s *BalanceScheduler) SetEnabled(enabled bool) {
- s.enabled = enabled
- glog.V(1).Infof("🔄 Balance scheduler enabled: %v", enabled)
-}
-
-// SetMaxConcurrent sets the maximum concurrent balance tasks
-func (s *BalanceScheduler) SetMaxConcurrent(max int) {
- s.maxConcurrent = max
- glog.V(1).Infof("🔄 Balance max concurrent set to: %d", max)
-}
-
-// SetMinInterval sets the minimum interval between balance operations
-func (s *BalanceScheduler) SetMinInterval(interval time.Duration) {
- s.minInterval = interval
- glog.V(1).Infof("🔄 Balance minimum interval set to: %v", interval)
-}
-
-// GetLastScheduled returns when we last scheduled this task type
-func (s *BalanceScheduler) GetLastScheduled(taskKey string) time.Time {
- if lastTime, exists := s.lastScheduled[taskKey]; exists {
- return lastTime
- }
- return time.Time{}
-}
-
-// SetLastScheduled updates when we last scheduled this task type
-func (s *BalanceScheduler) SetLastScheduled(taskKey string, when time.Time) {
- s.lastScheduled[taskKey] = when
-}
-
-// GetMinServerCount returns the minimum server count
-func (s *BalanceScheduler) GetMinServerCount() int {
- return s.minServerCount
-}
-
-// SetMinServerCount sets the minimum server count
-func (s *BalanceScheduler) SetMinServerCount(count int) {
- s.minServerCount = count
- glog.V(1).Infof("🔄 Balance minimum server count set to: %d", count)
-}
-
-// GetMoveDuringOffHours returns whether to move only during off-hours
-func (s *BalanceScheduler) GetMoveDuringOffHours() bool {
- return s.moveDuringOffHours
-}
-
-// SetMoveDuringOffHours sets whether to move only during off-hours
-func (s *BalanceScheduler) SetMoveDuringOffHours(enabled bool) {
- s.moveDuringOffHours = enabled
- glog.V(1).Infof("🔄 Balance move during off-hours: %v", enabled)
-}
-
-// GetOffHoursStart returns the off-hours start time
-func (s *BalanceScheduler) GetOffHoursStart() string {
- return s.offHoursStart
-}
-
-// SetOffHoursStart sets the off-hours start time
-func (s *BalanceScheduler) SetOffHoursStart(start string) {
- s.offHoursStart = start
- glog.V(1).Infof("🔄 Balance off-hours start time set to: %s", start)
-}
-
-// GetOffHoursEnd returns the off-hours end time
-func (s *BalanceScheduler) GetOffHoursEnd() string {
- return s.offHoursEnd
-}
-
-// SetOffHoursEnd sets the off-hours end time
-func (s *BalanceScheduler) SetOffHoursEnd(end string) {
- s.offHoursEnd = end
- glog.V(1).Infof("🔄 Balance off-hours end time set to: %s", end)
-}
-
-// GetMinInterval returns the minimum interval
-func (s *BalanceScheduler) GetMinInterval() time.Duration {
- return s.minInterval
-}
diff --git a/weed/worker/tasks/balance/balance_typed.go b/weed/worker/tasks/balance/balance_typed.go
new file mode 100644
index 000000000..91cd912f0
--- /dev/null
+++ b/weed/worker/tasks/balance/balance_typed.go
@@ -0,0 +1,156 @@
+package balance
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// TypedTask implements balance operation with typed protobuf parameters
+type TypedTask struct {
+ *base.BaseTypedTask
+
+ // Task state from protobuf
+ sourceServer string
+ destNode string
+ volumeID uint32
+ collection string
+ estimatedSize uint64
+ placementScore float64
+ forceMove bool
+ timeoutSeconds int32
+ placementConflicts []string
+}
+
+// NewTypedTask creates a new typed balance task
+func NewTypedTask() types.TypedTaskInterface {
+ task := &TypedTask{
+ BaseTypedTask: base.NewBaseTypedTask(types.TaskTypeBalance),
+ }
+ return task
+}
+
+// ValidateTyped validates the typed parameters for balance task
+func (t *TypedTask) ValidateTyped(params *worker_pb.TaskParams) error {
+ // Basic validation from base class
+ if err := t.BaseTypedTask.ValidateTyped(params); err != nil {
+ return err
+ }
+
+ // Check that we have balance-specific parameters
+ balanceParams := params.GetBalanceParams()
+ if balanceParams == nil {
+ return fmt.Errorf("balance_params is required for balance task")
+ }
+
+ // Validate destination node
+ if balanceParams.DestNode == "" {
+ return fmt.Errorf("dest_node is required for balance task")
+ }
+
+ // Validate estimated size
+ if balanceParams.EstimatedSize == 0 {
+ return fmt.Errorf("estimated_size must be greater than 0")
+ }
+
+ // Validate timeout
+ if balanceParams.TimeoutSeconds <= 0 {
+ return fmt.Errorf("timeout_seconds must be greater than 0")
+ }
+
+ return nil
+}
+
+// EstimateTimeTyped estimates the time needed for balance operation based on protobuf parameters
+func (t *TypedTask) EstimateTimeTyped(params *worker_pb.TaskParams) time.Duration {
+ balanceParams := params.GetBalanceParams()
+ if balanceParams != nil {
+ // Use the timeout from parameters if specified
+ if balanceParams.TimeoutSeconds > 0 {
+ return time.Duration(balanceParams.TimeoutSeconds) * time.Second
+ }
+
+ // Estimate based on volume size (1 minute per GB)
+ if balanceParams.EstimatedSize > 0 {
+ gbSize := balanceParams.EstimatedSize / (1024 * 1024 * 1024)
+ return time.Duration(gbSize) * time.Minute
+ }
+ }
+
+ // Default estimation
+ return 10 * time.Minute
+}
+
+// ExecuteTyped implements the balance operation with typed parameters
+func (t *TypedTask) ExecuteTyped(params *worker_pb.TaskParams) error {
+ // Extract basic parameters
+ t.volumeID = params.VolumeId
+ t.sourceServer = params.Server
+ t.collection = params.Collection
+
+ // Extract balance-specific parameters
+ balanceParams := params.GetBalanceParams()
+ if balanceParams != nil {
+ t.destNode = balanceParams.DestNode
+ t.estimatedSize = balanceParams.EstimatedSize
+ t.placementScore = balanceParams.PlacementScore
+ t.forceMove = balanceParams.ForceMove
+ t.timeoutSeconds = balanceParams.TimeoutSeconds
+ t.placementConflicts = balanceParams.PlacementConflicts
+ }
+
+ glog.Infof("Starting typed balance task for volume %d: %s -> %s (collection: %s, size: %d bytes)",
+ t.volumeID, t.sourceServer, t.destNode, t.collection, t.estimatedSize)
+
+ // Log placement information
+ if t.placementScore > 0 {
+ glog.V(1).Infof("Placement score: %.2f", t.placementScore)
+ }
+ if len(t.placementConflicts) > 0 {
+ glog.V(1).Infof("Placement conflicts: %v", t.placementConflicts)
+ if !t.forceMove {
+ return fmt.Errorf("placement conflicts detected and force_move is false: %v", t.placementConflicts)
+ }
+ glog.Warningf("Proceeding with balance despite conflicts (force_move=true): %v", t.placementConflicts)
+ }
+
+ // Simulate balance operation with progress updates
+ steps := []struct {
+ name string
+ duration time.Duration
+ progress float64
+ }{
+ {"Analyzing cluster state", 2 * time.Second, 15},
+ {"Verifying destination capacity", 1 * time.Second, 25},
+ {"Starting volume migration", 1 * time.Second, 35},
+ {"Moving volume data", 6 * time.Second, 75},
+ {"Updating cluster metadata", 2 * time.Second, 95},
+ {"Verifying balance completion", 1 * time.Second, 100},
+ }
+
+ for _, step := range steps {
+ if t.IsCancelled() {
+ return fmt.Errorf("balance task cancelled during: %s", step.name)
+ }
+
+ glog.V(1).Infof("Balance task step: %s", step.name)
+ t.SetProgress(step.progress)
+
+ // Simulate work
+ time.Sleep(step.duration)
+ }
+
+ glog.Infof("Typed balance task completed successfully for volume %d: %s -> %s",
+ t.volumeID, t.sourceServer, t.destNode)
+ return nil
+}
+
+// Register the typed task in the global registry
+func init() {
+ types.RegisterGlobalTypedTask(types.TaskTypeBalance, NewTypedTask)
+ glog.V(1).Infof("Registered typed balance task")
+}
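
Note: a driver sketch for the typed task above: build worker_pb.TaskParams with the balance payload, validate, estimate, then execute. The scalar field names (DestNode, EstimatedSize, TimeoutSeconds) match the accessors used in this file; the oneof wrapper worker_pb.TaskParams_BalanceParams and message worker_pb.BalanceTaskParams are assumptions about the generated code and should be checked against worker.proto:

package main

import (
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
)

func main() {
	task := balance.NewTypedTask()

	params := &worker_pb.TaskParams{
		VolumeId:   1042,
		Server:     "10.0.0.5:8080",
		Collection: "pics",
		// Assumed generated oneof wrapper and message names (see note above).
		TaskParams: &worker_pb.TaskParams_BalanceParams{
			BalanceParams: &worker_pb.BalanceTaskParams{
				DestNode:       "10.0.0.9:8080",
				EstimatedSize:  2 << 30, // ~2 GiB, must be > 0
				TimeoutSeconds: 600,     // must be > 0
			},
		},
	}

	if err := task.ValidateTyped(params); err != nil {
		log.Fatalf("invalid balance params: %v", err)
	}
	log.Printf("estimated duration: %v", task.EstimateTimeTyped(params))
	if err := task.ExecuteTyped(params); err != nil {
		log.Fatalf("balance failed: %v", err)
	}
}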
diff --git a/weed/worker/tasks/balance/config.go b/weed/worker/tasks/balance/config.go
new file mode 100644
index 000000000..9303b4b2a
--- /dev/null
+++ b/weed/worker/tasks/balance/config.go
@@ -0,0 +1,170 @@
+package balance
+
+import (
+ "fmt"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+)
+
+// Config extends BaseConfig with balance-specific settings
+type Config struct {
+ base.BaseConfig
+ ImbalanceThreshold float64 `json:"imbalance_threshold"`
+ MinServerCount int `json:"min_server_count"`
+}
+
+// NewDefaultConfig creates a new default balance configuration
+func NewDefaultConfig() *Config {
+ return &Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 30 * 60, // 30 minutes
+ MaxConcurrent: 1,
+ },
+ ImbalanceThreshold: 0.2, // 20%
+ MinServerCount: 2,
+ }
+}
+
+// GetConfigSpec returns the configuration schema for balance tasks
+func GetConfigSpec() base.ConfigSpec {
+ return base.ConfigSpec{
+ Fields: []*config.Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: config.FieldTypeBool,
+ DefaultValue: true,
+ Required: false,
+ DisplayName: "Enable Balance Tasks",
+ Description: "Whether balance tasks should be automatically created",
+ HelpText: "Toggle this to enable or disable automatic balance task generation",
+ InputType: "checkbox",
+ CSSClasses: "form-check-input",
+ },
+ {
+ Name: "scan_interval_seconds",
+ JSONName: "scan_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 30 * 60,
+ MinValue: 5 * 60,
+ MaxValue: 2 * 60 * 60,
+ Required: true,
+ DisplayName: "Scan Interval",
+ Description: "How often to scan for volume distribution imbalances",
+ HelpText: "The system will check for volume distribution imbalances at this interval",
+ Placeholder: "30",
+ Unit: config.UnitMinutes,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "max_concurrent",
+ JSONName: "max_concurrent",
+ Type: config.FieldTypeInt,
+ DefaultValue: 1,
+ MinValue: 1,
+ MaxValue: 3,
+ Required: true,
+ DisplayName: "Max Concurrent Tasks",
+ Description: "Maximum number of balance tasks that can run simultaneously",
+ HelpText: "Limits the number of balance operations running at the same time",
+ Placeholder: "1 (default)",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "imbalance_threshold",
+ JSONName: "imbalance_threshold",
+ Type: config.FieldTypeFloat,
+ DefaultValue: 0.2,
+ MinValue: 0.05,
+ MaxValue: 0.5,
+ Required: true,
+ DisplayName: "Imbalance Threshold",
+ Description: "Minimum imbalance ratio to trigger balancing",
+ HelpText: "Volume distribution imbalances above this threshold will trigger balancing",
+ Placeholder: "0.20 (20%)",
+ Unit: config.UnitNone,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "min_server_count",
+ JSONName: "min_server_count",
+ Type: config.FieldTypeInt,
+ DefaultValue: 2,
+ MinValue: 2,
+ MaxValue: 10,
+ Required: true,
+ DisplayName: "Minimum Server Count",
+ Description: "Minimum number of servers required for balancing",
+ HelpText: "Balancing will only occur if there are at least this many servers",
+ Placeholder: "2 (default)",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ },
+ }
+}
+
+// ToTaskPolicy converts configuration to a TaskPolicy protobuf message
+func (c *Config) ToTaskPolicy() *worker_pb.TaskPolicy {
+ return &worker_pb.TaskPolicy{
+ Enabled: c.Enabled,
+ MaxConcurrent: int32(c.MaxConcurrent),
+ RepeatIntervalSeconds: int32(c.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(c.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{
+ BalanceConfig: &worker_pb.BalanceTaskConfig{
+ ImbalanceThreshold: float64(c.ImbalanceThreshold),
+ MinServerCount: int32(c.MinServerCount),
+ },
+ },
+ }
+}
+
+// FromTaskPolicy loads configuration from a TaskPolicy protobuf message
+func (c *Config) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ if policy == nil {
+ return fmt.Errorf("policy is nil")
+ }
+
+ // Set general TaskPolicy fields
+ c.Enabled = policy.Enabled
+ c.MaxConcurrent = int(policy.MaxConcurrent)
+ c.ScanIntervalSeconds = int(policy.RepeatIntervalSeconds) // Direct seconds-to-seconds mapping
+
+ // Set balance-specific fields from the task config
+ if balanceConfig := policy.GetBalanceConfig(); balanceConfig != nil {
+ c.ImbalanceThreshold = float64(balanceConfig.ImbalanceThreshold)
+ c.MinServerCount = int(balanceConfig.MinServerCount)
+ }
+
+ return nil
+}
+
+// LoadConfigFromPersistence loads configuration from the persistence layer if available
+func LoadConfigFromPersistence(configPersistence interface{}) *Config {
+ config := NewDefaultConfig()
+
+ // Try to load from persistence if available
+ if persistence, ok := configPersistence.(interface {
+ LoadBalanceTaskPolicy() (*worker_pb.TaskPolicy, error)
+ }); ok {
+ if policy, err := persistence.LoadBalanceTaskPolicy(); err == nil && policy != nil {
+ if err := config.FromTaskPolicy(policy); err == nil {
+ glog.V(1).Infof("Loaded balance configuration from persistence")
+ return config
+ }
+ }
+ }
+
+ glog.V(1).Infof("Using default balance configuration")
+ return config
+}
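
Note: ToTaskPolicy and FromTaskPolicy above are each other's inverse for the fields they cover, which is what lets the admin server persist and reload balance settings. A small round-trip sketch using only names defined in this file (the values are illustrative overrides, not defaults):

package main

import (
	"log"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
)

func main() {
	cfg := balance.NewDefaultConfig()
	cfg.ImbalanceThreshold = 0.3 // illustrative overrides
	cfg.MinServerCount = 3

	// Persist side: Config -> TaskPolicy carrying a BalanceTaskConfig payload.
	policy := cfg.ToTaskPolicy()

	// Load side: TaskPolicy -> Config, as FromTaskPolicy does above.
	restored := balance.NewDefaultConfig()
	if err := restored.FromTaskPolicy(policy); err != nil {
		log.Fatalf("load policy: %v", err)
	}

	// restored now carries ImbalanceThreshold=0.3 and MinServerCount=3, with
	// ScanIntervalSeconds rebuilt from RepeatIntervalSeconds.
	log.Printf("restored: %+v", restored)
}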
diff --git a/weed/worker/tasks/balance/detection.go b/weed/worker/tasks/balance/detection.go
new file mode 100644
index 000000000..f4bcf3ca3
--- /dev/null
+++ b/weed/worker/tasks/balance/detection.go
@@ -0,0 +1,134 @@
+package balance
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// Detection implements the detection logic for balance tasks
+func Detection(metrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo, config base.TaskConfig) ([]*types.TaskDetectionResult, error) {
+ if !config.IsEnabled() {
+ return nil, nil
+ }
+
+ balanceConfig := config.(*Config)
+
+ // Skip if cluster is too small
+ minVolumeCount := 2 // More reasonable for small clusters
+ if len(metrics) < minVolumeCount {
+ glog.Infof("BALANCE: No tasks created - cluster too small (%d volumes, need ≥%d)", len(metrics), minVolumeCount)
+ return nil, nil
+ }
+
+ // Analyze volume distribution across servers
+ serverVolumeCounts := make(map[string]int)
+ for _, metric := range metrics {
+ serverVolumeCounts[metric.Server]++
+ }
+
+ if len(serverVolumeCounts) < balanceConfig.MinServerCount {
+ glog.Infof("BALANCE: No tasks created - too few servers (%d servers, need ≥%d)", len(serverVolumeCounts), balanceConfig.MinServerCount)
+ return nil, nil
+ }
+
+ // Calculate balance metrics
+ totalVolumes := len(metrics)
+ avgVolumesPerServer := float64(totalVolumes) / float64(len(serverVolumeCounts))
+
+ maxVolumes := 0
+ minVolumes := totalVolumes
+ maxServer := ""
+ minServer := ""
+
+ for server, count := range serverVolumeCounts {
+ if count > maxVolumes {
+ maxVolumes = count
+ maxServer = server
+ }
+ if count < minVolumes {
+ minVolumes = count
+ minServer = server
+ }
+ }
+
+ // Check if imbalance exceeds threshold
+ imbalanceRatio := float64(maxVolumes-minVolumes) / avgVolumesPerServer
+ if imbalanceRatio <= balanceConfig.ImbalanceThreshold {
+ glog.Infof("BALANCE: No tasks created - cluster well balanced. Imbalance=%.1f%% (threshold=%.1f%%). Max=%d volumes on %s, Min=%d on %s, Avg=%.1f",
+ imbalanceRatio*100, balanceConfig.ImbalanceThreshold*100, maxVolumes, maxServer, minVolumes, minServer, avgVolumesPerServer)
+ return nil, nil
+ }
+
+ // Select a volume from the overloaded server for balance
+ var selectedVolume *types.VolumeHealthMetrics
+ for _, metric := range metrics {
+ if metric.Server == maxServer {
+ selectedVolume = metric
+ break
+ }
+ }
+
+ if selectedVolume == nil {
+ glog.Warningf("BALANCE: Could not find volume on overloaded server %s", maxServer)
+ return nil, nil
+ }
+
+ // Create balance task with volume and destination planning info
+ reason := fmt.Sprintf("Cluster imbalance detected: %.1f%% (max: %d on %s, min: %d on %s, avg: %.1f)",
+ imbalanceRatio*100, maxVolumes, maxServer, minVolumes, minServer, avgVolumesPerServer)
+
+ task := &types.TaskDetectionResult{
+ TaskType: types.TaskTypeBalance,
+ VolumeID: selectedVolume.VolumeID,
+ Server: selectedVolume.Server,
+ Collection: selectedVolume.Collection,
+ Priority: types.TaskPriorityNormal,
+ Reason: reason,
+ ScheduleAt: time.Now(),
+ // TypedParams will be populated by the maintenance integration
+ // with destination planning information
+ }
+
+ return []*types.TaskDetectionResult{task}, nil
+}
+
+// Scheduling implements the scheduling logic for balance tasks
+func Scheduling(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker, config base.TaskConfig) bool {
+ balanceConfig := config.(*Config)
+
+ // Count running balance tasks
+ runningBalanceCount := 0
+ for _, runningTask := range runningTasks {
+ if runningTask.Type == types.TaskTypeBalance {
+ runningBalanceCount++
+ }
+ }
+
+ // Check concurrency limit
+ if runningBalanceCount >= balanceConfig.MaxConcurrent {
+ return false
+ }
+
+ // Check if we have available workers
+ availableWorkerCount := 0
+ for _, worker := range availableWorkers {
+ for _, capability := range worker.Capabilities {
+ if capability == types.TaskTypeBalance {
+ availableWorkerCount++
+ break
+ }
+ }
+ }
+
+ return availableWorkerCount > 0
+}
+
+// CreateTask creates a new balance task instance
+func CreateTask(params types.TaskParams) (types.TaskInterface, error) {
+ // Create and return the balance task using existing Task type
+ return NewTask(params.Server, params.VolumeID, params.Collection), nil
+}
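
Note: a worked example of the threshold test in Detection. With the default ImbalanceThreshold of 0.2, three servers holding 10, 6, and 2 volumes give an average of 6 and an imbalance of (10-2)/6, roughly 1.33, so a volume on the busiest server is proposed for balancing. The arithmetic, stand-alone (not a call into the real detector):

package main

import "fmt"

func main() {
	// Same arithmetic as the threshold check in Detection above.
	counts := map[string]int{"srv-a": 10, "srv-b": 6, "srv-c": 2}

	total, maxCount, minCount := 0, 0, int(^uint(0)>>1)
	for _, c := range counts {
		total += c
		if c > maxCount {
			maxCount = c
		}
		if c < minCount {
			minCount = c
		}
	}
	avg := float64(total) / float64(len(counts))
	imbalance := float64(maxCount-minCount) / avg

	// Prints: imbalance 1.33 (threshold 0.20) -> balance needed: true
	fmt.Printf("imbalance %.2f (threshold 0.20) -> balance needed: %v\n",
		imbalance, imbalance > 0.2)
}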
diff --git a/weed/worker/tasks/balance/ui.go b/weed/worker/tasks/balance/ui.go
deleted file mode 100644
index 2cea20a76..000000000
--- a/weed/worker/tasks/balance/ui.go
+++ /dev/null
@@ -1,361 +0,0 @@
-package balance
-
-import (
- "fmt"
- "html/template"
- "strconv"
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// UIProvider provides the UI for balance task configuration
-type UIProvider struct {
- detector *BalanceDetector
- scheduler *BalanceScheduler
-}
-
-// NewUIProvider creates a new balance UI provider
-func NewUIProvider(detector *BalanceDetector, scheduler *BalanceScheduler) *UIProvider {
- return &UIProvider{
- detector: detector,
- scheduler: scheduler,
- }
-}
-
-// GetTaskType returns the task type
-func (ui *UIProvider) GetTaskType() types.TaskType {
- return types.TaskTypeBalance
-}
-
-// GetDisplayName returns the human-readable name
-func (ui *UIProvider) GetDisplayName() string {
- return "Volume Balance"
-}
-
-// GetDescription returns a description of what this task does
-func (ui *UIProvider) GetDescription() string {
- return "Redistributes volumes across volume servers to optimize storage utilization and performance"
-}
-
-// GetIcon returns the icon CSS class for this task type
-func (ui *UIProvider) GetIcon() string {
- return "fas fa-balance-scale text-secondary"
-}
-
-// BalanceConfig represents the balance configuration
-type BalanceConfig struct {
- Enabled bool `json:"enabled"`
- ImbalanceThreshold float64 `json:"imbalance_threshold"`
- ScanIntervalSeconds int `json:"scan_interval_seconds"`
- MaxConcurrent int `json:"max_concurrent"`
- MinServerCount int `json:"min_server_count"`
- MoveDuringOffHours bool `json:"move_during_off_hours"`
- OffHoursStart string `json:"off_hours_start"`
- OffHoursEnd string `json:"off_hours_end"`
- MinIntervalSeconds int `json:"min_interval_seconds"`
-}
-
-// Helper functions for duration conversion
-func secondsToDuration(seconds int) time.Duration {
- return time.Duration(seconds) * time.Second
-}
-
-func durationToSeconds(d time.Duration) int {
- return int(d.Seconds())
-}
-
-// formatDurationForUser formats seconds as a user-friendly duration string
-func formatDurationForUser(seconds int) string {
- d := secondsToDuration(seconds)
- if d < time.Minute {
- return fmt.Sprintf("%ds", seconds)
- }
- if d < time.Hour {
- return fmt.Sprintf("%.0fm", d.Minutes())
- }
- if d < 24*time.Hour {
- return fmt.Sprintf("%.1fh", d.Hours())
- }
- return fmt.Sprintf("%.1fd", d.Hours()/24)
-}
-
-// RenderConfigForm renders the configuration form HTML
-func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
- config := ui.getCurrentBalanceConfig()
-
- // Build form using the FormBuilder helper
- form := types.NewFormBuilder()
-
- // Detection Settings
- form.AddCheckboxField(
- "enabled",
- "Enable Balance Tasks",
- "Whether balance tasks should be automatically created",
- config.Enabled,
- )
-
- form.AddNumberField(
- "imbalance_threshold",
- "Imbalance Threshold (%)",
- "Trigger balance when storage imbalance exceeds this percentage (0.0-1.0)",
- config.ImbalanceThreshold,
- true,
- )
-
- form.AddDurationField("scan_interval", "Scan Interval", "How often to scan for imbalanced volumes", secondsToDuration(config.ScanIntervalSeconds), true)
-
- // Scheduling Settings
- form.AddNumberField(
- "max_concurrent",
- "Max Concurrent Tasks",
- "Maximum number of balance tasks that can run simultaneously",
- float64(config.MaxConcurrent),
- true,
- )
-
- form.AddNumberField(
- "min_server_count",
- "Minimum Server Count",
- "Only balance when at least this many servers are available",
- float64(config.MinServerCount),
- true,
- )
-
- // Timing Settings
- form.AddCheckboxField(
- "move_during_off_hours",
- "Restrict to Off-Hours",
- "Only perform balance operations during off-peak hours",
- config.MoveDuringOffHours,
- )
-
- form.AddTextField(
- "off_hours_start",
- "Off-Hours Start Time",
- "Start time for off-hours window (e.g., 23:00)",
- config.OffHoursStart,
- false,
- )
-
- form.AddTextField(
- "off_hours_end",
- "Off-Hours End Time",
- "End time for off-hours window (e.g., 06:00)",
- config.OffHoursEnd,
- false,
- )
-
- // Timing constraints
- form.AddDurationField("min_interval", "Min Interval", "Minimum time between balance operations", secondsToDuration(config.MinIntervalSeconds), true)
-
- // Generate organized form sections using Bootstrap components
- html := `
-<div class="row">
- <div class="col-12">
- <div class="card mb-4">
- <div class="card-header">
- <h5 class="mb-0">
- <i class="fas fa-balance-scale me-2"></i>
- Balance Configuration
- </h5>
- </div>
- <div class="card-body">
-` + string(form.Build()) + `
- </div>
- </div>
- </div>
-</div>
-
-<div class="row">
- <div class="col-12">
- <div class="card mb-3">
- <div class="card-header">
- <h5 class="mb-0">
- <i class="fas fa-exclamation-triangle me-2"></i>
- Performance Considerations
- </h5>
- </div>
- <div class="card-body">
- <div class="alert alert-warning" role="alert">
- <h6 class="alert-heading">Important Considerations:</h6>
- <p class="mb-2"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p>
- <p class="mb-2"><strong>Recommendation:</strong> Enable off-hours restriction to minimize impact on production workloads.</p>
- <p class="mb-0"><strong>Safety:</strong> Requires at least ` + fmt.Sprintf("%d", config.MinServerCount) + ` servers to ensure data safety during moves.</p>
- </div>
- </div>
- </div>
- </div>
-</div>`
-
- return template.HTML(html), nil
-}
-
-// ParseConfigForm parses form data into configuration
-func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
- config := &BalanceConfig{}
-
- // Parse enabled
- config.Enabled = len(formData["enabled"]) > 0
-
- // Parse imbalance threshold
- if values, ok := formData["imbalance_threshold"]; ok && len(values) > 0 {
- threshold, err := strconv.ParseFloat(values[0], 64)
- if err != nil {
- return nil, fmt.Errorf("invalid imbalance threshold: %w", err)
- }
- if threshold < 0 || threshold > 1 {
- return nil, fmt.Errorf("imbalance threshold must be between 0.0 and 1.0")
- }
- config.ImbalanceThreshold = threshold
- }
-
- // Parse scan interval
- if values, ok := formData["scan_interval"]; ok && len(values) > 0 {
- duration, err := time.ParseDuration(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid scan interval: %w", err)
- }
- config.ScanIntervalSeconds = int(duration.Seconds())
- }
-
- // Parse max concurrent
- if values, ok := formData["max_concurrent"]; ok && len(values) > 0 {
- maxConcurrent, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %w", err)
- }
- if maxConcurrent < 1 {
- return nil, fmt.Errorf("max concurrent must be at least 1")
- }
- config.MaxConcurrent = maxConcurrent
- }
-
- // Parse min server count
- if values, ok := formData["min_server_count"]; ok && len(values) > 0 {
- minServerCount, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid min server count: %w", err)
- }
- if minServerCount < 2 {
- return nil, fmt.Errorf("min server count must be at least 2")
- }
- config.MinServerCount = minServerCount
- }
-
- // Parse off-hours settings
- config.MoveDuringOffHours = len(formData["move_during_off_hours"]) > 0
-
- if values, ok := formData["off_hours_start"]; ok && len(values) > 0 {
- config.OffHoursStart = values[0]
- }
-
- if values, ok := formData["off_hours_end"]; ok && len(values) > 0 {
- config.OffHoursEnd = values[0]
- }
-
- // Parse min interval
- if values, ok := formData["min_interval"]; ok && len(values) > 0 {
- duration, err := time.ParseDuration(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid min interval: %w", err)
- }
- config.MinIntervalSeconds = int(duration.Seconds())
- }
-
- return config, nil
-}
-
-// GetCurrentConfig returns the current configuration
-func (ui *UIProvider) GetCurrentConfig() interface{} {
- return ui.getCurrentBalanceConfig()
-}
-
-// ApplyConfig applies the new configuration
-func (ui *UIProvider) ApplyConfig(config interface{}) error {
- balanceConfig, ok := config.(*BalanceConfig)
- if !ok {
- return fmt.Errorf("invalid config type, expected *BalanceConfig")
- }
-
- // Apply to detector
- if ui.detector != nil {
- ui.detector.SetEnabled(balanceConfig.Enabled)
- ui.detector.SetThreshold(balanceConfig.ImbalanceThreshold)
- ui.detector.SetMinCheckInterval(secondsToDuration(balanceConfig.ScanIntervalSeconds))
- }
-
- // Apply to scheduler
- if ui.scheduler != nil {
- ui.scheduler.SetEnabled(balanceConfig.Enabled)
- ui.scheduler.SetMaxConcurrent(balanceConfig.MaxConcurrent)
- ui.scheduler.SetMinServerCount(balanceConfig.MinServerCount)
- ui.scheduler.SetMoveDuringOffHours(balanceConfig.MoveDuringOffHours)
- ui.scheduler.SetOffHoursStart(balanceConfig.OffHoursStart)
- ui.scheduler.SetOffHoursEnd(balanceConfig.OffHoursEnd)
- }
-
- glog.V(1).Infof("Applied balance configuration: enabled=%v, threshold=%.1f%%, max_concurrent=%d, min_servers=%d, off_hours=%v",
- balanceConfig.Enabled, balanceConfig.ImbalanceThreshold*100, balanceConfig.MaxConcurrent,
- balanceConfig.MinServerCount, balanceConfig.MoveDuringOffHours)
-
- return nil
-}
-
-// getCurrentBalanceConfig gets the current configuration from detector and scheduler
-func (ui *UIProvider) getCurrentBalanceConfig() *BalanceConfig {
- config := &BalanceConfig{
- // Default values (fallback if detectors/schedulers are nil)
- Enabled: true,
- ImbalanceThreshold: 0.1, // 10% imbalance
- ScanIntervalSeconds: durationToSeconds(4 * time.Hour),
- MaxConcurrent: 1,
- MinServerCount: 3,
- MoveDuringOffHours: true,
- OffHoursStart: "23:00",
- OffHoursEnd: "06:00",
- MinIntervalSeconds: durationToSeconds(1 * time.Hour),
- }
-
- // Get current values from detector
- if ui.detector != nil {
- config.Enabled = ui.detector.IsEnabled()
- config.ImbalanceThreshold = ui.detector.GetThreshold()
- config.ScanIntervalSeconds = int(ui.detector.ScanInterval().Seconds())
- }
-
- // Get current values from scheduler
- if ui.scheduler != nil {
- config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
- config.MinServerCount = ui.scheduler.GetMinServerCount()
- config.MoveDuringOffHours = ui.scheduler.GetMoveDuringOffHours()
- config.OffHoursStart = ui.scheduler.GetOffHoursStart()
- config.OffHoursEnd = ui.scheduler.GetOffHoursEnd()
- }
-
- return config
-}
-
-// RegisterUI registers the balance UI provider with the UI registry
-func RegisterUI(uiRegistry *types.UIRegistry, detector *BalanceDetector, scheduler *BalanceScheduler) {
- uiProvider := NewUIProvider(detector, scheduler)
- uiRegistry.RegisterUI(uiProvider)
-
- glog.V(1).Infof("✅ Registered balance task UI provider")
-}
-
-// DefaultBalanceConfig returns default balance configuration
-func DefaultBalanceConfig() *BalanceConfig {
- return &BalanceConfig{
- Enabled: false,
- ImbalanceThreshold: 0.3,
- ScanIntervalSeconds: durationToSeconds(4 * time.Hour),
- MaxConcurrent: 1,
- MinServerCount: 3,
- MoveDuringOffHours: false,
- OffHoursStart: "22:00",
- OffHoursEnd: "06:00",
- MinIntervalSeconds: durationToSeconds(1 * time.Hour),
- }
-}
diff --git a/weed/worker/tasks/base/generic_components.go b/weed/worker/tasks/base/generic_components.go
new file mode 100644
index 000000000..27ad1bb29
--- /dev/null
+++ b/weed/worker/tasks/base/generic_components.go
@@ -0,0 +1,129 @@
+package base
+
+import (
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// GenericDetector implements TaskDetector using function-based logic
+type GenericDetector struct {
+ taskDef *TaskDefinition
+}
+
+// NewGenericDetector creates a detector from a task definition
+func NewGenericDetector(taskDef *TaskDefinition) *GenericDetector {
+ return &GenericDetector{taskDef: taskDef}
+}
+
+// GetTaskType returns the task type
+func (d *GenericDetector) GetTaskType() types.TaskType {
+ return d.taskDef.Type
+}
+
+// ScanForTasks scans using the task definition's detection function
+func (d *GenericDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
+ if d.taskDef.DetectionFunc == nil {
+ return nil, nil
+ }
+ return d.taskDef.DetectionFunc(volumeMetrics, clusterInfo, d.taskDef.Config)
+}
+
+// ScanInterval returns the scan interval from task definition
+func (d *GenericDetector) ScanInterval() time.Duration {
+ if d.taskDef.ScanInterval > 0 {
+ return d.taskDef.ScanInterval
+ }
+ return 30 * time.Minute // Default
+}
+
+// IsEnabled returns whether this detector is enabled
+func (d *GenericDetector) IsEnabled() bool {
+ return d.taskDef.Config.IsEnabled()
+}
+
+// GenericScheduler implements TaskScheduler using function-based logic
+type GenericScheduler struct {
+ taskDef *TaskDefinition
+}
+
+// NewGenericScheduler creates a scheduler from a task definition
+func NewGenericScheduler(taskDef *TaskDefinition) *GenericScheduler {
+ return &GenericScheduler{taskDef: taskDef}
+}
+
+// GetTaskType returns the task type
+func (s *GenericScheduler) GetTaskType() types.TaskType {
+ return s.taskDef.Type
+}
+
+// CanScheduleNow determines if a task can be scheduled using the task definition's function
+func (s *GenericScheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
+ if s.taskDef.SchedulingFunc == nil {
+ return s.defaultCanSchedule(task, runningTasks, availableWorkers)
+ }
+ return s.taskDef.SchedulingFunc(task, runningTasks, availableWorkers, s.taskDef.Config)
+}
+
+// defaultCanSchedule provides default scheduling logic
+func (s *GenericScheduler) defaultCanSchedule(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
+ if !s.taskDef.Config.IsEnabled() {
+ return false
+ }
+
+ // Count running tasks of this type
+ runningCount := 0
+ for _, runningTask := range runningTasks {
+ if runningTask.Type == s.taskDef.Type {
+ runningCount++
+ }
+ }
+
+ // Check concurrency limit
+ maxConcurrent := s.taskDef.MaxConcurrent
+ if maxConcurrent <= 0 {
+ maxConcurrent = 1 // Default
+ }
+ if runningCount >= maxConcurrent {
+ return false
+ }
+
+ // Require at least one worker with spare capacity and a matching capability
+ for _, worker := range availableWorkers {
+ if worker.CurrentLoad < worker.MaxConcurrent {
+ for _, capability := range worker.Capabilities {
+ if capability == s.taskDef.Type {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+// GetPriority returns the priority for this task
+func (s *GenericScheduler) GetPriority(task *types.Task) types.TaskPriority {
+ return task.Priority
+}
+
+// GetMaxConcurrent returns max concurrent tasks
+func (s *GenericScheduler) GetMaxConcurrent() int {
+ if s.taskDef.MaxConcurrent > 0 {
+ return s.taskDef.MaxConcurrent
+ }
+ return 1 // Default
+}
+
+// GetDefaultRepeatInterval returns the default repeat interval
+func (s *GenericScheduler) GetDefaultRepeatInterval() time.Duration {
+ if s.taskDef.RepeatInterval > 0 {
+ return s.taskDef.RepeatInterval
+ }
+ return 24 * time.Hour // Default
+}
+
+// IsEnabled returns whether this scheduler is enabled
+func (s *GenericScheduler) IsEnabled() bool {
+ return s.taskDef.Config.IsEnabled()
+}
diff --git a/weed/worker/tasks/base/registration.go b/weed/worker/tasks/base/registration.go
new file mode 100644
index 000000000..416b6f6b8
--- /dev/null
+++ b/weed/worker/tasks/base/registration.go
@@ -0,0 +1,155 @@
+package base
+
+import (
+ "fmt"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// GenericFactory creates task instances using a TaskDefinition
+type GenericFactory struct {
+ *tasks.BaseTaskFactory
+ taskDef *TaskDefinition
+}
+
+// NewGenericFactory creates a generic task factory
+func NewGenericFactory(taskDef *TaskDefinition) *GenericFactory {
+ return &GenericFactory{
+ BaseTaskFactory: tasks.NewBaseTaskFactory(
+ taskDef.Type,
+ taskDef.Capabilities,
+ taskDef.Description,
+ ),
+ taskDef: taskDef,
+ }
+}
+
+// Create creates a task instance using the task definition
+func (f *GenericFactory) Create(params types.TaskParams) (types.TaskInterface, error) {
+ if f.taskDef.CreateTask == nil {
+ return nil, fmt.Errorf("no task creation function defined for %s", f.taskDef.Type)
+ }
+ return f.taskDef.CreateTask(params)
+}
+
+// GenericSchemaProvider provides config schema from TaskDefinition
+type GenericSchemaProvider struct {
+ taskDef *TaskDefinition
+}
+
+// GetConfigSchema returns the schema from task definition
+func (p *GenericSchemaProvider) GetConfigSchema() *tasks.TaskConfigSchema {
+ return &tasks.TaskConfigSchema{
+ TaskName: string(p.taskDef.Type),
+ DisplayName: p.taskDef.DisplayName,
+ Description: p.taskDef.Description,
+ Icon: p.taskDef.Icon,
+ Schema: config.Schema{
+ Fields: p.taskDef.ConfigSpec.Fields,
+ },
+ }
+}
+
+// GenericUIProvider provides UI functionality from TaskDefinition
+type GenericUIProvider struct {
+ taskDef *TaskDefinition
+}
+
+// GetTaskType returns the task type
+func (ui *GenericUIProvider) GetTaskType() types.TaskType {
+ return ui.taskDef.Type
+}
+
+// GetDisplayName returns the human-readable name
+func (ui *GenericUIProvider) GetDisplayName() string {
+ return ui.taskDef.DisplayName
+}
+
+// GetDescription returns a description of what this task does
+func (ui *GenericUIProvider) GetDescription() string {
+ return ui.taskDef.Description
+}
+
+// GetIcon returns the icon CSS class for this task type
+func (ui *GenericUIProvider) GetIcon() string {
+ return ui.taskDef.Icon
+}
+
+// GetCurrentConfig returns current config as TaskConfig
+func (ui *GenericUIProvider) GetCurrentConfig() types.TaskConfig {
+ return ui.taskDef.Config
+}
+
+// ApplyTaskPolicy applies protobuf TaskPolicy configuration
+func (ui *GenericUIProvider) ApplyTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ return ui.taskDef.Config.FromTaskPolicy(policy)
+}
+
+// ApplyTaskConfig applies TaskConfig interface configuration
+func (ui *GenericUIProvider) ApplyTaskConfig(config types.TaskConfig) error {
+ taskPolicy := config.ToTaskPolicy()
+ return ui.taskDef.Config.FromTaskPolicy(taskPolicy)
+}
+
+// RegisterTask registers a complete task definition with all registries
+func RegisterTask(taskDef *TaskDefinition) {
+ // Validate task definition
+ if err := validateTaskDefinition(taskDef); err != nil {
+ glog.Errorf("Invalid task definition for %s: %v", taskDef.Type, err)
+ return
+ }
+
+ // Create and register factory
+ factory := NewGenericFactory(taskDef)
+ tasks.AutoRegister(taskDef.Type, factory)
+
+ // Create and register detector/scheduler
+ detector := NewGenericDetector(taskDef)
+ scheduler := NewGenericScheduler(taskDef)
+
+ tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
+ registry.RegisterTask(detector, scheduler)
+ })
+
+ // Create and register schema provider
+ schemaProvider := &GenericSchemaProvider{taskDef: taskDef}
+ tasks.RegisterTaskConfigSchema(string(taskDef.Type), schemaProvider)
+
+ // Create and register UI provider
+ uiProvider := &GenericUIProvider{taskDef: taskDef}
+ tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
+ baseUIProvider := tasks.NewBaseUIProvider(
+ taskDef.Type,
+ taskDef.DisplayName,
+ taskDef.Description,
+ taskDef.Icon,
+ schemaProvider.GetConfigSchema,
+ uiProvider.GetCurrentConfig,
+ uiProvider.ApplyTaskPolicy,
+ uiProvider.ApplyTaskConfig,
+ )
+ uiRegistry.RegisterUI(baseUIProvider)
+ })
+
+ glog.V(1).Infof("✅ Registered complete task definition: %s", taskDef.Type)
+}
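+
+// Example (illustrative sketch only; a constructor such as NewDefaultConfig is
+// assumed to exist in the task package): a task package typically calls
+// RegisterTask from an init() function, wiring its Detection, Scheduling and
+// CreateTask functions into a TaskDefinition:
+//
+//	base.RegisterTask(&base.TaskDefinition{
+//		Type:           types.TaskTypeBalance,
+//		Name:           "balance",
+//		Config:         NewDefaultConfig(),
+//		DetectionFunc:  Detection,
+//		SchedulingFunc: Scheduling,
+//		CreateTask:     CreateTask,
+//	})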
+
+// validateTaskDefinition ensures the task definition is complete
+func validateTaskDefinition(taskDef *TaskDefinition) error {
+ if taskDef.Type == "" {
+ return fmt.Errorf("task type is required")
+ }
+ if taskDef.Name == "" {
+ return fmt.Errorf("task name is required")
+ }
+ if taskDef.Config == nil {
+ return fmt.Errorf("task config is required")
+ }
+ // CreateTask is optional for tasks that use the typed task system
+ // The typed system registers tasks separately via types.RegisterGlobalTypedTask()
+ return nil
+}
diff --git a/weed/worker/tasks/base/task_definition.go b/weed/worker/tasks/base/task_definition.go
new file mode 100644
index 000000000..6689d9c81
--- /dev/null
+++ b/weed/worker/tasks/base/task_definition.go
@@ -0,0 +1,272 @@
+package base
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// TaskDefinition encapsulates everything needed to define a complete task type
+type TaskDefinition struct {
+ // Basic task information
+ Type types.TaskType
+ Name string
+ DisplayName string
+ Description string
+ Icon string
+ Capabilities []string
+
+ // Task configuration
+ Config TaskConfig
+ ConfigSpec ConfigSpec
+
+ // Task creation
+ CreateTask func(params types.TaskParams) (types.TaskInterface, error)
+
+ // Detection logic
+ DetectionFunc func(metrics []*types.VolumeHealthMetrics, info *types.ClusterInfo, config TaskConfig) ([]*types.TaskDetectionResult, error)
+ ScanInterval time.Duration
+
+ // Scheduling logic
+ SchedulingFunc func(task *types.Task, running []*types.Task, workers []*types.Worker, config TaskConfig) bool
+ MaxConcurrent int
+ RepeatInterval time.Duration
+}
+
+// TaskConfig provides a configuration interface that supports type-safe defaults
+type TaskConfig interface {
+ config.ConfigWithDefaults // Extends ConfigWithDefaults for type-safe schema operations
+ IsEnabled() bool
+ SetEnabled(bool)
+ ToTaskPolicy() *worker_pb.TaskPolicy
+ FromTaskPolicy(policy *worker_pb.TaskPolicy) error
+}
+
+// ConfigSpec defines the configuration schema
+type ConfigSpec struct {
+ Fields []*config.Field
+}
+
+// BaseConfig provides common configuration fields with reflection-based serialization
+type BaseConfig struct {
+ Enabled bool `json:"enabled"`
+ ScanIntervalSeconds int `json:"scan_interval_seconds"`
+ MaxConcurrent int `json:"max_concurrent"`
+}
+
+// IsEnabled returns whether the task is enabled
+func (c *BaseConfig) IsEnabled() bool {
+ return c.Enabled
+}
+
+// SetEnabled sets whether the task is enabled
+func (c *BaseConfig) SetEnabled(enabled bool) {
+ c.Enabled = enabled
+}
+
+// Validate validates the base configuration
+func (c *BaseConfig) Validate() error {
+ // Common validation logic
+ return nil
+}
+
+// StructToMap converts any struct to a map using reflection
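+// Embedded structs are flattened into the top-level map, so a config embedding
+// BaseConfig plus its own JSON-tagged fields produces a single flat map such as
+// {"enabled": true, "scan_interval_seconds": 1800, "max_concurrent": 3, ...}.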
+func StructToMap(obj interface{}) map[string]interface{} {
+ result := make(map[string]interface{})
+ val := reflect.ValueOf(obj)
+
+ // Handle pointer to struct
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct {
+ return result
+ }
+
+ typ := val.Type()
+
+ for i := 0; i < val.NumField(); i++ {
+ field := val.Field(i)
+ fieldType := typ.Field(i)
+
+ // Skip unexported fields
+ if !field.CanInterface() {
+ continue
+ }
+
+ // Handle embedded structs recursively (before JSON tag check)
+ if field.Kind() == reflect.Struct && fieldType.Anonymous {
+ embeddedMap := StructToMap(field.Interface())
+ for k, v := range embeddedMap {
+ result[k] = v
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" || jsonTag == "-" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ result[jsonTag] = field.Interface()
+ }
+ return result
+}
+
+// MapToStruct loads data from map into struct using reflection
+func MapToStruct(data map[string]interface{}, obj interface{}) error {
+ val := reflect.ValueOf(obj)
+
+ // Must be pointer to struct
+ if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Struct {
+ return fmt.Errorf("obj must be pointer to struct")
+ }
+
+ val = val.Elem()
+ typ := val.Type()
+
+ for i := 0; i < val.NumField(); i++ {
+ field := val.Field(i)
+ fieldType := typ.Field(i)
+
+ // Skip unexported fields
+ if !field.CanSet() {
+ continue
+ }
+
+ // Handle embedded structs recursively (before JSON tag check)
+ if field.Kind() == reflect.Struct && fieldType.Anonymous {
+ err := MapToStruct(data, field.Addr().Interface())
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Get JSON tag name
+ jsonTag := fieldType.Tag.Get("json")
+ if jsonTag == "" || jsonTag == "-" {
+ continue
+ }
+
+ // Remove options like ",omitempty"
+ if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
+ jsonTag = jsonTag[:commaIdx]
+ }
+
+ if value, exists := data[jsonTag]; exists {
+ err := setFieldValue(field, value)
+ if err != nil {
+ return fmt.Errorf("failed to set field %s: %v", jsonTag, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// ToMap converts config to map using reflection
+// ToTaskPolicy converts BaseConfig to protobuf (partial implementation)
+// Note: Concrete implementations should override this to include task-specific config
+func (c *BaseConfig) ToTaskPolicy() *worker_pb.TaskPolicy {
+ return &worker_pb.TaskPolicy{
+ Enabled: c.Enabled,
+ MaxConcurrent: int32(c.MaxConcurrent),
+ RepeatIntervalSeconds: int32(c.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(c.ScanIntervalSeconds),
+ // TaskConfig field should be set by concrete implementations
+ }
+}
+
+// FromTaskPolicy loads BaseConfig from protobuf (partial implementation)
+// Note: Concrete implementations should override this to handle task-specific config
+func (c *BaseConfig) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ if policy == nil {
+ return fmt.Errorf("policy is nil")
+ }
+ c.Enabled = policy.Enabled
+ c.MaxConcurrent = int(policy.MaxConcurrent)
+ c.ScanIntervalSeconds = int(policy.RepeatIntervalSeconds)
+ return nil
+}
+
+// ApplySchemaDefaults applies default values from schema using reflection
+func (c *BaseConfig) ApplySchemaDefaults(schema *config.Schema) error {
+ // Use reflection-based approach for BaseConfig since it needs to handle embedded structs
+ return schema.ApplyDefaultsToProtobuf(c)
+}
+
+// setFieldValue sets a field value with type conversion
+func setFieldValue(field reflect.Value, value interface{}) error {
+ if value == nil {
+ return nil
+ }
+
+ valueVal := reflect.ValueOf(value)
+ fieldType := field.Type()
+ valueType := valueVal.Type()
+
+ // Direct assignment if types match
+ if valueType.AssignableTo(fieldType) {
+ field.Set(valueVal)
+ return nil
+ }
+
+ // Type conversion for common cases
+ switch fieldType.Kind() {
+ case reflect.Bool:
+ if b, ok := value.(bool); ok {
+ field.SetBool(b)
+ } else {
+ return fmt.Errorf("cannot convert %T to bool", value)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch v := value.(type) {
+ case int:
+ field.SetInt(int64(v))
+ case int32:
+ field.SetInt(int64(v))
+ case int64:
+ field.SetInt(v)
+ case float64:
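+ // encoding/json decodes numbers into interface{} as float64, so accept them for integer fields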
+ field.SetInt(int64(v))
+ default:
+ return fmt.Errorf("cannot convert %T to int", value)
+ }
+ case reflect.Float32, reflect.Float64:
+ switch v := value.(type) {
+ case float32:
+ field.SetFloat(float64(v))
+ case float64:
+ field.SetFloat(v)
+ case int:
+ field.SetFloat(float64(v))
+ case int64:
+ field.SetFloat(float64(v))
+ default:
+ return fmt.Errorf("cannot convert %T to float", value)
+ }
+ case reflect.String:
+ if s, ok := value.(string); ok {
+ field.SetString(s)
+ } else {
+ return fmt.Errorf("cannot convert %T to string", value)
+ }
+ default:
+ return fmt.Errorf("unsupported field type %s", fieldType.Kind())
+ }
+
+ return nil
+}
diff --git a/weed/worker/tasks/base/task_definition_test.go b/weed/worker/tasks/base/task_definition_test.go
new file mode 100644
index 000000000..a0a0a5a24
--- /dev/null
+++ b/weed/worker/tasks/base/task_definition_test.go
@@ -0,0 +1,338 @@
+package base
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Test structs that mirror the actual configuration structure
+type TestBaseConfig struct {
+ Enabled bool `json:"enabled"`
+ ScanIntervalSeconds int `json:"scan_interval_seconds"`
+ MaxConcurrent int `json:"max_concurrent"`
+}
+
+type TestTaskConfig struct {
+ TestBaseConfig
+ TaskSpecificField float64 `json:"task_specific_field"`
+ AnotherSpecificField string `json:"another_specific_field"`
+}
+
+type TestNestedConfig struct {
+ TestBaseConfig
+ NestedStruct struct {
+ NestedField string `json:"nested_field"`
+ } `json:"nested_struct"`
+ TaskField int `json:"task_field"`
+}
+
+func TestStructToMap_WithEmbeddedStruct(t *testing.T) {
+ // Test case 1: Basic embedded struct
+ config := &TestTaskConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 3,
+ },
+ TaskSpecificField: 0.25,
+ AnotherSpecificField: "test_value",
+ }
+
+ result := StructToMap(config)
+
+ // Verify all fields are present
+ expectedFields := map[string]interface{}{
+ "enabled": true,
+ "scan_interval_seconds": 1800,
+ "max_concurrent": 3,
+ "task_specific_field": 0.25,
+ "another_specific_field": "test_value",
+ }
+
+ if len(result) != len(expectedFields) {
+ t.Errorf("Expected %d fields, got %d. Result: %+v", len(expectedFields), len(result), result)
+ }
+
+ for key, expectedValue := range expectedFields {
+ if actualValue, exists := result[key]; !exists {
+ t.Errorf("Missing field: %s", key)
+ } else if !reflect.DeepEqual(actualValue, expectedValue) {
+ t.Errorf("Field %s: expected %v (%T), got %v (%T)", key, expectedValue, expectedValue, actualValue, actualValue)
+ }
+ }
+}
+
+func TestStructToMap_WithNestedStruct(t *testing.T) {
+ config := &TestNestedConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: false,
+ ScanIntervalSeconds: 3600,
+ MaxConcurrent: 1,
+ },
+ NestedStruct: struct {
+ NestedField string `json:"nested_field"`
+ }{
+ NestedField: "nested_value",
+ },
+ TaskField: 42,
+ }
+
+ result := StructToMap(config)
+
+ // Verify embedded struct fields are included
+ if enabled, exists := result["enabled"]; !exists || enabled != false {
+ t.Errorf("Expected enabled=false from embedded struct, got %v", enabled)
+ }
+
+ if scanInterval, exists := result["scan_interval_seconds"]; !exists || scanInterval != 3600 {
+ t.Errorf("Expected scan_interval_seconds=3600 from embedded struct, got %v", scanInterval)
+ }
+
+ if maxConcurrent, exists := result["max_concurrent"]; !exists || maxConcurrent != 1 {
+ t.Errorf("Expected max_concurrent=1 from embedded struct, got %v", maxConcurrent)
+ }
+
+ // Verify regular fields are included
+ if taskField, exists := result["task_field"]; !exists || taskField != 42 {
+ t.Errorf("Expected task_field=42, got %v", taskField)
+ }
+
+ // Verify nested struct is included as a whole
+ if nestedStruct, exists := result["nested_struct"]; !exists {
+ t.Errorf("Missing nested_struct field")
+ } else {
+ // The nested struct should be included as-is, not flattened
+ if nested, ok := nestedStruct.(struct {
+ NestedField string `json:"nested_field"`
+ }); !ok || nested.NestedField != "nested_value" {
+ t.Errorf("Expected nested_struct with NestedField='nested_value', got %v", nestedStruct)
+ }
+ }
+}
+
+func TestMapToStruct_WithEmbeddedStruct(t *testing.T) {
+ // Test data with all fields including embedded struct fields
+ data := map[string]interface{}{
+ "enabled": true,
+ "scan_interval_seconds": 2400,
+ "max_concurrent": 5,
+ "task_specific_field": 0.15,
+ "another_specific_field": "updated_value",
+ }
+
+ config := &TestTaskConfig{}
+ err := MapToStruct(data, config)
+
+ if err != nil {
+ t.Fatalf("MapToStruct failed: %v", err)
+ }
+
+ // Verify embedded struct fields were set
+ if config.Enabled != true {
+ t.Errorf("Expected Enabled=true, got %v", config.Enabled)
+ }
+
+ if config.ScanIntervalSeconds != 2400 {
+ t.Errorf("Expected ScanIntervalSeconds=2400, got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.MaxConcurrent != 5 {
+ t.Errorf("Expected MaxConcurrent=5, got %v", config.MaxConcurrent)
+ }
+
+ // Verify regular fields were set
+ if config.TaskSpecificField != 0.15 {
+ t.Errorf("Expected TaskSpecificField=0.15, got %v", config.TaskSpecificField)
+ }
+
+ if config.AnotherSpecificField != "updated_value" {
+ t.Errorf("Expected AnotherSpecificField='updated_value', got %v", config.AnotherSpecificField)
+ }
+}
+
+func TestMapToStruct_PartialData(t *testing.T) {
+ // Test with only some fields present (simulating form data)
+ data := map[string]interface{}{
+ "enabled": false,
+ "max_concurrent": 2,
+ "task_specific_field": 0.30,
+ }
+
+ // Start with some initial values
+ config := &TestTaskConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 1,
+ },
+ TaskSpecificField: 0.20,
+ AnotherSpecificField: "initial_value",
+ }
+
+ err := MapToStruct(data, config)
+
+ if err != nil {
+ t.Fatalf("MapToStruct failed: %v", err)
+ }
+
+ // Verify updated fields
+ if config.Enabled != false {
+ t.Errorf("Expected Enabled=false (updated), got %v", config.Enabled)
+ }
+
+ if config.MaxConcurrent != 2 {
+ t.Errorf("Expected MaxConcurrent=2 (updated), got %v", config.MaxConcurrent)
+ }
+
+ if config.TaskSpecificField != 0.30 {
+ t.Errorf("Expected TaskSpecificField=0.30 (updated), got %v", config.TaskSpecificField)
+ }
+
+ // Verify unchanged fields remain the same
+ if config.ScanIntervalSeconds != 1800 {
+ t.Errorf("Expected ScanIntervalSeconds=1800 (unchanged), got %v", config.ScanIntervalSeconds)
+ }
+
+ if config.AnotherSpecificField != "initial_value" {
+ t.Errorf("Expected AnotherSpecificField='initial_value' (unchanged), got %v", config.AnotherSpecificField)
+ }
+}
+
+func TestRoundTripSerialization(t *testing.T) {
+ // Test complete round-trip: struct -> map -> struct
+ original := &TestTaskConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 3600,
+ MaxConcurrent: 4,
+ },
+ TaskSpecificField: 0.18,
+ AnotherSpecificField: "round_trip_test",
+ }
+
+ // Convert to map
+ dataMap := StructToMap(original)
+
+ // Convert back to struct
+ roundTrip := &TestTaskConfig{}
+ err := MapToStruct(dataMap, roundTrip)
+
+ if err != nil {
+ t.Fatalf("Round-trip MapToStruct failed: %v", err)
+ }
+
+ // Verify all fields match
+ if !reflect.DeepEqual(original.TestBaseConfig, roundTrip.TestBaseConfig) {
+ t.Errorf("BaseConfig mismatch:\nOriginal: %+v\nRound-trip: %+v", original.TestBaseConfig, roundTrip.TestBaseConfig)
+ }
+
+ if original.TaskSpecificField != roundTrip.TaskSpecificField {
+ t.Errorf("TaskSpecificField mismatch: %v != %v", original.TaskSpecificField, roundTrip.TaskSpecificField)
+ }
+
+ if original.AnotherSpecificField != roundTrip.AnotherSpecificField {
+ t.Errorf("AnotherSpecificField mismatch: %v != %v", original.AnotherSpecificField, roundTrip.AnotherSpecificField)
+ }
+}
+
+func TestStructToMap_EmptyStruct(t *testing.T) {
+ config := &TestTaskConfig{}
+ result := StructToMap(config)
+
+ // Should still include all fields, even with zero values
+ expectedFields := []string{"enabled", "scan_interval_seconds", "max_concurrent", "task_specific_field", "another_specific_field"}
+
+ for _, field := range expectedFields {
+ if _, exists := result[field]; !exists {
+ t.Errorf("Missing field: %s", field)
+ }
+ }
+}
+
+func TestStructToMap_NilPointer(t *testing.T) {
+ var config *TestTaskConfig = nil
+ result := StructToMap(config)
+
+ if len(result) != 0 {
+ t.Errorf("Expected empty map for nil pointer, got %+v", result)
+ }
+}
+
+func TestMapToStruct_InvalidInput(t *testing.T) {
+ data := map[string]interface{}{
+ "enabled": "not_a_bool", // Wrong type
+ }
+
+ config := &TestTaskConfig{}
+ err := MapToStruct(data, config)
+
+ if err == nil {
+ t.Errorf("Expected error for invalid input type, but got none")
+ }
+}
+
+func TestMapToStruct_NonPointer(t *testing.T) {
+ data := map[string]interface{}{
+ "enabled": true,
+ }
+
+ config := TestTaskConfig{} // Not a pointer
+ err := MapToStruct(data, config)
+
+ if err == nil {
+ t.Errorf("Expected error for non-pointer input, but got none")
+ }
+}
+
+// Benchmark tests to ensure performance is reasonable
+func BenchmarkStructToMap(b *testing.B) {
+ config := &TestTaskConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 3,
+ },
+ TaskSpecificField: 0.25,
+ AnotherSpecificField: "benchmark_test",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = StructToMap(config)
+ }
+}
+
+func BenchmarkMapToStruct(b *testing.B) {
+ data := map[string]interface{}{
+ "enabled": true,
+ "scan_interval_seconds": 1800,
+ "max_concurrent": 3,
+ "task_specific_field": 0.25,
+ "another_specific_field": "benchmark_test",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ config := &TestTaskConfig{}
+ _ = MapToStruct(data, config)
+ }
+}
+
+func BenchmarkRoundTrip(b *testing.B) {
+ original := &TestTaskConfig{
+ TestBaseConfig: TestBaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 1800,
+ MaxConcurrent: 3,
+ },
+ TaskSpecificField: 0.25,
+ AnotherSpecificField: "benchmark_test",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ dataMap := StructToMap(original)
+ roundTrip := &TestTaskConfig{}
+ _ = MapToStruct(dataMap, roundTrip)
+ }
+}
diff --git a/weed/worker/tasks/base/typed_task.go b/weed/worker/tasks/base/typed_task.go
new file mode 100644
index 000000000..9d2839607
--- /dev/null
+++ b/weed/worker/tasks/base/typed_task.go
@@ -0,0 +1,218 @@
+package base
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// BaseTypedTask provides a base implementation for typed tasks with logger support
+type BaseTypedTask struct {
+ taskType types.TaskType
+ taskID string
+ progress float64
+ progressCallback func(float64)
+ cancelled bool
+ mutex sync.RWMutex
+
+ // Logger functionality
+ logger tasks.TaskLogger
+ loggerConfig types.TaskLoggerConfig
+}
+
+// NewBaseTypedTask creates a new base typed task
+func NewBaseTypedTask(taskType types.TaskType) *BaseTypedTask {
+ return &BaseTypedTask{
+ taskType: taskType,
+ progress: 0.0,
+ loggerConfig: types.TaskLoggerConfig{
+ BaseLogDir: "/data/task_logs",
+ MaxTasks: 100,
+ MaxLogSizeMB: 10,
+ EnableConsole: true,
+ },
+ }
+}
+
+// GetType returns the task type
+func (bt *BaseTypedTask) GetType() types.TaskType {
+ return bt.taskType
+}
+
+// IsCancellable returns whether the task can be cancelled
+func (bt *BaseTypedTask) IsCancellable() bool {
+ return true // Most tasks can be cancelled
+}
+
+// Cancel cancels the task
+func (bt *BaseTypedTask) Cancel() error {
+ bt.mutex.Lock()
+ defer bt.mutex.Unlock()
+ bt.cancelled = true
+ return nil
+}
+
+// IsCancelled returns whether the task has been cancelled
+func (bt *BaseTypedTask) IsCancelled() bool {
+ bt.mutex.RLock()
+ defer bt.mutex.RUnlock()
+ return bt.cancelled
+}
+
+// GetProgress returns the current progress (0-100)
+func (bt *BaseTypedTask) GetProgress() float64 {
+ bt.mutex.RLock()
+ defer bt.mutex.RUnlock()
+ return bt.progress
+}
+
+// SetProgress sets the current progress and calls the callback if set
+func (bt *BaseTypedTask) SetProgress(progress float64) {
+ bt.mutex.Lock()
+ callback := bt.progressCallback
+ bt.progress = progress
+ bt.mutex.Unlock()
+
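+ // Invoke the callback outside the lock so a callback that re-enters the task cannot deadlock.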
+ if callback != nil {
+ callback(progress)
+ }
+}
+
+// SetProgressCallback sets the progress callback function
+func (bt *BaseTypedTask) SetProgressCallback(callback func(float64)) {
+ bt.mutex.Lock()
+ defer bt.mutex.Unlock()
+ bt.progressCallback = callback
+}
+
+// SetLoggerConfig sets the logger configuration for this task
+func (bt *BaseTypedTask) SetLoggerConfig(config types.TaskLoggerConfig) {
+ bt.mutex.Lock()
+ defer bt.mutex.Unlock()
+ bt.loggerConfig = config
+}
+
+// convertToTasksLoggerConfig converts types.TaskLoggerConfig to tasks.TaskLoggerConfig
+func convertToTasksLoggerConfig(config types.TaskLoggerConfig) tasks.TaskLoggerConfig {
+ return tasks.TaskLoggerConfig{
+ BaseLogDir: config.BaseLogDir,
+ MaxTasks: config.MaxTasks,
+ MaxLogSizeMB: config.MaxLogSizeMB,
+ EnableConsole: config.EnableConsole,
+ }
+}
+
+// InitializeTaskLogger initializes the task logger with task details (LoggerProvider interface)
+func (bt *BaseTypedTask) InitializeTaskLogger(taskID string, workerID string, params types.TaskParams) error {
+ bt.mutex.Lock()
+ defer bt.mutex.Unlock()
+
+ bt.taskID = taskID
+
+ // Convert the logger config to the tasks package type
+ tasksLoggerConfig := convertToTasksLoggerConfig(bt.loggerConfig)
+
+ logger, err := tasks.NewTaskLogger(taskID, bt.taskType, workerID, params, tasksLoggerConfig)
+ if err != nil {
+ return fmt.Errorf("failed to initialize task logger: %w", err)
+ }
+
+ bt.logger = logger
+ if bt.logger != nil {
+ bt.logger.Info("BaseTypedTask initialized for task %s (type: %s)", taskID, bt.taskType)
+ }
+
+ return nil
+}
+
+// GetTaskLogger returns the task logger (LoggerProvider interface)
+func (bt *BaseTypedTask) GetTaskLogger() types.TaskLogger {
+ bt.mutex.RLock()
+ defer bt.mutex.RUnlock()
+ return bt.logger
+}
+
+// LogInfo logs an info message
+func (bt *BaseTypedTask) LogInfo(message string, args ...interface{}) {
+ bt.mutex.RLock()
+ logger := bt.logger
+ bt.mutex.RUnlock()
+
+ if logger != nil {
+ logger.Info(message, args...)
+ }
+}
+
+// LogWarning logs a warning message
+func (bt *BaseTypedTask) LogWarning(message string, args ...interface{}) {
+ bt.mutex.RLock()
+ logger := bt.logger
+ bt.mutex.RUnlock()
+
+ if logger != nil {
+ logger.Warning(message, args...)
+ }
+}
+
+// LogError logs an error message
+func (bt *BaseTypedTask) LogError(message string, args ...interface{}) {
+ bt.mutex.RLock()
+ logger := bt.logger
+ bt.mutex.RUnlock()
+
+ if logger != nil {
+ logger.Error(message, args...)
+ }
+}
+
+// LogDebug logs a debug message
+func (bt *BaseTypedTask) LogDebug(message string, args ...interface{}) {
+ bt.mutex.RLock()
+ logger := bt.logger
+ bt.mutex.RUnlock()
+
+ if logger != nil {
+ logger.Debug(message, args...)
+ }
+}
+
+// LogWithFields logs a message with structured fields
+func (bt *BaseTypedTask) LogWithFields(level string, message string, fields map[string]interface{}) {
+ bt.mutex.RLock()
+ logger := bt.logger
+ bt.mutex.RUnlock()
+
+ if logger != nil {
+ logger.LogWithFields(level, message, fields)
+ }
+}
+
+// ValidateTyped provides basic validation for typed parameters
+func (bt *BaseTypedTask) ValidateTyped(params *worker_pb.TaskParams) error {
+ if params == nil {
+ return errors.New("task parameters cannot be nil")
+ }
+ if params.VolumeId == 0 {
+ return errors.New("volume_id is required")
+ }
+ if params.Server == "" {
+ return errors.New("server is required")
+ }
+ return nil
+}
+
+// EstimateTimeTyped provides a default time estimation
+func (bt *BaseTypedTask) EstimateTimeTyped(params *worker_pb.TaskParams) time.Duration {
+ // Default estimation - concrete tasks should override this
+ return 5 * time.Minute
+}
+
+// ExecuteTyped is a placeholder that concrete tasks must implement
+func (bt *BaseTypedTask) ExecuteTyped(params *worker_pb.TaskParams) error {
+ panic("ExecuteTyped must be implemented by concrete task types")
+}
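+
+// A concrete task embeds BaseTypedTask and provides its own ExecuteTyped, for
+// example (illustrative sketch; MyTask is a hypothetical name):
+//
+//	type MyTask struct{ *BaseTypedTask }
+//
+//	func (t *MyTask) ExecuteTyped(params *worker_pb.TaskParams) error {
+//		t.LogInfo("processing volume %d", params.VolumeId)
+//		t.SetProgress(100)
+//		return nil
+//	}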
diff --git a/weed/worker/tasks/config_update_registry.go b/weed/worker/tasks/config_update_registry.go
new file mode 100644
index 000000000..649c8b384
--- /dev/null
+++ b/weed/worker/tasks/config_update_registry.go
@@ -0,0 +1,67 @@
+package tasks
+
+import (
+ "sync"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// ConfigUpdateFunc is a function type for updating task configurations
+type ConfigUpdateFunc func(configPersistence interface{}) error
+
+// ConfigUpdateRegistry manages config update functions for all task types
+type ConfigUpdateRegistry struct {
+ updaters map[types.TaskType]ConfigUpdateFunc
+ mutex sync.RWMutex
+}
+
+var (
+ globalConfigUpdateRegistry *ConfigUpdateRegistry
+ configUpdateRegistryOnce sync.Once
+)
+
+// GetGlobalConfigUpdateRegistry returns the global config update registry (singleton)
+func GetGlobalConfigUpdateRegistry() *ConfigUpdateRegistry {
+ configUpdateRegistryOnce.Do(func() {
+ globalConfigUpdateRegistry = &ConfigUpdateRegistry{
+ updaters: make(map[types.TaskType]ConfigUpdateFunc),
+ }
+ glog.V(1).Infof("Created global config update registry")
+ })
+ return globalConfigUpdateRegistry
+}
+
+// RegisterConfigUpdater registers a config update function for a task type
+func (r *ConfigUpdateRegistry) RegisterConfigUpdater(taskType types.TaskType, updateFunc ConfigUpdateFunc) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ r.updaters[taskType] = updateFunc
+ glog.V(1).Infof("Registered config updater for task type: %s", taskType)
+}
+
+// UpdateAllConfigs updates configurations for all registered task types
+func (r *ConfigUpdateRegistry) UpdateAllConfigs(configPersistence interface{}) {
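+ // Copy the updater map under the read lock so the update functions run without holding it.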
+ r.mutex.RLock()
+ updaters := make(map[types.TaskType]ConfigUpdateFunc)
+ for k, v := range r.updaters {
+ updaters[k] = v
+ }
+ r.mutex.RUnlock()
+
+ for taskType, updateFunc := range updaters {
+ if err := updateFunc(configPersistence); err != nil {
+ glog.Warningf("Failed to load %s configuration from persistence: %v", taskType, err)
+ } else {
+ glog.V(1).Infof("Loaded %s configuration from persistence", taskType)
+ }
+ }
+
+ glog.V(1).Infof("All task configurations loaded from persistence")
+}
+
+// AutoRegisterConfigUpdater is a convenience function for registering config updaters
+func AutoRegisterConfigUpdater(taskType types.TaskType, updateFunc ConfigUpdateFunc) {
+ registry := GetGlobalConfigUpdateRegistry()
+ registry.RegisterConfigUpdater(taskType, updateFunc)
+}
diff --git a/weed/worker/tasks/erasure_coding/config.go b/weed/worker/tasks/erasure_coding/config.go
new file mode 100644
index 000000000..1f70fb8db
--- /dev/null
+++ b/weed/worker/tasks/erasure_coding/config.go
@@ -0,0 +1,207 @@
+package erasure_coding
+
+import (
+ "fmt"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+)
+
+// Config extends BaseConfig with erasure coding specific settings
+type Config struct {
+ base.BaseConfig
+ QuietForSeconds int `json:"quiet_for_seconds"`
+ FullnessRatio float64 `json:"fullness_ratio"`
+ CollectionFilter string `json:"collection_filter"`
+ MinSizeMB int `json:"min_size_mb"`
+}
+
+// NewDefaultConfig creates a new default erasure coding configuration
+func NewDefaultConfig() *Config {
+ return &Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 60 * 60, // 1 hour
+ MaxConcurrent: 1,
+ },
+ QuietForSeconds: 300, // 5 minutes
+ FullnessRatio: 0.8, // 80%
+ CollectionFilter: "",
+ MinSizeMB: 30, // 30MB (more reasonable than 100MB)
+ }
+}
+
+// GetConfigSpec returns the configuration schema for erasure coding tasks
+func GetConfigSpec() base.ConfigSpec {
+ return base.ConfigSpec{
+ Fields: []*config.Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: config.FieldTypeBool,
+ DefaultValue: true,
+ Required: false,
+ DisplayName: "Enable Erasure Coding Tasks",
+ Description: "Whether erasure coding tasks should be automatically created",
+ HelpText: "Toggle this to enable or disable automatic erasure coding task generation",
+ InputType: "checkbox",
+ CSSClasses: "form-check-input",
+ },
+ {
+ Name: "scan_interval_seconds",
+ JSONName: "scan_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 60 * 60,
+ MinValue: 10 * 60,
+ MaxValue: 24 * 60 * 60,
+ Required: true,
+ DisplayName: "Scan Interval",
+ Description: "How often to scan for volumes needing erasure coding",
+ HelpText: "The system will check for volumes that need erasure coding at this interval",
+ Placeholder: "1",
+ Unit: config.UnitHours,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "max_concurrent",
+ JSONName: "max_concurrent",
+ Type: config.FieldTypeInt,
+ DefaultValue: 1,
+ MinValue: 1,
+ MaxValue: 5,
+ Required: true,
+ DisplayName: "Max Concurrent Tasks",
+ Description: "Maximum number of erasure coding tasks that can run simultaneously",
+ HelpText: "Limits the number of erasure coding operations running at the same time",
+ Placeholder: "1 (default)",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "quiet_for_seconds",
+ JSONName: "quiet_for_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 300,
+ MinValue: 60,
+ MaxValue: 3600,
+ Required: true,
+ DisplayName: "Quiet Period",
+ Description: "Minimum time volume must be quiet before erasure coding",
+ HelpText: "Volume must not be modified for this duration before erasure coding",
+ Placeholder: "5",
+ Unit: config.UnitMinutes,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "fullness_ratio",
+ JSONName: "fullness_ratio",
+ Type: config.FieldTypeFloat,
+ DefaultValue: 0.8,
+ MinValue: 0.1,
+ MaxValue: 1.0,
+ Required: true,
+ DisplayName: "Fullness Ratio",
+ Description: "Minimum fullness ratio to trigger erasure coding",
+ HelpText: "Only volumes with this fullness ratio or higher will be erasure coded",
+ Placeholder: "0.80 (80%)",
+ Unit: config.UnitNone,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "collection_filter",
+ JSONName: "collection_filter",
+ Type: config.FieldTypeString,
+ DefaultValue: "",
+ Required: false,
+ DisplayName: "Collection Filter",
+ Description: "Only process volumes from specific collections",
+ HelpText: "Leave empty to process all collections, or specify collection name",
+ Placeholder: "my_collection",
+ InputType: "text",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "min_size_mb",
+ JSONName: "min_size_mb",
+ Type: config.FieldTypeInt,
+ DefaultValue: 30,
+ MinValue: 1,
+ MaxValue: 1000,
+ Required: true,
+ DisplayName: "Minimum Size (MB)",
+ Description: "Minimum volume size to consider for erasure coding",
+ HelpText: "Only volumes larger than this size will be considered for erasure coding",
+ Placeholder: "30",
+ Unit: config.UnitNone,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ },
+ }
+}
+
+// ToTaskPolicy converts configuration to a TaskPolicy protobuf message
+func (c *Config) ToTaskPolicy() *worker_pb.TaskPolicy {
+ return &worker_pb.TaskPolicy{
+ Enabled: c.Enabled,
+ MaxConcurrent: int32(c.MaxConcurrent),
+ RepeatIntervalSeconds: int32(c.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(c.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{
+ ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{
+ FullnessRatio: float64(c.FullnessRatio),
+ QuietForSeconds: int32(c.QuietForSeconds),
+ MinVolumeSizeMb: int32(c.MinSizeMB),
+ CollectionFilter: c.CollectionFilter,
+ },
+ },
+ }
+}
+
+// FromTaskPolicy loads configuration from a TaskPolicy protobuf message
+func (c *Config) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ if policy == nil {
+ return fmt.Errorf("policy is nil")
+ }
+
+ // Set general TaskPolicy fields
+ c.Enabled = policy.Enabled
+ c.MaxConcurrent = int(policy.MaxConcurrent)
+ c.ScanIntervalSeconds = int(policy.RepeatIntervalSeconds) // Direct seconds-to-seconds mapping
+
+ // Set erasure coding-specific fields from the task config
+ if ecConfig := policy.GetErasureCodingConfig(); ecConfig != nil {
+ c.FullnessRatio = float64(ecConfig.FullnessRatio)
+ c.QuietForSeconds = int(ecConfig.QuietForSeconds)
+ c.MinSizeMB = int(ecConfig.MinVolumeSizeMb)
+ c.CollectionFilter = ecConfig.CollectionFilter
+ }
+
+ return nil
+}
+
+// LoadConfigFromPersistence loads configuration from the persistence layer if available
+func LoadConfigFromPersistence(configPersistence interface{}) *Config {
+ config := NewDefaultConfig()
+
+ // Try to load from persistence if available
+ if persistence, ok := configPersistence.(interface {
+ LoadErasureCodingTaskPolicy() (*worker_pb.TaskPolicy, error)
+ }); ok {
+ if policy, err := persistence.LoadErasureCodingTaskPolicy(); err == nil && policy != nil {
+ if err := config.FromTaskPolicy(policy); err == nil {
+ glog.V(1).Infof("Loaded erasure coding configuration from persistence")
+ return config
+ }
+ }
+ }
+
+ glog.V(1).Infof("Using default erasure coding configuration")
+ return config
+}
diff --git a/weed/worker/tasks/erasure_coding/detection.go b/weed/worker/tasks/erasure_coding/detection.go
new file mode 100644
index 000000000..1a2558396
--- /dev/null
+++ b/weed/worker/tasks/erasure_coding/detection.go
@@ -0,0 +1,140 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// Detection implements the detection logic for erasure coding tasks
+func Detection(metrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo, config base.TaskConfig) ([]*types.TaskDetectionResult, error) {
+ if !config.IsEnabled() {
+ return nil, nil
+ }
+
+ ecConfig := config.(*Config)
+ var results []*types.TaskDetectionResult
+ now := time.Now()
+ quietThreshold := time.Duration(ecConfig.QuietForSeconds) * time.Second
+ minSizeBytes := uint64(ecConfig.MinSizeMB) * 1024 * 1024 // Configurable minimum
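+ // For example, the default MinSizeMB of 30 corresponds to 31,457,280 bytes.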
+
+ skippedAlreadyEC := 0
+ skippedTooSmall := 0
+ skippedCollectionFilter := 0
+ skippedQuietTime := 0
+ skippedFullness := 0
+
+ for _, metric := range metrics {
+ // Skip if already EC volume
+ if metric.IsECVolume {
+ skippedAlreadyEC++
+ continue
+ }
+
+ // Check minimum size requirement
+ if metric.Size < minSizeBytes {
+ skippedTooSmall++
+ continue
+ }
+
+ // Check collection filter if specified
+ if ecConfig.CollectionFilter != "" {
+ // Parse comma-separated collections
+ allowedCollections := make(map[string]bool)
+ for _, collection := range strings.Split(ecConfig.CollectionFilter, ",") {
+ allowedCollections[strings.TrimSpace(collection)] = true
+ }
+ // Skip if volume's collection is not in the allowed list
+ if !allowedCollections[metric.Collection] {
+ skippedCollectionFilter++
+ continue
+ }
+ }
+
+ // Check quiet duration and fullness criteria
+ if metric.Age >= quietThreshold && metric.FullnessRatio >= ecConfig.FullnessRatio {
+ result := &types.TaskDetectionResult{
+ TaskType: types.TaskTypeErasureCoding,
+ VolumeID: metric.VolumeID,
+ Server: metric.Server,
+ Collection: metric.Collection,
+ Priority: types.TaskPriorityLow, // EC is not urgent
+ Reason: fmt.Sprintf("Volume meets EC criteria: quiet for %.1fs (>%ds), fullness=%.1f%% (>%.1f%%), size=%.1fMB (>100MB)",
+ metric.Age.Seconds(), ecConfig.QuietForSeconds, metric.FullnessRatio*100, ecConfig.FullnessRatio*100,
+ float64(metric.Size)/(1024*1024)),
+ ScheduleAt: now,
+ }
+ results = append(results, result)
+ } else {
+ // Track why this volume was skipped so the summary log reflects all volumes
+ if metric.Age < quietThreshold {
+ skippedQuietTime++
+ }
+ if metric.FullnessRatio < ecConfig.FullnessRatio {
+ skippedFullness++
+ }
+ }
+ }
+
+ // Log debug summary if no tasks were created
+ if len(results) == 0 && len(metrics) > 0 {
+ totalVolumes := len(metrics)
+ glog.V(1).Infof("EC detection: No tasks created for %d volumes (skipped: %d already EC, %d too small, %d filtered, %d not quiet, %d not full)",
+ totalVolumes, skippedAlreadyEC, skippedTooSmall, skippedCollectionFilter, skippedQuietTime, skippedFullness)
+
+ // Show details for first few volumes
+ for i, metric := range metrics {
+ if i >= 3 || metric.IsECVolume { // Detail at most the first few volumes, skipping EC volumes
+ continue
+ }
+ sizeMB := float64(metric.Size) / (1024 * 1024)
+ glog.Infof("ERASURE CODING: Volume %d: size=%.1fMB (need ≥%dMB), age=%s (need ≥%s), fullness=%.1f%% (need ≥%.1f%%)",
+ metric.VolumeID, sizeMB, ecConfig.MinSizeMB, metric.Age.Truncate(time.Minute), quietThreshold.Truncate(time.Minute),
+ metric.FullnessRatio*100, ecConfig.FullnessRatio*100)
+ }
+ }
+
+ return results, nil
+}
+
+// Scheduling implements the scheduling logic for erasure coding tasks
+func Scheduling(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker, config base.TaskConfig) bool {
+ ecConfig := config.(*Config)
+
+ // Check if we have available workers
+ if len(availableWorkers) == 0 {
+ return false
+ }
+
+ // Count running EC tasks
+ runningCount := 0
+ for _, runningTask := range runningTasks {
+ if runningTask.Type == types.TaskTypeErasureCoding {
+ runningCount++
+ }
+ }
+
+ // Check concurrency limit
+ if runningCount >= ecConfig.MaxConcurrent {
+ return false
+ }
+
+ // Check if any worker can handle EC tasks
+ for _, worker := range availableWorkers {
+ for _, capability := range worker.Capabilities {
+ if capability == types.TaskTypeErasureCoding {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/weed/worker/tasks/erasure_coding/ec.go b/weed/worker/tasks/erasure_coding/ec.go
index 641dfc6b5..8dc7a1cd0 100644
--- a/weed/worker/tasks/erasure_coding/ec.go
+++ b/weed/worker/tasks/erasure_coding/ec.go
@@ -1,79 +1,785 @@
package erasure_coding
import (
+ "context"
"fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/operation"
+ "github.com/seaweedfs/seaweedfs/weed/pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/storage/needle"
+ "github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
)
-// Task implements erasure coding operation to convert volumes to EC format
+// Task implements comprehensive erasure coding with protobuf parameters
type Task struct {
- *tasks.BaseTask
- server string
- volumeID uint32
+ *base.BaseTypedTask
+
+ // Current task state
+ sourceServer string
+ volumeID uint32
+ collection string
+ workDir string
+ masterClient string
+ grpcDialOpt grpc.DialOption
+
+ // EC parameters from protobuf
+ destinations []*worker_pb.ECDestination // Disk-aware destinations
+ existingShardLocations []*worker_pb.ExistingECShardLocation // Existing shards to cleanup
+ estimatedShardSize uint64
+ dataShards int
+ parityShards int
+ cleanupSource bool
+
+ // Progress tracking
+ currentStep string
+ stepProgress map[string]float64
}
-// NewTask creates a new erasure coding task instance
-func NewTask(server string, volumeID uint32) *Task {
+// NewTask creates a new erasure coding task
+func NewTask() types.TypedTaskInterface {
task := &Task{
- BaseTask: tasks.NewBaseTask(types.TaskTypeErasureCoding),
- server: server,
- volumeID: volumeID,
+ BaseTypedTask: base.NewBaseTypedTask(types.TaskTypeErasureCoding),
+ masterClient: "localhost:9333", // Default master client
+ workDir: "/tmp/seaweedfs_ec_work", // Default work directory
+ grpcDialOpt: grpc.WithTransportCredentials(insecure.NewCredentials()), // Default to insecure
+ dataShards: erasure_coding.DataShardsCount, // Use package constant
+ parityShards: erasure_coding.ParityShardsCount, // Use package constant
+ stepProgress: make(map[string]float64),
}
return task
}
-// Execute executes the erasure coding task
-func (t *Task) Execute(params types.TaskParams) error {
- glog.Infof("Starting erasure coding task for volume %d on server %s", t.volumeID, t.server)
+// ValidateTyped validates the typed parameters for EC task
+func (t *Task) ValidateTyped(params *worker_pb.TaskParams) error {
+ // Basic validation from base class
+ if err := t.BaseTypedTask.ValidateTyped(params); err != nil {
+ return err
+ }
+
+ // Check that we have EC-specific parameters
+ ecParams := params.GetErasureCodingParams()
+ if ecParams == nil {
+ return fmt.Errorf("erasure_coding_params is required for EC task")
+ }
+
+ // Require destinations
+ if len(ecParams.Destinations) == 0 {
+ return fmt.Errorf("destinations must be specified for EC task")
+ }
+
+ // DataShards and ParityShards are constants from erasure_coding package
+ expectedDataShards := int32(erasure_coding.DataShardsCount)
+ expectedParityShards := int32(erasure_coding.ParityShardsCount)
+
+ if ecParams.DataShards > 0 && ecParams.DataShards != expectedDataShards {
+ return fmt.Errorf("data_shards must be %d (fixed constant), got %d", expectedDataShards, ecParams.DataShards)
+ }
+ if ecParams.ParityShards > 0 && ecParams.ParityShards != expectedParityShards {
+ return fmt.Errorf("parity_shards must be %d (fixed constant), got %d", expectedParityShards, ecParams.ParityShards)
+ }
+
+ // Validate destination count
+ destinationCount := len(ecParams.Destinations)
+ totalShards := expectedDataShards + expectedParityShards
+ if totalShards > int32(destinationCount) {
+ return fmt.Errorf("insufficient destinations: need %d, have %d", totalShards, destinationCount)
+ }
+
+ return nil
+}
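For reference, parameters of the following shape would pass this validation with the fixed 10+4 layout. This is a sketch only: the literal field names TaskParams.ErasureCodingParams and ErasureCodingParams.Destinations are assumptions inferred from the accessors used above, not confirmed by this diff.

    dests := make([]*worker_pb.ECDestination, 0, erasure_coding.TotalShardsCount)
    for i := 0; i < erasure_coding.TotalShardsCount; i++ { // one destination per shard, 14 in total
        dests = append(dests, &worker_pb.ECDestination{Node: fmt.Sprintf("volume-server-%d:8080", i), DiskId: 0})
    }
    params := &worker_pb.TaskParams{
        VolumeId:            42,
        Server:              "volume-server-0:8080",
        ErasureCodingParams: &worker_pb.ErasureCodingParams{Destinations: dests},
    }
    // Accepted: 14 destinations >= 10 data + 4 parity shards.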
+
+// EstimateTimeTyped estimates the time needed for EC processing based on protobuf parameters
+func (t *Task) EstimateTimeTyped(params *worker_pb.TaskParams) time.Duration {
+ baseTime := 20 * time.Minute // Conservative floor covering copy, encode, distribution, and cleanup
+
+ ecParams := params.GetErasureCodingParams()
+ if ecParams != nil && ecParams.EstimatedShardSize > 0 {
+ // More accurate estimate based on shard size
+ // Account for copying, encoding, and distribution
+ gbSize := ecParams.EstimatedShardSize / (1024 * 1024 * 1024)
+ estimatedTime := time.Duration(gbSize*2) * time.Minute // 2 minutes per GB
+ if estimatedTime > baseTime {
+ return estimatedTime
+ }
+ }
+
+ return baseTime
+}
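For example, an EstimatedShardSize of 30 GiB yields 30 × 2 = 60 minutes, overriding the 20-minute floor, while anything at or below 10 GiB falls back to the floor; note the integer division, so sizes under 1 GiB contribute nothing to the estimate.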
+
+// ExecuteTyped implements the actual erasure coding workflow with typed parameters
+func (t *Task) ExecuteTyped(params *worker_pb.TaskParams) error {
+ // Extract basic parameters
+ t.volumeID = params.VolumeId
+ t.sourceServer = params.Server
+ t.collection = params.Collection
- // Simulate erasure coding operation with progress updates
- steps := []struct {
- name string
- duration time.Duration
- progress float64
- }{
- {"Analyzing volume", 2 * time.Second, 15},
- {"Creating EC shards", 5 * time.Second, 50},
- {"Verifying shards", 2 * time.Second, 75},
- {"Finalizing EC volume", 1 * time.Second, 100},
+ // Extract EC-specific parameters
+ ecParams := params.GetErasureCodingParams()
+ if ecParams != nil {
+ t.destinations = ecParams.Destinations // Store disk-aware destinations
+ t.existingShardLocations = ecParams.ExistingShardLocations // Store existing shards for cleanup
+ t.estimatedShardSize = ecParams.EstimatedShardSize
+ t.cleanupSource = ecParams.CleanupSource
+
+ // DataShards and ParityShards are constants, don't override from parameters
+ // t.dataShards and t.parityShards are already set to constants in NewTask
+
+ if ecParams.WorkingDir != "" {
+ t.workDir = ecParams.WorkingDir
+ }
+ if ecParams.MasterClient != "" {
+ t.masterClient = ecParams.MasterClient
+ }
}
- for _, step := range steps {
- if t.IsCancelled() {
- return fmt.Errorf("erasure coding task cancelled")
+ // Determine available destinations for logging
+ var availableDestinations []string
+ for _, dest := range t.destinations {
+ availableDestinations = append(availableDestinations, fmt.Sprintf("%s(disk:%d)", dest.Node, dest.DiskId))
+ }
+
+ glog.V(1).Infof("Starting EC task for volume %d: %s -> %v (data:%d, parity:%d)",
+ t.volumeID, t.sourceServer, availableDestinations, t.dataShards, t.parityShards)
+
+ // Create unique working directory for this task
+ taskWorkDir := filepath.Join(t.workDir, fmt.Sprintf("vol_%d_%d", t.volumeID, time.Now().Unix()))
+ if err := os.MkdirAll(taskWorkDir, 0755); err != nil {
+ return fmt.Errorf("failed to create task working directory %s: %v", taskWorkDir, err)
+ }
+ glog.V(1).Infof("WORKFLOW: Created working directory: %s", taskWorkDir)
+
+ // Ensure cleanup of working directory
+ defer func() {
+ if err := os.RemoveAll(taskWorkDir); err != nil {
+ glog.Warningf("Failed to cleanup working directory %s: %v", taskWorkDir, err)
+ } else {
+ glog.V(1).Infof("WORKFLOW: Cleaned up working directory: %s", taskWorkDir)
}
+ }()
+
+ // Step 1: Collect volume locations from master
+ glog.V(1).Infof("WORKFLOW STEP 1: Collecting volume locations from master")
+ t.SetProgress(5.0)
+ volumeId := needle.VolumeId(t.volumeID)
+ volumeLocations, err := t.collectVolumeLocations(volumeId)
+ if err != nil {
+ return fmt.Errorf("failed to collect volume locations before EC encoding: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: Found volume %d on %d servers: %v", t.volumeID, len(volumeLocations), volumeLocations)
- glog.V(1).Infof("Erasure coding task step: %s", step.name)
- t.SetProgress(step.progress)
+ // Convert ServerAddress slice to string slice
+ var locationStrings []string
+ for _, addr := range volumeLocations {
+ locationStrings = append(locationStrings, string(addr))
+ }
- // Simulate work
- time.Sleep(step.duration)
+ // Step 2: Check whether the volume should be EC encoded (currently only requires at least one known location)
+ if !t.shouldPerformECEncoding(locationStrings) {
+ glog.Infof("Volume %d does not meet EC encoding criteria, skipping", t.volumeID)
+ t.SetProgress(100.0)
+ return nil
}
- glog.Infof("Erasure coding task completed for volume %d on server %s", t.volumeID, t.server)
+ // Step 2A: Cleanup existing EC shards if any
+ glog.V(1).Infof("WORKFLOW STEP 2A: Cleaning up existing EC shards for volume %d", t.volumeID)
+ t.SetProgress(10.0)
+ err = t.cleanupExistingEcShards()
+ if err != nil {
+ glog.Warningf("Failed to cleanup existing EC shards (continuing anyway): %v", err)
+ // Don't fail the task - this is just cleanup
+ }
+ glog.V(1).Infof("WORKFLOW: Existing EC shards cleanup completed for volume %d", t.volumeID)
+
+ // Step 2B: Mark volume readonly on all replica servers
+ glog.V(1).Infof("WORKFLOW STEP 2B: Marking volume %d readonly on all replica servers", t.volumeID)
+ t.SetProgress(15.0)
+ err = t.markVolumeReadonlyOnAllReplicas(needle.VolumeId(t.volumeID), locationStrings)
+ if err != nil {
+ return fmt.Errorf("failed to mark volume readonly: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: Volume %d marked readonly on all replicas", t.volumeID)
+
+ // Step 3: Copy volume files (.dat, .idx) from the source server to the EC worker
+ glog.V(1).Infof("WORKFLOW STEP 3: Copying volume files from source server %s to EC worker", t.sourceServer)
+ t.SetProgress(25.0)
+ localVolumeFiles, err := t.copyVolumeFilesToWorker(taskWorkDir)
+ if err != nil {
+ return fmt.Errorf("failed to copy volume files to EC worker: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: Volume files copied to EC worker: %v", localVolumeFiles)
+
+ // Step 4: Generate EC shards locally on the EC worker
+ glog.V(1).Infof("WORKFLOW STEP 4: Generating EC shards locally on EC worker")
+ t.SetProgress(40.0)
+ localShardFiles, err := t.generateEcShardsLocally(localVolumeFiles, taskWorkDir)
+ if err != nil {
+ return fmt.Errorf("failed to generate EC shards locally: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: EC shards generated locally: %d shard files", len(localShardFiles))
+
+ // Step 5: Distribute shards from the EC worker to destination servers
+ glog.V(1).Infof("WORKFLOW STEP 5: Distributing EC shards from worker to destination servers")
+ t.SetProgress(60.0)
+ err = t.distributeEcShardsFromWorker(localShardFiles)
+ if err != nil {
+ return fmt.Errorf("failed to distribute EC shards from worker: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: EC shards distributed to all destination servers")
+
+ // Step 6: Mount EC shards on destination servers
+ glog.V(1).Infof("WORKFLOW STEP 6: Mounting EC shards on destination servers")
+ t.SetProgress(80.0)
+ err = t.mountEcShardsOnDestinations()
+ if err != nil {
+ return fmt.Errorf("failed to mount EC shards: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: EC shards mounted successfully")
+
+ // Step 7: Delete the original volume from all replica locations
+ glog.V(1).Infof("WORKFLOW STEP 7: Deleting original volume %d from all replica servers", t.volumeID)
+ t.SetProgress(90.0)
+ err = t.deleteVolumeFromAllLocations(needle.VolumeId(t.volumeID), locationStrings)
+ if err != nil {
+ return fmt.Errorf("failed to delete original volume: %v", err)
+ }
+ glog.V(1).Infof("WORKFLOW: Original volume %d deleted from all locations", t.volumeID)
+
+ t.SetProgress(100.0)
+ glog.Infof("EC task completed successfully for volume %d", t.volumeID)
return nil
}
-// Validate validates the task parameters
-func (t *Task) Validate(params types.TaskParams) error {
- if params.VolumeID == 0 {
- return fmt.Errorf("volume_id is required")
+// collectVolumeLocations gets the volume's locations from the master (placeholder implementation)
+func (t *Task) collectVolumeLocations(volumeId needle.VolumeId) ([]pb.ServerAddress, error) {
+ // For now, return a placeholder implementation
+ // Full implementation would call master to get volume locations
+ return []pb.ServerAddress{pb.ServerAddress(t.sourceServer)}, nil
+}
+
+// cleanupExistingEcShards deletes existing EC shards using planned locations
+func (t *Task) cleanupExistingEcShards() error {
+ if len(t.existingShardLocations) == 0 {
+ glog.V(1).Infof("No existing EC shards to cleanup for volume %d", t.volumeID)
+ return nil
}
- if params.Server == "" {
- return fmt.Errorf("server is required")
+
+ glog.V(1).Infof("Cleaning up existing EC shards for volume %d on %d servers", t.volumeID, len(t.existingShardLocations))
+
+ // Delete existing shards from each location using planned shard locations
+ for _, location := range t.existingShardLocations {
+ if len(location.ShardIds) == 0 {
+ continue
+ }
+
+ glog.V(1).Infof("Deleting existing EC shards %v from %s for volume %d", location.ShardIds, location.Node, t.volumeID)
+
+ err := operation.WithVolumeServerClient(false, pb.ServerAddress(location.Node), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{
+ VolumeId: t.volumeID,
+ Collection: t.collection,
+ ShardIds: location.ShardIds,
+ })
+ return deleteErr
+ })
+
+ if err != nil {
+ glog.Errorf("Failed to delete existing EC shards %v from %s for volume %d: %v", location.ShardIds, location.Node, t.volumeID, err)
+ // Continue with other servers - don't fail the entire cleanup
+ } else {
+ glog.V(1).Infof("Successfully deleted existing EC shards %v from %s for volume %d", location.ShardIds, location.Node, t.volumeID)
+ }
}
+
+ glog.V(1).Infof("Completed cleanup of existing EC shards for volume %d", t.volumeID)
return nil
}
-// EstimateTime estimates the time needed for the task
-func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
- // Base time for erasure coding operation
- baseTime := 30 * time.Second
+// shouldPerformECEncoding checks if the volume meets criteria for EC encoding
+func (t *Task) shouldPerformECEncoding(volumeLocations []string) bool {
+ // For now, always proceed with EC encoding if volume exists
+ // This can be extended with volume size checks, etc.
+ return len(volumeLocations) > 0
+}
- // Could adjust based on volume size or other factors
- return baseTime
+// markVolumeReadonlyOnAllReplicas marks the volume as readonly on all replica servers
+func (t *Task) markVolumeReadonlyOnAllReplicas(volumeId needle.VolumeId, volumeLocations []string) error {
+ glog.V(1).Infof("Marking volume %d readonly on %d servers", volumeId, len(volumeLocations))
+
+ // Mark volume readonly on all replica servers
+ for _, location := range volumeLocations {
+ glog.V(1).Infof("Marking volume %d readonly on %s", volumeId, location)
+
+ err := operation.WithVolumeServerClient(false, pb.ServerAddress(location), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ _, markErr := client.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return markErr
+ })
+
+ if err != nil {
+ glog.Errorf("Failed to mark volume %d readonly on %s: %v", volumeId, location, err)
+ return fmt.Errorf("failed to mark volume %d readonly on %s: %v", volumeId, location, err)
+ }
+
+ glog.V(1).Infof("Successfully marked volume %d readonly on %s", volumeId, location)
+ }
+
+ glog.V(1).Infof("Successfully marked volume %d readonly on all %d locations", volumeId, len(volumeLocations))
+ return nil
+}
+
+// copyVolumeFilesToWorker copies .dat and .idx files from source server to local worker
+func (t *Task) copyVolumeFilesToWorker(workDir string) (map[string]string, error) {
+ localFiles := make(map[string]string)
+
+ // Copy .dat file
+ datFile := fmt.Sprintf("%s.dat", filepath.Join(workDir, fmt.Sprintf("%d", t.volumeID)))
+ err := t.copyFileFromSource(".dat", datFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to copy .dat file: %v", err)
+ }
+ localFiles["dat"] = datFile
+ glog.V(1).Infof("Copied .dat file to: %s", datFile)
+
+ // Copy .idx file
+ idxFile := fmt.Sprintf("%s.idx", filepath.Join(workDir, fmt.Sprintf("%d", t.volumeID)))
+ err = t.copyFileFromSource(".idx", idxFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to copy .idx file: %v", err)
+ }
+ localFiles["idx"] = idxFile
+ glog.V(1).Infof("Copied .idx file to: %s", idxFile)
+
+ return localFiles, nil
+}
+
+// copyFileFromSource copies a file from source server to local path using gRPC streaming
+func (t *Task) copyFileFromSource(ext, localPath string) error {
+ return operation.WithVolumeServerClient(false, pb.ServerAddress(t.sourceServer), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ stream, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: t.volumeID,
+ Collection: t.collection,
+ Ext: ext,
+ StopOffset: uint64(math.MaxInt64),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to initiate file copy: %v", err)
+ }
+
+ // Create local file
+ localFile, err := os.Create(localPath)
+ if err != nil {
+ return fmt.Errorf("failed to create local file %s: %v", localPath, err)
+ }
+ defer localFile.Close()
+
+ // Stream data and write to local file
+ totalBytes := int64(0)
+ for {
+ resp, err := stream.Recv()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("failed to receive file data: %v", err)
+ }
+
+ if len(resp.FileContent) > 0 {
+ written, writeErr := localFile.Write(resp.FileContent)
+ if writeErr != nil {
+ return fmt.Errorf("failed to write to local file: %v", writeErr)
+ }
+ totalBytes += int64(written)
+ }
+ }
+
+ glog.V(1).Infof("Successfully copied %s (%d bytes) from %s to %s", ext, totalBytes, t.sourceServer, localPath)
+ return nil
+ })
+}
+
+// generateEcShardsLocally generates EC shards from local volume files
+func (t *Task) generateEcShardsLocally(localFiles map[string]string, workDir string) (map[string]string, error) {
+ datFile := localFiles["dat"]
+ idxFile := localFiles["idx"]
+
+ if datFile == "" || idxFile == "" {
+ return nil, fmt.Errorf("missing required volume files: dat=%s, idx=%s", datFile, idxFile)
+ }
+
+ // Get base name without extension for EC operations
+ baseName := strings.TrimSuffix(datFile, ".dat")
+
+ shardFiles := make(map[string]string)
+
+ glog.V(1).Infof("Generating EC shards from local files: dat=%s, idx=%s", datFile, idxFile)
+
+ // Generate EC shard files (.ec00 ~ .ec13)
+ if err := erasure_coding.WriteEcFiles(baseName); err != nil {
+ return nil, fmt.Errorf("failed to generate EC shard files: %v", err)
+ }
+
+ // Generate .ecx file from .idx
+ if err := erasure_coding.WriteSortedFileFromIdx(idxFile, ".ecx"); err != nil {
+ return nil, fmt.Errorf("failed to generate .ecx file: %v", err)
+ }
+
+ // Collect generated shard file paths
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ shardFile := fmt.Sprintf("%s.ec%02d", baseName, i)
+ if _, err := os.Stat(shardFile); err == nil {
+ shardFiles[fmt.Sprintf("ec%02d", i)] = shardFile
+ }
+ }
+
+ // Add metadata files
+ ecxFile := idxFile + ".ecx"
+ if _, err := os.Stat(ecxFile); err == nil {
+ shardFiles["ecx"] = ecxFile
+ }
+
+ // Generate .vif file (volume info)
+ vifFile := baseName + ".vif"
+ // Create basic volume info - in a real implementation, this would come from the original volume
+ volumeInfo := &volume_server_pb.VolumeInfo{
+ Version: uint32(needle.GetCurrentVersion()),
+ }
+ if err := volume_info.SaveVolumeInfo(vifFile, volumeInfo); err != nil {
+ glog.Warningf("Failed to create .vif file: %v", err)
+ } else {
+ shardFiles["vif"] = vifFile
+ }
+
+ glog.V(1).Infof("Generated %d EC files locally", len(shardFiles))
+ return shardFiles, nil
+}
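Concretely, for volume 42 the work directory ends up holding 42.ec00 through 42.ec13 alongside the sorted index (looked up here as 42.idx.ecx) and 42.vif, keyed into the returned map as ec00…ec13, ecx, and vif.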
+
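+ // copyEcShardsToDestinations has the destinations pull EC shards directly from the source volume server, splitting the 14 shard IDs across destinations and copying the metadata files (.ecx, .ecj, .vif) only once per target disk.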
+func (t *Task) copyEcShardsToDestinations() error {
+ if len(t.destinations) == 0 {
+ return fmt.Errorf("no destinations specified for EC shard distribution")
+ }
+
+ destinations := t.destinations
+
+ glog.V(1).Infof("Copying EC shards for volume %d to %d destinations", t.volumeID, len(destinations))
+
+ // Prepare shard IDs (0-13 for EC shards)
+ var shardIds []uint32
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ shardIds = append(shardIds, uint32(i))
+ }
+
+ // Distribute shards across destinations
+ var wg sync.WaitGroup
+ errorChan := make(chan error, len(destinations))
+
+ // Track which disks have already received metadata files (server+disk)
+ metadataFilesCopied := make(map[string]bool)
+ var metadataMutex sync.Mutex
+
+ // For each destination, copy a subset of shards
+ shardsPerDest := len(shardIds) / len(destinations)
+ remainder := len(shardIds) % len(destinations)
+
+ shardOffset := 0
+ for i, dest := range destinations {
+ wg.Add(1)
+
+ shardsForThisDest := shardsPerDest
+ if i < remainder {
+ shardsForThisDest++ // Distribute remainder shards
+ }
+
+ destShardIds := shardIds[shardOffset : shardOffset+shardsForThisDest]
+ shardOffset += shardsForThisDest
+
+ go func(destination *worker_pb.ECDestination, targetShardIds []uint32) {
+ defer wg.Done()
+
+ if t.IsCancelled() {
+ errorChan <- fmt.Errorf("task cancelled during shard copy")
+ return
+ }
+
+ // Create disk-specific metadata key (server+disk)
+ diskKey := fmt.Sprintf("%s:%d", destination.Node, destination.DiskId)
+
+ glog.V(1).Infof("Copying shards %v from %s to %s (disk %d)",
+ targetShardIds, t.sourceServer, destination.Node, destination.DiskId)
+
+ // Check if this disk needs metadata files (only once per disk)
+ metadataMutex.Lock()
+ needsMetadataFiles := !metadataFilesCopied[diskKey]
+ if needsMetadataFiles {
+ metadataFilesCopied[diskKey] = true
+ }
+ metadataMutex.Unlock()
+
+ err := operation.WithVolumeServerClient(false, pb.ServerAddress(destination.Node), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ _, copyErr := client.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
+ VolumeId: uint32(t.volumeID),
+ Collection: t.collection,
+ ShardIds: targetShardIds,
+ CopyEcxFile: needsMetadataFiles, // Copy .ecx only once per disk
+ CopyEcjFile: needsMetadataFiles, // Copy .ecj only once per disk
+ CopyVifFile: needsMetadataFiles, // Copy .vif only once per disk
+ SourceDataNode: t.sourceServer,
+ DiskId: destination.DiskId, // Pass target disk ID
+ })
+ return copyErr
+ })
+
+ if err != nil {
+ errorChan <- fmt.Errorf("failed to copy shards to %s disk %d: %v", destination.Node, destination.DiskId, err)
+ return
+ }
+
+ if needsMetadataFiles {
+ glog.V(1).Infof("Successfully copied shards %v and metadata files (.ecx, .ecj, .vif) to %s disk %d",
+ targetShardIds, destination.Node, destination.DiskId)
+ } else {
+ glog.V(1).Infof("Successfully copied shards %v to %s disk %d (metadata files already present)",
+ targetShardIds, destination.Node, destination.DiskId)
+ }
+ }(dest, destShardIds)
+ }
+
+ wg.Wait()
+ close(errorChan)
+
+ // Check for any copy errors
+ if err := <-errorChan; err != nil {
+ return err
+ }
+
+ glog.V(1).Infof("Successfully copied all EC shards for volume %d", t.volumeID)
+ return nil
+}
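For example, with 14 shards and 5 destinations, shardsPerDest is 2 and remainder is 4, so the first four destinations receive three shards each ([0-2], [3-5], [6-8], [9-11]) and the fifth receives the remaining two ([12, 13]); distributeEcShardsFromWorker below reuses the same split.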
+
+// distributeEcShardsFromWorker distributes locally generated EC shards to destination servers
+func (t *Task) distributeEcShardsFromWorker(localShardFiles map[string]string) error {
+ if len(t.destinations) == 0 {
+ return fmt.Errorf("no destinations specified for EC shard distribution")
+ }
+
+ destinations := t.destinations
+
+ glog.V(1).Infof("Distributing EC shards for volume %d from worker to %d destinations", t.volumeID, len(destinations))
+
+ // Prepare shard IDs (0-13 for EC shards)
+ var shardIds []uint32
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ shardIds = append(shardIds, uint32(i))
+ }
+
+ // Distribute shards across destinations
+ var wg sync.WaitGroup
+ errorChan := make(chan error, len(destinations))
+
+ // Track which disks have already received metadata files (server+disk)
+ metadataFilesCopied := make(map[string]bool)
+ var metadataMutex sync.Mutex
+
+ // For each destination, send a subset of shards
+ shardsPerDest := len(shardIds) / len(destinations)
+ remainder := len(shardIds) % len(destinations)
+
+ shardOffset := 0
+ for i, dest := range destinations {
+ wg.Add(1)
+
+ shardsForThisDest := shardsPerDest
+ if i < remainder {
+ shardsForThisDest++ // Distribute remainder shards
+ }
+
+ destShardIds := shardIds[shardOffset : shardOffset+shardsForThisDest]
+ shardOffset += shardsForThisDest
+
+ go func(destination *worker_pb.ECDestination, targetShardIds []uint32) {
+ defer wg.Done()
+
+ if t.IsCancelled() {
+ errorChan <- fmt.Errorf("task cancelled during shard distribution")
+ return
+ }
+
+ // Create disk-specific metadata key (server+disk)
+ diskKey := fmt.Sprintf("%s:%d", destination.Node, destination.DiskId)
+
+ glog.V(1).Infof("Distributing shards %v from worker to %s (disk %d)",
+ targetShardIds, destination.Node, destination.DiskId)
+
+ // Check if this disk needs metadata files (only once per disk)
+ metadataMutex.Lock()
+ needsMetadataFiles := !metadataFilesCopied[diskKey]
+ if needsMetadataFiles {
+ metadataFilesCopied[diskKey] = true
+ }
+ metadataMutex.Unlock()
+
+ // Send shard files to destination using HTTP upload (simplified for now)
+ err := t.sendShardsToDestination(destination, targetShardIds, localShardFiles, needsMetadataFiles)
+ if err != nil {
+ errorChan <- fmt.Errorf("failed to send shards to %s disk %d: %v", destination.Node, destination.DiskId, err)
+ return
+ }
+
+ if needsMetadataFiles {
+ glog.V(1).Infof("Successfully distributed shards %v and metadata files (.ecx, .vif) to %s disk %d",
+ targetShardIds, destination.Node, destination.DiskId)
+ } else {
+ glog.V(1).Infof("Successfully distributed shards %v to %s disk %d (metadata files already present)",
+ targetShardIds, destination.Node, destination.DiskId)
+ }
+ }(dest, destShardIds)
+ }
+
+ wg.Wait()
+ close(errorChan)
+
+ // Check for any distribution errors
+ if err := <-errorChan; err != nil {
+ return err
+ }
+
+ glog.V(1).Infof("Completed distributing EC shards for volume %d", t.volumeID)
+ return nil
+}
+
+// sendShardsToDestination sends specific shard files from worker to a destination server (simplified)
+func (t *Task) sendShardsToDestination(destination *worker_pb.ECDestination, shardIds []uint32, localFiles map[string]string, includeMetadata bool) error {
+ // For now, use a simplified approach - just upload the files
+ // In a full implementation, this would use proper file upload mechanisms
+ glog.V(2).Infof("Would send shards %v and metadata=%v to %s disk %d", shardIds, includeMetadata, destination.Node, destination.DiskId)
+
+ // TODO: Implement actual file upload to volume server
+ // This is a placeholder - actual implementation would:
+ // 1. Open each shard file locally
+ // 2. Upload via HTTP POST or gRPC stream to destination volume server
+ // 3. Volume server would save to the specified disk_id
+
+ return nil
+}
+
+// mountEcShardsOnDestinations mounts EC shards on all destination servers
+func (t *Task) mountEcShardsOnDestinations() error {
+ if len(t.destinations) == 0 {
+ return fmt.Errorf("no destinations specified for mounting EC shards")
+ }
+
+ destinations := t.destinations
+
+ glog.V(1).Infof("Mounting EC shards for volume %d on %d destinations", t.volumeID, len(destinations))
+
+ // Prepare all shard IDs (0-13)
+ var allShardIds []uint32
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ allShardIds = append(allShardIds, uint32(i))
+ }
+
+ var wg sync.WaitGroup
+ errorChan := make(chan error, len(destinations))
+
+ // Mount shards on each destination server
+ for _, dest := range destinations {
+ wg.Add(1)
+
+ go func(destination *worker_pb.ECDestination) {
+ defer wg.Done()
+
+ if t.IsCancelled() {
+ errorChan <- fmt.Errorf("task cancelled during shard mounting")
+ return
+ }
+
+ glog.V(1).Infof("Mounting EC shards on %s disk %d", destination.Node, destination.DiskId)
+
+ err := operation.WithVolumeServerClient(false, pb.ServerAddress(destination.Node), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ _, mountErr := client.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
+ VolumeId: uint32(t.volumeID),
+ Collection: t.collection,
+ ShardIds: allShardIds, // Mount all available shards on each server
+ })
+ return mountErr
+ })
+
+ if err != nil {
+ // It's normal for some servers to not have all shards, so log as warning rather than error
+ glog.Warningf("Failed to mount some shards on %s disk %d (this may be normal): %v", destination.Node, destination.DiskId, err)
+ } else {
+ glog.V(1).Infof("Successfully mounted EC shards on %s disk %d", destination.Node, destination.DiskId)
+ }
+ }(dest)
+ }
+
+ wg.Wait()
+ close(errorChan)
+
+ // Mount failures are logged above as warnings only; surface one here if any were queued
+ select {
+ case err := <-errorChan:
+ if err != nil {
+ glog.Warningf("Some shard mounting issues occurred: %v", err)
+ }
+ default:
+ // No errors
+ }
+
+ glog.V(1).Infof("Completed mounting EC shards for volume %d", t.volumeID)
+ return nil
+}
+
+// deleteVolumeFromAllLocations deletes the original volume from all replica servers
+func (t *Task) deleteVolumeFromAllLocations(volumeId needle.VolumeId, volumeLocations []string) error {
+ glog.V(1).Infof("Deleting original volume %d from %d locations", volumeId, len(volumeLocations))
+
+ for _, location := range volumeLocations {
+ glog.V(1).Infof("Deleting volume %d from %s", volumeId, location)
+
+ err := operation.WithVolumeServerClient(false, pb.ServerAddress(location), t.grpcDialOpt,
+ func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{
+ VolumeId: uint32(volumeId),
+ OnlyEmpty: false, // Force delete even if not empty since we've already created EC shards
+ })
+ return deleteErr
+ })
+
+ if err != nil {
+ glog.Errorf("Failed to delete volume %d from %s: %v", volumeId, location, err)
+ return fmt.Errorf("failed to delete volume %d from %s: %v", volumeId, location, err)
+ }
+
+ glog.V(1).Infof("Successfully deleted volume %d from %s", volumeId, location)
+ }
+
+ glog.V(1).Infof("Successfully deleted volume %d from all %d locations", volumeId, len(volumeLocations))
+ return nil
+}
+
+// Register the task in the global registry
+func init() {
+ types.RegisterGlobalTypedTask(types.TaskTypeErasureCoding, NewTask)
+ glog.V(1).Infof("Registered EC task")
}
diff --git a/weed/worker/tasks/erasure_coding/ec_detector.go b/weed/worker/tasks/erasure_coding/ec_detector.go
deleted file mode 100644
index 0f8b5e376..000000000
--- a/weed/worker/tasks/erasure_coding/ec_detector.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package erasure_coding
-
-import (
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// EcDetector implements erasure coding task detection
-type EcDetector struct {
- enabled bool
- volumeAgeHours int
- fullnessRatio float64
- scanInterval time.Duration
-}
-
-// Compile-time interface assertions
-var (
- _ types.TaskDetector = (*EcDetector)(nil)
-)
-
-// NewEcDetector creates a new erasure coding detector
-func NewEcDetector() *EcDetector {
- return &EcDetector{
- enabled: false, // Conservative default
- volumeAgeHours: 24 * 7, // 1 week
- fullnessRatio: 0.9, // 90% full
- scanInterval: 2 * time.Hour,
- }
-}
-
-// GetTaskType returns the task type
-func (d *EcDetector) GetTaskType() types.TaskType {
- return types.TaskTypeErasureCoding
-}
-
-// ScanForTasks scans for volumes that should be converted to erasure coding
-func (d *EcDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
- if !d.enabled {
- return nil, nil
- }
-
- var results []*types.TaskDetectionResult
- now := time.Now()
- ageThreshold := time.Duration(d.volumeAgeHours) * time.Hour
-
- for _, metric := range volumeMetrics {
- // Skip if already EC volume
- if metric.IsECVolume {
- continue
- }
-
- // Check age and fullness criteria
- if metric.Age >= ageThreshold && metric.FullnessRatio >= d.fullnessRatio {
- // Check if volume is read-only (safe for EC conversion)
- if !metric.IsReadOnly {
- continue
- }
-
- result := &types.TaskDetectionResult{
- TaskType: types.TaskTypeErasureCoding,
- VolumeID: metric.VolumeID,
- Server: metric.Server,
- Collection: metric.Collection,
- Priority: types.TaskPriorityLow, // EC is not urgent
- Reason: "Volume is old and full enough for EC conversion",
- Parameters: map[string]interface{}{
- "age_hours": int(metric.Age.Hours()),
- "fullness_ratio": metric.FullnessRatio,
- },
- ScheduleAt: now,
- }
- results = append(results, result)
- }
- }
-
- glog.V(2).Infof("EC detector found %d tasks to schedule", len(results))
- return results, nil
-}
-
-// ScanInterval returns how often this task type should be scanned
-func (d *EcDetector) ScanInterval() time.Duration {
- return d.scanInterval
-}
-
-// IsEnabled returns whether this task type is enabled
-func (d *EcDetector) IsEnabled() bool {
- return d.enabled
-}
-
-// Configuration setters
-
-func (d *EcDetector) SetEnabled(enabled bool) {
- d.enabled = enabled
-}
-
-func (d *EcDetector) SetVolumeAgeHours(hours int) {
- d.volumeAgeHours = hours
-}
-
-func (d *EcDetector) SetFullnessRatio(ratio float64) {
- d.fullnessRatio = ratio
-}
-
-func (d *EcDetector) SetScanInterval(interval time.Duration) {
- d.scanInterval = interval
-}
-
-// GetVolumeAgeHours returns the current volume age threshold in hours
-func (d *EcDetector) GetVolumeAgeHours() int {
- return d.volumeAgeHours
-}
-
-// GetFullnessRatio returns the current fullness ratio threshold
-func (d *EcDetector) GetFullnessRatio() float64 {
- return d.fullnessRatio
-}
-
-// GetScanInterval returns the scan interval
-func (d *EcDetector) GetScanInterval() time.Duration {
- return d.scanInterval
-}
-
-// ConfigureFromPolicy configures the detector based on the maintenance policy
-func (d *EcDetector) ConfigureFromPolicy(policy interface{}) {
- // Type assert to the maintenance policy type we expect
- if maintenancePolicy, ok := policy.(interface {
- GetECEnabled() bool
- GetECVolumeAgeHours() int
- GetECFullnessRatio() float64
- }); ok {
- d.SetEnabled(maintenancePolicy.GetECEnabled())
- d.SetVolumeAgeHours(maintenancePolicy.GetECVolumeAgeHours())
- d.SetFullnessRatio(maintenancePolicy.GetECFullnessRatio())
- } else {
- glog.V(1).Infof("Could not configure EC detector from policy: unsupported policy type")
- }
-}
diff --git a/weed/worker/tasks/erasure_coding/ec_register.go b/weed/worker/tasks/erasure_coding/ec_register.go
index 6c4b5bf7f..62cfe6b56 100644
--- a/weed/worker/tasks/erasure_coding/ec_register.go
+++ b/weed/worker/tasks/erasure_coding/ec_register.go
@@ -2,80 +2,71 @@ package erasure_coding
import (
"fmt"
+ "time"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
-// Factory creates erasure coding task instances
-type Factory struct {
- *tasks.BaseTaskFactory
-}
+// Global variable to hold the task definition for configuration updates
+var globalTaskDef *base.TaskDefinition
-// NewFactory creates a new erasure coding task factory
-func NewFactory() *Factory {
- return &Factory{
- BaseTaskFactory: tasks.NewBaseTaskFactory(
- types.TaskTypeErasureCoding,
- []string{"erasure_coding", "storage", "durability"},
- "Convert volumes to erasure coded format for improved durability",
- ),
- }
-}
-
-// Create creates a new erasure coding task instance
-func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
- // Validate parameters
- if params.VolumeID == 0 {
- return nil, fmt.Errorf("volume_id is required")
- }
- if params.Server == "" {
- return nil, fmt.Errorf("server is required")
- }
-
- task := NewTask(params.Server, params.VolumeID)
- task.SetEstimatedDuration(task.EstimateTime(params))
+// Auto-register this task when the package is imported
+func init() {
+ RegisterErasureCodingTask()
- return task, nil
+ // Register config updater
+ tasks.AutoRegisterConfigUpdater(types.TaskTypeErasureCoding, UpdateConfigFromPersistence)
}
-// Shared detector and scheduler instances
-var (
- sharedDetector *EcDetector
- sharedScheduler *Scheduler
-)
+// RegisterErasureCodingTask registers the erasure coding task with the new architecture
+func RegisterErasureCodingTask() {
+ // Create configuration instance
+ config := NewDefaultConfig()
-// getSharedInstances returns the shared detector and scheduler instances
-func getSharedInstances() (*EcDetector, *Scheduler) {
- if sharedDetector == nil {
- sharedDetector = NewEcDetector()
- }
- if sharedScheduler == nil {
- sharedScheduler = NewScheduler()
+ // Create complete task definition
+ taskDef := &base.TaskDefinition{
+ Type: types.TaskTypeErasureCoding,
+ Name: "erasure_coding",
+ DisplayName: "Erasure Coding",
+ Description: "Applies erasure coding to volumes for data protection",
+ Icon: "fas fa-shield-alt text-success",
+ Capabilities: []string{"erasure_coding", "data_protection"},
+
+ Config: config,
+ ConfigSpec: GetConfigSpec(),
+ CreateTask: nil, // Uses typed task system - see init() in ec.go
+ DetectionFunc: Detection,
+ ScanInterval: 1 * time.Hour,
+ SchedulingFunc: Scheduling,
+ MaxConcurrent: 1,
+ RepeatInterval: 24 * time.Hour,
}
- return sharedDetector, sharedScheduler
-}
-// GetSharedInstances returns the shared detector and scheduler instances (public access)
-func GetSharedInstances() (*EcDetector, *Scheduler) {
- return getSharedInstances()
+ // Store task definition globally for configuration updates
+ globalTaskDef = taskDef
+
+ // Register everything with a single function call!
+ base.RegisterTask(taskDef)
}
-// Auto-register this task when the package is imported
-func init() {
- factory := NewFactory()
- tasks.AutoRegister(types.TaskTypeErasureCoding, factory)
+// UpdateConfigFromPersistence updates the erasure coding configuration from persistence
+func UpdateConfigFromPersistence(configPersistence interface{}) error {
+ if globalTaskDef == nil {
+ return fmt.Errorf("erasure coding task not registered")
+ }
- // Get shared instances for all registrations
- detector, scheduler := getSharedInstances()
+ // Load configuration from persistence
+ newConfig := LoadConfigFromPersistence(configPersistence)
+ if newConfig == nil {
+ return fmt.Errorf("failed to load configuration from persistence")
+ }
- // Register with types registry
- tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
- registry.RegisterTask(detector, scheduler)
- })
+ // Update the task definition's config
+ globalTaskDef.Config = newConfig
- // Register with UI registry using the same instances
- tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
- RegisterUI(uiRegistry, detector, scheduler)
- })
+ glog.V(1).Infof("Updated erasure coding task configuration from persistence")
+ return nil
}
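Since all of this wiring runs from init(), a binary picks the task up simply by importing the package for its side effects; a sketch of the usual blank-import pattern (the actual import site is not shown in this diff):

    import (
        // Registers the erasure coding task definition, detection, scheduling,
        // and the config updater via the init() functions above.
        _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
    )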
diff --git a/weed/worker/tasks/erasure_coding/ec_scheduler.go b/weed/worker/tasks/erasure_coding/ec_scheduler.go
deleted file mode 100644
index b2366bb06..000000000
--- a/weed/worker/tasks/erasure_coding/ec_scheduler.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package erasure_coding
-
-import (
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// Scheduler implements erasure coding task scheduling
-type Scheduler struct {
- maxConcurrent int
- enabled bool
-}
-
-// NewScheduler creates a new erasure coding scheduler
-func NewScheduler() *Scheduler {
- return &Scheduler{
- maxConcurrent: 1, // Conservative default
- enabled: false, // Conservative default
- }
-}
-
-// GetTaskType returns the task type
-func (s *Scheduler) GetTaskType() types.TaskType {
- return types.TaskTypeErasureCoding
-}
-
-// CanScheduleNow determines if an erasure coding task can be scheduled now
-func (s *Scheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
- if !s.enabled {
- return false
- }
-
- // Check if we have available workers
- if len(availableWorkers) == 0 {
- return false
- }
-
- // Count running EC tasks
- runningCount := 0
- for _, runningTask := range runningTasks {
- if runningTask.Type == types.TaskTypeErasureCoding {
- runningCount++
- }
- }
-
- // Check concurrency limit
- if runningCount >= s.maxConcurrent {
- glog.V(3).Infof("EC scheduler: at concurrency limit (%d/%d)", runningCount, s.maxConcurrent)
- return false
- }
-
- // Check if any worker can handle EC tasks
- for _, worker := range availableWorkers {
- for _, capability := range worker.Capabilities {
- if capability == types.TaskTypeErasureCoding {
- glog.V(3).Infof("EC scheduler: can schedule task for volume %d", task.VolumeID)
- return true
- }
- }
- }
-
- return false
-}
-
-// GetMaxConcurrent returns the maximum number of concurrent tasks
-func (s *Scheduler) GetMaxConcurrent() int {
- return s.maxConcurrent
-}
-
-// GetDefaultRepeatInterval returns the default interval to wait before repeating EC tasks
-func (s *Scheduler) GetDefaultRepeatInterval() time.Duration {
- return 24 * time.Hour // Don't repeat EC for 24 hours
-}
-
-// GetPriority returns the priority for this task
-func (s *Scheduler) GetPriority(task *types.Task) types.TaskPriority {
- return types.TaskPriorityLow // EC is not urgent
-}
-
-// WasTaskRecentlyCompleted checks if a similar task was recently completed
-func (s *Scheduler) WasTaskRecentlyCompleted(task *types.Task, completedTasks []*types.Task, now time.Time) bool {
- // Don't repeat EC for 24 hours
- interval := 24 * time.Hour
- cutoff := now.Add(-interval)
-
- for _, completedTask := range completedTasks {
- if completedTask.Type == types.TaskTypeErasureCoding &&
- completedTask.VolumeID == task.VolumeID &&
- completedTask.Server == task.Server &&
- completedTask.Status == types.TaskStatusCompleted &&
- completedTask.CompletedAt != nil &&
- completedTask.CompletedAt.After(cutoff) {
- return true
- }
- }
- return false
-}
-
-// IsEnabled returns whether this task type is enabled
-func (s *Scheduler) IsEnabled() bool {
- return s.enabled
-}
-
-// Configuration setters
-
-func (s *Scheduler) SetEnabled(enabled bool) {
- s.enabled = enabled
-}
-
-func (s *Scheduler) SetMaxConcurrent(max int) {
- s.maxConcurrent = max
-}
diff --git a/weed/worker/tasks/erasure_coding/ui.go b/weed/worker/tasks/erasure_coding/ui.go
deleted file mode 100644
index e17cba89a..000000000
--- a/weed/worker/tasks/erasure_coding/ui.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package erasure_coding
-
-import (
- "fmt"
- "html/template"
- "strconv"
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// UIProvider provides the UI for erasure coding task configuration
-type UIProvider struct {
- detector *EcDetector
- scheduler *Scheduler
-}
-
-// NewUIProvider creates a new erasure coding UI provider
-func NewUIProvider(detector *EcDetector, scheduler *Scheduler) *UIProvider {
- return &UIProvider{
- detector: detector,
- scheduler: scheduler,
- }
-}
-
-// GetTaskType returns the task type
-func (ui *UIProvider) GetTaskType() types.TaskType {
- return types.TaskTypeErasureCoding
-}
-
-// GetDisplayName returns the human-readable name
-func (ui *UIProvider) GetDisplayName() string {
- return "Erasure Coding"
-}
-
-// GetDescription returns a description of what this task does
-func (ui *UIProvider) GetDescription() string {
- return "Converts volumes to erasure coded format for improved data durability and fault tolerance"
-}
-
-// GetIcon returns the icon CSS class for this task type
-func (ui *UIProvider) GetIcon() string {
- return "fas fa-shield-alt text-info"
-}
-
-// ErasureCodingConfig represents the erasure coding configuration
-type ErasureCodingConfig struct {
- Enabled bool `json:"enabled"`
- VolumeAgeHoursSeconds int `json:"volume_age_hours_seconds"`
- FullnessRatio float64 `json:"fullness_ratio"`
- ScanIntervalSeconds int `json:"scan_interval_seconds"`
- MaxConcurrent int `json:"max_concurrent"`
- ShardCount int `json:"shard_count"`
- ParityCount int `json:"parity_count"`
- CollectionFilter string `json:"collection_filter"`
-}
-
-// Helper functions for duration conversion
-func secondsToDuration(seconds int) time.Duration {
- return time.Duration(seconds) * time.Second
-}
-
-func durationToSeconds(d time.Duration) int {
- return int(d.Seconds())
-}
-
-// formatDurationForUser formats seconds as a user-friendly duration string
-func formatDurationForUser(seconds int) string {
- d := secondsToDuration(seconds)
- if d < time.Minute {
- return fmt.Sprintf("%ds", seconds)
- }
- if d < time.Hour {
- return fmt.Sprintf("%.0fm", d.Minutes())
- }
- if d < 24*time.Hour {
- return fmt.Sprintf("%.1fh", d.Hours())
- }
- return fmt.Sprintf("%.1fd", d.Hours()/24)
-}
-
-// RenderConfigForm renders the configuration form HTML
-func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
- config := ui.getCurrentECConfig()
-
- // Build form using the FormBuilder helper
- form := types.NewFormBuilder()
-
- // Detection Settings
- form.AddCheckboxField(
- "enabled",
- "Enable Erasure Coding Tasks",
- "Whether erasure coding tasks should be automatically created",
- config.Enabled,
- )
-
- form.AddNumberField(
- "volume_age_hours_seconds",
- "Volume Age Threshold",
- "Only apply erasure coding to volumes older than this duration",
- float64(config.VolumeAgeHoursSeconds),
- true,
- )
-
- form.AddNumberField(
- "scan_interval_seconds",
- "Scan Interval",
- "How often to scan for volumes needing erasure coding",
- float64(config.ScanIntervalSeconds),
- true,
- )
-
- // Scheduling Settings
- form.AddNumberField(
- "max_concurrent",
- "Max Concurrent Tasks",
- "Maximum number of erasure coding tasks that can run simultaneously",
- float64(config.MaxConcurrent),
- true,
- )
-
- // Erasure Coding Parameters
- form.AddNumberField(
- "shard_count",
- "Data Shards",
- "Number of data shards for erasure coding (recommended: 10)",
- float64(config.ShardCount),
- true,
- )
-
- form.AddNumberField(
- "parity_count",
- "Parity Shards",
- "Number of parity shards for erasure coding (recommended: 4)",
- float64(config.ParityCount),
- true,
- )
-
- // Generate organized form sections using Bootstrap components
- html := `
-<div class="row">
- <div class="col-12">
- <div class="card mb-4">
- <div class="card-header">
- <h5 class="mb-0">
- <i class="fas fa-shield-alt me-2"></i>
- Erasure Coding Configuration
- </h5>
- </div>
- <div class="card-body">
-` + string(form.Build()) + `
- </div>
- </div>
- </div>
-</div>
-
-<div class="row">
- <div class="col-12">
- <div class="card mb-3">
- <div class="card-header">
- <h5 class="mb-0">
- <i class="fas fa-info-circle me-2"></i>
- Performance Impact
- </h5>
- </div>
- <div class="card-body">
- <div class="alert alert-info" role="alert">
- <h6 class="alert-heading">Important Notes:</h6>
- <p class="mb-2"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p>
- <p class="mb-0"><strong>Durability:</strong> With ` + fmt.Sprintf("%d+%d", config.ShardCount, config.ParityCount) + ` configuration, can tolerate up to ` + fmt.Sprintf("%d", config.ParityCount) + ` shard failures.</p>
- </div>
- </div>
- </div>
- </div>
-</div>`
-
- return template.HTML(html), nil
-}
-
-// ParseConfigForm parses form data into configuration
-func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
- config := &ErasureCodingConfig{}
-
- // Parse enabled
- config.Enabled = len(formData["enabled"]) > 0
-
- // Parse volume age hours
- if values, ok := formData["volume_age_hours_seconds"]; ok && len(values) > 0 {
- hours, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid volume age hours: %w", err)
- }
- config.VolumeAgeHoursSeconds = hours
- }
-
- // Parse scan interval
- if values, ok := formData["scan_interval_seconds"]; ok && len(values) > 0 {
- interval, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid scan interval: %w", err)
- }
- config.ScanIntervalSeconds = interval
- }
-
- // Parse max concurrent
- if values, ok := formData["max_concurrent"]; ok && len(values) > 0 {
- maxConcurrent, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %w", err)
- }
- if maxConcurrent < 1 {
- return nil, fmt.Errorf("max concurrent must be at least 1")
- }
- config.MaxConcurrent = maxConcurrent
- }
-
- // Parse shard count
- if values, ok := formData["shard_count"]; ok && len(values) > 0 {
- shardCount, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid shard count: %w", err)
- }
- if shardCount < 1 {
- return nil, fmt.Errorf("shard count must be at least 1")
- }
- config.ShardCount = shardCount
- }
-
- // Parse parity count
- if values, ok := formData["parity_count"]; ok && len(values) > 0 {
- parityCount, err := strconv.Atoi(values[0])
- if err != nil {
- return nil, fmt.Errorf("invalid parity count: %w", err)
- }
- if parityCount < 1 {
- return nil, fmt.Errorf("parity count must be at least 1")
- }
- config.ParityCount = parityCount
- }
-
- return config, nil
-}
-
-// GetCurrentConfig returns the current configuration
-func (ui *UIProvider) GetCurrentConfig() interface{} {
- return ui.getCurrentECConfig()
-}
-
-// ApplyConfig applies the new configuration
-func (ui *UIProvider) ApplyConfig(config interface{}) error {
- ecConfig, ok := config.(ErasureCodingConfig)
- if !ok {
- return fmt.Errorf("invalid config type, expected ErasureCodingConfig")
- }
-
- // Apply to detector
- if ui.detector != nil {
- ui.detector.SetEnabled(ecConfig.Enabled)
- ui.detector.SetVolumeAgeHours(ecConfig.VolumeAgeHoursSeconds)
- ui.detector.SetScanInterval(secondsToDuration(ecConfig.ScanIntervalSeconds))
- }
-
- // Apply to scheduler
- if ui.scheduler != nil {
- ui.scheduler.SetEnabled(ecConfig.Enabled)
- ui.scheduler.SetMaxConcurrent(ecConfig.MaxConcurrent)
- }
-
- glog.V(1).Infof("Applied erasure coding configuration: enabled=%v, age_threshold=%v, max_concurrent=%d, shards=%d+%d",
- ecConfig.Enabled, ecConfig.VolumeAgeHoursSeconds, ecConfig.MaxConcurrent, ecConfig.ShardCount, ecConfig.ParityCount)
-
- return nil
-}
-
-// getCurrentECConfig gets the current configuration from detector and scheduler
-func (ui *UIProvider) getCurrentECConfig() ErasureCodingConfig {
- config := ErasureCodingConfig{
- // Default values (fallback if detectors/schedulers are nil)
- Enabled: true,
- VolumeAgeHoursSeconds: 24 * 3600, // 24 hours in seconds
- ScanIntervalSeconds: 2 * 3600, // 2 hours in seconds
- MaxConcurrent: 1,
- ShardCount: 10,
- ParityCount: 4,
- }
-
- // Get current values from detector
- if ui.detector != nil {
- config.Enabled = ui.detector.IsEnabled()
- config.VolumeAgeHoursSeconds = ui.detector.GetVolumeAgeHours()
- config.ScanIntervalSeconds = durationToSeconds(ui.detector.ScanInterval())
- }
-
- // Get current values from scheduler
- if ui.scheduler != nil {
- config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
- }
-
- return config
-}
-
-// RegisterUI registers the erasure coding UI provider with the UI registry
-func RegisterUI(uiRegistry *types.UIRegistry, detector *EcDetector, scheduler *Scheduler) {
- uiProvider := NewUIProvider(detector, scheduler)
- uiRegistry.RegisterUI(uiProvider)
-
- glog.V(1).Infof("✅ Registered erasure coding task UI provider")
-}
diff --git a/weed/worker/tasks/schema_provider.go b/weed/worker/tasks/schema_provider.go
new file mode 100644
index 000000000..4d69556b1
--- /dev/null
+++ b/weed/worker/tasks/schema_provider.go
@@ -0,0 +1,51 @@
+package tasks
+
+import (
+ "sync"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+)
+
+// TaskConfigSchema defines the schema for task configuration
+type TaskConfigSchema struct {
+ config.Schema // Embed common schema functionality
+ TaskName string `json:"task_name"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ Icon string `json:"icon"`
+}
+
+// TaskConfigSchemaProvider is an interface for providing task configuration schemas
+type TaskConfigSchemaProvider interface {
+ GetConfigSchema() *TaskConfigSchema
+}
+
+// schemaRegistry maintains a registry of schema providers by task type
+type schemaRegistry struct {
+ providers map[string]TaskConfigSchemaProvider
+ mutex sync.RWMutex
+}
+
+var globalSchemaRegistry = &schemaRegistry{
+ providers: make(map[string]TaskConfigSchemaProvider),
+}
+
+// RegisterTaskConfigSchema registers a schema provider for a task type
+func RegisterTaskConfigSchema(taskType string, provider TaskConfigSchemaProvider) {
+ globalSchemaRegistry.mutex.Lock()
+ defer globalSchemaRegistry.mutex.Unlock()
+ globalSchemaRegistry.providers[taskType] = provider
+}
+
+// GetTaskConfigSchema returns the schema for the specified task type
+func GetTaskConfigSchema(taskType string) *TaskConfigSchema {
+ globalSchemaRegistry.mutex.RLock()
+ provider, exists := globalSchemaRegistry.providers[taskType]
+ globalSchemaRegistry.mutex.RUnlock()
+
+ if !exists {
+ return nil
+ }
+
+ return provider.GetConfigSchema()
+}
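A minimal sketch of a provider, written as if in this same tasks package; the task name and display fields are illustrative, mirroring the erasure coding task definition earlier in this change:

    type ecSchemaProvider struct{}

    func (p *ecSchemaProvider) GetConfigSchema() *TaskConfigSchema {
        return &TaskConfigSchema{
            TaskName:    "erasure_coding",
            DisplayName: "Erasure Coding",
            Description: "Applies erasure coding to volumes for data protection",
            Icon:        "fas fa-shield-alt text-success",
        }
    }

    func init() {
        RegisterTaskConfigSchema("erasure_coding", &ecSchemaProvider{})
    }

    // Callers resolve it by task type; a nil result means nothing was registered:
    // schema := GetTaskConfigSchema("erasure_coding")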
diff --git a/weed/worker/tasks/task.go b/weed/worker/tasks/task.go
index 482233f60..15369c137 100644
--- a/weed/worker/tasks/task.go
+++ b/weed/worker/tasks/task.go
@@ -2,29 +2,69 @@ package tasks
import (
"context"
+ "fmt"
"sync"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// BaseTask provides common functionality for all tasks
type BaseTask struct {
taskType types.TaskType
+ taskID string
progress float64
cancelled bool
mutex sync.RWMutex
startTime time.Time
estimatedDuration time.Duration
+ logger TaskLogger
+ loggerConfig TaskLoggerConfig
+ progressCallback func(float64) // Callback function for progress updates
}
// NewBaseTask creates a new base task
func NewBaseTask(taskType types.TaskType) *BaseTask {
return &BaseTask{
- taskType: taskType,
- progress: 0.0,
- cancelled: false,
+ taskType: taskType,
+ progress: 0.0,
+ cancelled: false,
+ loggerConfig: DefaultTaskLoggerConfig(),
+ }
+}
+
+// NewBaseTaskWithLogger creates a new base task with custom logger configuration
+func NewBaseTaskWithLogger(taskType types.TaskType, loggerConfig TaskLoggerConfig) *BaseTask {
+ return &BaseTask{
+ taskType: taskType,
+ progress: 0.0,
+ cancelled: false,
+ loggerConfig: loggerConfig,
+ }
+}
+
+// InitializeLogger initializes the task logger with task details
+func (t *BaseTask) InitializeLogger(taskID string, workerID string, params types.TaskParams) error {
+ return t.InitializeTaskLogger(taskID, workerID, params)
+}
+
+// InitializeTaskLogger initializes the task logger with task details (LoggerProvider interface)
+func (t *BaseTask) InitializeTaskLogger(taskID string, workerID string, params types.TaskParams) error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ t.taskID = taskID
+
+ logger, err := NewTaskLogger(taskID, t.taskType, workerID, params, t.loggerConfig)
+ if err != nil {
+ return fmt.Errorf("failed to initialize task logger: %w", err)
}
+
+ t.logger = logger
+ t.logger.Info("BaseTask initialized for task %s (type: %s)", taskID, t.taskType)
+
+ return nil
}
// Type returns the task type
@@ -39,24 +79,47 @@ func (t *BaseTask) GetProgress() float64 {
return t.progress
}
-// SetProgress sets the current progress
+// SetProgress sets the current progress and logs it
func (t *BaseTask) SetProgress(progress float64) {
t.mutex.Lock()
- defer t.mutex.Unlock()
if progress < 0 {
progress = 0
}
if progress > 100 {
progress = 100
}
+ oldProgress := t.progress
+ callback := t.progressCallback
t.progress = progress
+ t.mutex.Unlock()
+
+ // Log progress change
+ if t.logger != nil && progress != oldProgress {
+ t.logger.LogProgress(progress, fmt.Sprintf("Progress updated from %.1f%% to %.1f%%", oldProgress, progress))
+ }
+
+ // Call progress callback if set
+ if callback != nil && progress != oldProgress {
+ callback(progress)
+ }
}
// Cancel cancels the task
func (t *BaseTask) Cancel() error {
t.mutex.Lock()
defer t.mutex.Unlock()
+
+ if t.cancelled {
+ return nil
+ }
+
t.cancelled = true
+
+ if t.logger != nil {
+ t.logger.LogStatus("cancelled", "Task cancelled by request")
+ t.logger.Warning("Task %s was cancelled", t.taskID)
+ }
+
return nil
}
@@ -72,6 +135,10 @@ func (t *BaseTask) SetStartTime(startTime time.Time) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.startTime = startTime
+
+ if t.logger != nil {
+ t.logger.LogStatus("running", fmt.Sprintf("Task started at %s", startTime.Format(time.RFC3339)))
+ }
}
// GetStartTime returns the task start time
@@ -86,6 +153,13 @@ func (t *BaseTask) SetEstimatedDuration(duration time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.estimatedDuration = duration
+
+ if t.logger != nil {
+ t.logger.LogWithFields("INFO", "Estimated duration set", map[string]interface{}{
+ "estimated_duration": duration.String(),
+ "estimated_seconds": duration.Seconds(),
+ })
+ }
}
// GetEstimatedDuration returns the estimated duration
@@ -95,11 +169,115 @@ func (t *BaseTask) GetEstimatedDuration() time.Duration {
return t.estimatedDuration
}
-// ExecuteTask is a wrapper that handles common task execution logic
+// SetProgressCallback sets the progress callback function
+func (t *BaseTask) SetProgressCallback(callback func(float64)) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.progressCallback = callback
+}
+
+// SetLoggerConfig sets the logger configuration for this task
+func (t *BaseTask) SetLoggerConfig(config TaskLoggerConfig) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.loggerConfig = config
+}
+
+// GetLogger returns the task logger
+func (t *BaseTask) GetLogger() TaskLogger {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+ return t.logger
+}
+
+// GetTaskLogger returns the task logger (LoggerProvider interface)
+func (t *BaseTask) GetTaskLogger() TaskLogger {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+ return t.logger
+}
+
+// LogInfo logs an info message
+func (t *BaseTask) LogInfo(message string, args ...interface{}) {
+ if t.logger != nil {
+ t.logger.Info(message, args...)
+ }
+}
+
+// LogWarning logs a warning message
+func (t *BaseTask) LogWarning(message string, args ...interface{}) {
+ if t.logger != nil {
+ t.logger.Warning(message, args...)
+ }
+}
+
+// LogError logs an error message
+func (t *BaseTask) LogError(message string, args ...interface{}) {
+ if t.logger != nil {
+ t.logger.Error(message, args...)
+ }
+}
+
+// LogDebug logs a debug message
+func (t *BaseTask) LogDebug(message string, args ...interface{}) {
+ if t.logger != nil {
+ t.logger.Debug(message, args...)
+ }
+}
+
+// LogWithFields logs a message with structured fields
+func (t *BaseTask) LogWithFields(level string, message string, fields map[string]interface{}) {
+ if t.logger != nil {
+ t.logger.LogWithFields(level, message, fields)
+ }
+}
+
+// FinishTask finalizes the task and closes the logger
+func (t *BaseTask) FinishTask(success bool, errorMsg string) error {
+ if t.logger != nil {
+ if success {
+ t.logger.LogStatus("completed", "Task completed successfully")
+ t.logger.Info("Task %s finished successfully", t.taskID)
+ } else {
+ t.logger.LogStatus("failed", fmt.Sprintf("Task failed: %s", errorMsg))
+ t.logger.Error("Task %s failed: %s", t.taskID, errorMsg)
+ }
+
+ // Close logger
+ if err := t.logger.Close(); err != nil {
+ glog.Errorf("Failed to close task logger: %v", err)
+ }
+ }
+
+ return nil
+}
+
+// ExecuteTask is a wrapper that handles common task execution logic with logging
func (t *BaseTask) ExecuteTask(ctx context.Context, params types.TaskParams, executor func(context.Context, types.TaskParams) error) error {
+ // Initialize logger if not already done
+ if t.logger == nil {
+ // Generate a temporary task ID if none provided
+ if t.taskID == "" {
+ t.taskID = fmt.Sprintf("task_%d", time.Now().UnixNano())
+ }
+
+ workerID := "unknown"
+ if err := t.InitializeLogger(t.taskID, workerID, params); err != nil {
+ glog.Warningf("Failed to initialize task logger: %v", err)
+ }
+ }
+
t.SetStartTime(time.Now())
t.SetProgress(0)
+ if t.logger != nil {
+ t.logger.LogWithFields("INFO", "Task execution started", map[string]interface{}{
+ "volume_id": params.VolumeID,
+ "server": params.Server,
+ "collection": params.Collection,
+ })
+ }
+
// Create a context that can be cancelled
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -114,21 +292,29 @@ func (t *BaseTask) ExecuteTask(ctx context.Context, params types.TaskParams, exe
// Check cancellation every second
}
}
+ t.LogWarning("Task cancellation detected, cancelling context")
cancel()
}()
// Execute the actual task
+ t.LogInfo("Starting task executor")
err := executor(ctx, params)
if err != nil {
+ t.LogError("Task executor failed: %v", err)
+ t.FinishTask(false, err.Error())
return err
}
if t.IsCancelled() {
+ t.LogWarning("Task was cancelled during execution")
+ t.FinishTask(false, "cancelled")
return context.Canceled
}
t.SetProgress(100)
+ t.LogInfo("Task executor completed successfully")
+ t.FinishTask(true, "")
return nil
}
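
For orientation, a minimal sketch (separate from the patch itself) of how a concrete task would sit on top of the reworked BaseTask: Execute delegates to ExecuteTask, so logger initialization, progress logging and the FinishTask bookkeeping shown above all come for free. The "demo" package and task type are invented for the example, and types.TaskType("demo") assumes TaskType is a string-backed type; everything else is the API added in this hunk.

package demo

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

// Task is a hypothetical task built on the shared BaseTask.
type Task struct {
	*tasks.BaseTask
}

func NewTask() *Task {
	// Assumes types.TaskType is a string kind; "demo" is a placeholder name.
	return &Task{BaseTask: tasks.NewBaseTask(types.TaskType("demo"))}
}

// Execute delegates to ExecuteTask, which initializes the task logger,
// records start time and progress, and calls FinishTask on completion.
func (t *Task) Execute(params types.TaskParams) error {
	return t.ExecuteTask(context.Background(), params, func(ctx context.Context, p types.TaskParams) error {
		t.LogInfo("doing work for volume %d", p.VolumeID)
		t.SetProgress(50) // logged via LogProgress and forwarded to any progress callback
		return nil
	})
}
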
diff --git a/weed/worker/tasks/task_log_handler.go b/weed/worker/tasks/task_log_handler.go
new file mode 100644
index 000000000..be5f00f12
--- /dev/null
+++ b/weed/worker/tasks/task_log_handler.go
@@ -0,0 +1,230 @@
+package tasks
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+// TaskLogHandler handles task log requests from the admin server
+type TaskLogHandler struct {
+ baseLogDir string
+}
+
+// NewTaskLogHandler creates a new task log handler
+func NewTaskLogHandler(baseLogDir string) *TaskLogHandler {
+ if baseLogDir == "" {
+ baseLogDir = "/tmp/seaweedfs/task_logs"
+ }
+ return &TaskLogHandler{
+ baseLogDir: baseLogDir,
+ }
+}
+
+// HandleLogRequest processes a task log request and returns the response
+func (h *TaskLogHandler) HandleLogRequest(request *worker_pb.TaskLogRequest) *worker_pb.TaskLogResponse {
+ response := &worker_pb.TaskLogResponse{
+ TaskId: request.TaskId,
+ WorkerId: request.WorkerId,
+ Success: false,
+ }
+
+ // Find the task log directory
+ logDir, err := h.findTaskLogDirectory(request.TaskId)
+ if err != nil {
+ response.ErrorMessage = fmt.Sprintf("Task log directory not found: %v", err)
+ glog.Warningf("Task log request failed for %s: %v", request.TaskId, err)
+ return response
+ }
+
+ // Read metadata if requested
+ if request.IncludeMetadata {
+ metadata, err := h.readTaskMetadata(logDir)
+ if err != nil {
+ response.ErrorMessage = fmt.Sprintf("Failed to read task metadata: %v", err)
+ glog.Warningf("Failed to read metadata for task %s: %v", request.TaskId, err)
+ return response
+ }
+ response.Metadata = metadata
+ }
+
+ // Read log entries
+ logEntries, err := h.readTaskLogEntries(logDir, request)
+ if err != nil {
+ response.ErrorMessage = fmt.Sprintf("Failed to read task logs: %v", err)
+ glog.Warningf("Failed to read logs for task %s: %v", request.TaskId, err)
+ return response
+ }
+
+ response.LogEntries = logEntries
+ response.Success = true
+
+ glog.V(1).Infof("Successfully retrieved %d log entries for task %s", len(logEntries), request.TaskId)
+ return response
+}
+
+// findTaskLogDirectory searches for the task log directory by task ID
+func (h *TaskLogHandler) findTaskLogDirectory(taskID string) (string, error) {
+ entries, err := os.ReadDir(h.baseLogDir)
+ if err != nil {
+ return "", fmt.Errorf("failed to read base log directory: %w", err)
+ }
+
+ // Look for directories that start with the task ID
+ for _, entry := range entries {
+ if entry.IsDir() && strings.HasPrefix(entry.Name(), taskID+"_") {
+ return filepath.Join(h.baseLogDir, entry.Name()), nil
+ }
+ }
+
+ return "", fmt.Errorf("task log directory not found for task ID: %s", taskID)
+}
+
+// readTaskMetadata reads task metadata from the log directory
+func (h *TaskLogHandler) readTaskMetadata(logDir string) (*worker_pb.TaskLogMetadata, error) {
+ metadata, err := GetTaskLogMetadata(logDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert to protobuf metadata
+ pbMetadata := &worker_pb.TaskLogMetadata{
+ TaskId: metadata.TaskID,
+ TaskType: metadata.TaskType,
+ WorkerId: metadata.WorkerID,
+ StartTime: metadata.StartTime.Unix(),
+ Status: metadata.Status,
+ Progress: float32(metadata.Progress),
+ VolumeId: metadata.VolumeID,
+ Server: metadata.Server,
+ Collection: metadata.Collection,
+ LogFilePath: metadata.LogFilePath,
+ CreatedAt: metadata.CreatedAt.Unix(),
+ CustomData: make(map[string]string),
+ }
+
+ // Set end time and duration if available
+ if metadata.EndTime != nil {
+ pbMetadata.EndTime = metadata.EndTime.Unix()
+ }
+ if metadata.Duration != nil {
+ pbMetadata.DurationMs = metadata.Duration.Milliseconds()
+ }
+
+ // Convert custom data
+ for key, value := range metadata.CustomData {
+ if strValue, ok := value.(string); ok {
+ pbMetadata.CustomData[key] = strValue
+ } else {
+ pbMetadata.CustomData[key] = fmt.Sprintf("%v", value)
+ }
+ }
+
+ return pbMetadata, nil
+}
+
+// readTaskLogEntries reads and filters log entries based on the request
+func (h *TaskLogHandler) readTaskLogEntries(logDir string, request *worker_pb.TaskLogRequest) ([]*worker_pb.TaskLogEntry, error) {
+ entries, err := ReadTaskLogs(logDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Apply filters
+ var filteredEntries []TaskLogEntry
+
+ for _, entry := range entries {
+ // Filter by log level
+ if request.LogLevel != "" && !strings.EqualFold(entry.Level, request.LogLevel) {
+ continue
+ }
+
+ // Filter by time range
+ if request.StartTime > 0 && entry.Timestamp.Unix() < request.StartTime {
+ continue
+ }
+ if request.EndTime > 0 && entry.Timestamp.Unix() > request.EndTime {
+ continue
+ }
+
+ filteredEntries = append(filteredEntries, entry)
+ }
+
+ // Limit entries if requested
+ if request.MaxEntries > 0 && len(filteredEntries) > int(request.MaxEntries) {
+ // Take the most recent entries
+ start := len(filteredEntries) - int(request.MaxEntries)
+ filteredEntries = filteredEntries[start:]
+ }
+
+ // Convert to protobuf entries
+ var pbEntries []*worker_pb.TaskLogEntry
+ for _, entry := range filteredEntries {
+ pbEntry := &worker_pb.TaskLogEntry{
+ Timestamp: entry.Timestamp.Unix(),
+ Level: entry.Level,
+ Message: entry.Message,
+ Fields: make(map[string]string),
+ }
+
+ // Set progress if available
+ if entry.Progress != nil {
+ pbEntry.Progress = float32(*entry.Progress)
+ }
+
+ // Set status if available
+ if entry.Status != nil {
+ pbEntry.Status = *entry.Status
+ }
+
+ // Convert fields
+ for key, value := range entry.Fields {
+ if strValue, ok := value.(string); ok {
+ pbEntry.Fields[key] = strValue
+ } else {
+ pbEntry.Fields[key] = fmt.Sprintf("%v", value)
+ }
+ }
+
+ pbEntries = append(pbEntries, pbEntry)
+ }
+
+ return pbEntries, nil
+}
+
+// ListAvailableTaskLogs returns a list of available task log directories
+func (h *TaskLogHandler) ListAvailableTaskLogs() ([]string, error) {
+ entries, err := os.ReadDir(h.baseLogDir)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read base log directory: %w", err)
+ }
+
+ var taskDirs []string
+ for _, entry := range entries {
+ if entry.IsDir() {
+ taskDirs = append(taskDirs, entry.Name())
+ }
+ }
+
+ return taskDirs, nil
+}
+
+// CleanupOldLogs removes old task logs beyond the specified limit
+func (h *TaskLogHandler) CleanupOldLogs(maxTasks int) error {
+ config := TaskLoggerConfig{
+ BaseLogDir: h.baseLogDir,
+ MaxTasks: maxTasks,
+ }
+
+ // Create a temporary logger to trigger cleanup
+ tempLogger := &FileTaskLogger{
+ config: config,
+ }
+
+ tempLogger.cleanupOldLogs()
+ return nil
+}
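
As a usage sketch, the handler above could be driven like this from a worker's request path; the task ID and the main wrapper are invented for the example, while the request fields and response shape are the ones read and populated by HandleLogRequest.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
	"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
)

func main() {
	// An empty base dir falls back to /tmp/seaweedfs/task_logs.
	handler := tasks.NewTaskLogHandler("")

	resp := handler.HandleLogRequest(&worker_pb.TaskLogRequest{
		TaskId:          "task_1234567890", // placeholder task ID
		IncludeMetadata: true,
		LogLevel:        "ERROR", // keep only ERROR entries
		MaxEntries:      100,     // most recent 100 after filtering
	})
	if !resp.Success {
		fmt.Println("log lookup failed:", resp.ErrorMessage)
		return
	}
	fmt.Println("task status:", resp.Metadata.Status)
	for _, e := range resp.LogEntries {
		fmt.Printf("%d %s %s\n", e.Timestamp, e.Level, e.Message)
	}
}
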
diff --git a/weed/worker/tasks/task_logger.go b/weed/worker/tasks/task_logger.go
new file mode 100644
index 000000000..e9c06c35c
--- /dev/null
+++ b/weed/worker/tasks/task_logger.go
@@ -0,0 +1,432 @@
+package tasks
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// TaskLogger provides file-based logging for individual tasks
+type TaskLogger interface {
+ // Log methods
+ Info(message string, args ...interface{})
+ Warning(message string, args ...interface{})
+ Error(message string, args ...interface{})
+ Debug(message string, args ...interface{})
+
+ // Progress and status logging
+ LogProgress(progress float64, message string)
+ LogStatus(status string, message string)
+
+ // Structured logging
+ LogWithFields(level string, message string, fields map[string]interface{})
+
+ // Lifecycle
+ Close() error
+ GetLogDir() string
+}
+
+// LoggerProvider interface for tasks that support logging
+type LoggerProvider interface {
+ InitializeTaskLogger(taskID string, workerID string, params types.TaskParams) error
+ GetTaskLogger() TaskLogger
+}
+
+// TaskLoggerConfig holds configuration for task logging
+type TaskLoggerConfig struct {
+ BaseLogDir string
+ MaxTasks int // Maximum number of task logs to keep
+ MaxLogSizeMB int // Maximum log file size in MB
+ EnableConsole bool // Also log to console
+}
+
+// FileTaskLogger implements TaskLogger using file-based logging
+type FileTaskLogger struct {
+ taskID string
+ taskType types.TaskType
+ workerID string
+ logDir string
+ logFile *os.File
+ mutex sync.Mutex
+ config TaskLoggerConfig
+ metadata *TaskLogMetadata
+ closed bool
+}
+
+// TaskLogMetadata contains metadata about the task execution
+type TaskLogMetadata struct {
+ TaskID string `json:"task_id"`
+ TaskType string `json:"task_type"`
+ WorkerID string `json:"worker_id"`
+ StartTime time.Time `json:"start_time"`
+ EndTime *time.Time `json:"end_time,omitempty"`
+ Duration *time.Duration `json:"duration,omitempty"`
+ Status string `json:"status"`
+ Progress float64 `json:"progress"`
+ VolumeID uint32 `json:"volume_id,omitempty"`
+ Server string `json:"server,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ CustomData map[string]interface{} `json:"custom_data,omitempty"`
+ LogFilePath string `json:"log_file_path"`
+ CreatedAt time.Time `json:"created_at"`
+}
+
+// TaskLogEntry represents a single log entry
+type TaskLogEntry struct {
+ Timestamp time.Time `json:"timestamp"`
+ Level string `json:"level"`
+ Message string `json:"message"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Progress *float64 `json:"progress,omitempty"`
+ Status *string `json:"status,omitempty"`
+}
+
+// DefaultTaskLoggerConfig returns default configuration
+func DefaultTaskLoggerConfig() TaskLoggerConfig {
+ return TaskLoggerConfig{
+ BaseLogDir: "/data/task_logs", // Use persistent data directory
+ MaxTasks: 100, // Keep last 100 task logs
+ MaxLogSizeMB: 10,
+ EnableConsole: true,
+ }
+}
+
+// NewTaskLogger creates a new file-based task logger
+func NewTaskLogger(taskID string, taskType types.TaskType, workerID string, params types.TaskParams, config TaskLoggerConfig) (TaskLogger, error) {
+ // Create unique directory name with timestamp
+ timestamp := time.Now().Format("20060102_150405")
+ dirName := fmt.Sprintf("%s_%s_%s_%s", taskID, taskType, workerID, timestamp)
+ logDir := filepath.Join(config.BaseLogDir, dirName)
+
+ // Create log directory
+ if err := os.MkdirAll(logDir, 0755); err != nil {
+ return nil, fmt.Errorf("failed to create log directory %s: %w", logDir, err)
+ }
+
+ // Create log file
+ logFilePath := filepath.Join(logDir, "task.log")
+ logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create log file %s: %w", logFilePath, err)
+ }
+
+ // Create metadata
+ metadata := &TaskLogMetadata{
+ TaskID: taskID,
+ TaskType: string(taskType),
+ WorkerID: workerID,
+ StartTime: time.Now(),
+ Status: "started",
+ Progress: 0.0,
+ VolumeID: params.VolumeID,
+ Server: params.Server,
+ Collection: params.Collection,
+ CustomData: make(map[string]interface{}),
+ LogFilePath: logFilePath,
+ CreatedAt: time.Now(),
+ }
+
+ logger := &FileTaskLogger{
+ taskID: taskID,
+ taskType: taskType,
+ workerID: workerID,
+ logDir: logDir,
+ logFile: logFile,
+ config: config,
+ metadata: metadata,
+ closed: false,
+ }
+
+ // Write initial log entry
+ logger.Info("Task logger initialized for %s (type: %s, worker: %s)", taskID, taskType, workerID)
+ logger.LogWithFields("INFO", "Task parameters", map[string]interface{}{
+ "volume_id": params.VolumeID,
+ "server": params.Server,
+ "collection": params.Collection,
+ })
+
+ // Save initial metadata
+ if err := logger.saveMetadata(); err != nil {
+ glog.Warningf("Failed to save initial task metadata: %v", err)
+ }
+
+ // Clean up old task logs
+ go logger.cleanupOldLogs()
+
+ return logger, nil
+}
+
+// Info logs an info message
+func (l *FileTaskLogger) Info(message string, args ...interface{}) {
+ l.log("INFO", message, args...)
+}
+
+// Warning logs a warning message
+func (l *FileTaskLogger) Warning(message string, args ...interface{}) {
+ l.log("WARNING", message, args...)
+}
+
+// Error logs an error message
+func (l *FileTaskLogger) Error(message string, args ...interface{}) {
+ l.log("ERROR", message, args...)
+}
+
+// Debug logs a debug message
+func (l *FileTaskLogger) Debug(message string, args ...interface{}) {
+ l.log("DEBUG", message, args...)
+}
+
+// LogProgress logs task progress
+func (l *FileTaskLogger) LogProgress(progress float64, message string) {
+ l.mutex.Lock()
+ l.metadata.Progress = progress
+ l.mutex.Unlock()
+
+ entry := TaskLogEntry{
+ Timestamp: time.Now(),
+ Level: "INFO",
+ Message: message,
+ Progress: &progress,
+ }
+
+ l.writeLogEntry(entry)
+ l.saveMetadata() // Update metadata with new progress
+}
+
+// LogStatus logs task status change
+func (l *FileTaskLogger) LogStatus(status string, message string) {
+ l.mutex.Lock()
+ l.metadata.Status = status
+ l.mutex.Unlock()
+
+ entry := TaskLogEntry{
+ Timestamp: time.Now(),
+ Level: "INFO",
+ Message: message,
+ Status: &status,
+ }
+
+ l.writeLogEntry(entry)
+ l.saveMetadata() // Update metadata with new status
+}
+
+// LogWithFields logs a message with structured fields
+func (l *FileTaskLogger) LogWithFields(level string, message string, fields map[string]interface{}) {
+ entry := TaskLogEntry{
+ Timestamp: time.Now(),
+ Level: level,
+ Message: message,
+ Fields: fields,
+ }
+
+ l.writeLogEntry(entry)
+}
+
+// Close closes the logger and finalizes metadata
+func (l *FileTaskLogger) Close() error {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if l.closed {
+ return nil
+ }
+
+ // Finalize metadata
+ endTime := time.Now()
+ duration := endTime.Sub(l.metadata.StartTime)
+ l.metadata.EndTime = &endTime
+ l.metadata.Duration = &duration
+
+ if l.metadata.Status == "started" {
+ l.metadata.Status = "completed"
+ }
+
+ // Save final metadata
+ l.saveMetadata()
+
+ // Close log file
+ if l.logFile != nil {
+ if err := l.logFile.Close(); err != nil {
+ return fmt.Errorf("failed to close log file: %w", err)
+ }
+ }
+
+ l.closed = true
+ // Log via glog directly here: calling l.Info would re-acquire l.mutex,
+ // which Close already holds, and deadlock.
+ glog.Infof("Task logger closed for %s", l.taskID)
+
+ return nil
+}
+
+// GetLogDir returns the log directory path
+func (l *FileTaskLogger) GetLogDir() string {
+ return l.logDir
+}
+
+// log is the internal logging method
+func (l *FileTaskLogger) log(level string, message string, args ...interface{}) {
+ formattedMessage := fmt.Sprintf(message, args...)
+
+ entry := TaskLogEntry{
+ Timestamp: time.Now(),
+ Level: level,
+ Message: formattedMessage,
+ }
+
+ l.writeLogEntry(entry)
+}
+
+// writeLogEntry writes a log entry to the file
+func (l *FileTaskLogger) writeLogEntry(entry TaskLogEntry) {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if l.closed || l.logFile == nil {
+ return
+ }
+
+ // Format as JSON line
+ jsonData, err := json.Marshal(entry)
+ if err != nil {
+ glog.Errorf("Failed to marshal log entry: %v", err)
+ return
+ }
+
+ // Write to file
+ if _, err := l.logFile.WriteString(string(jsonData) + "\n"); err != nil {
+ glog.Errorf("Failed to write log entry: %v", err)
+ return
+ }
+
+ // Flush to disk
+ if err := l.logFile.Sync(); err != nil {
+ glog.Errorf("Failed to sync log file: %v", err)
+ }
+
+ // Also log to console and stderr if enabled
+ if l.config.EnableConsole {
+ // Log to glog with a call depth that attributes the message to the original call site:
+ // depth 3 skips writeLogEntry -> log -> Info/Warning/Error so glog reports the caller's file and line.
+ formattedMsg := fmt.Sprintf("[TASK-%s] %s: %s", l.taskID, entry.Level, entry.Message)
+ switch entry.Level {
+ case "ERROR":
+ glog.ErrorDepth(3, formattedMsg)
+ case "WARNING":
+ glog.WarningDepth(3, formattedMsg)
+ default: // INFO, DEBUG, etc.
+ glog.InfoDepth(3, formattedMsg)
+ }
+ // Also log to stderr for immediate visibility
+ fmt.Fprintf(os.Stderr, "[TASK-%s] %s: %s\n", l.taskID, entry.Level, entry.Message)
+ }
+}
+
+// saveMetadata saves task metadata to file
+func (l *FileTaskLogger) saveMetadata() error {
+ metadataPath := filepath.Join(l.logDir, "metadata.json")
+
+ data, err := json.MarshalIndent(l.metadata, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal metadata: %w", err)
+ }
+
+ return os.WriteFile(metadataPath, data, 0644)
+}
+
+// cleanupOldLogs removes old task log directories to maintain the limit
+func (l *FileTaskLogger) cleanupOldLogs() {
+ baseDir := l.config.BaseLogDir
+
+ entries, err := os.ReadDir(baseDir)
+ if err != nil {
+ glog.Warningf("Failed to read log directory %s: %v", baseDir, err)
+ return
+ }
+
+ // Filter for directories only
+ var dirs []os.DirEntry
+ for _, entry := range entries {
+ if entry.IsDir() {
+ dirs = append(dirs, entry)
+ }
+ }
+
+ // If we're under the limit, nothing to clean
+ if len(dirs) <= l.config.MaxTasks {
+ return
+ }
+
+ // Sort by modification time (oldest first)
+ sort.Slice(dirs, func(i, j int) bool {
+ infoI, errI := dirs[i].Info()
+ infoJ, errJ := dirs[j].Info()
+ if errI != nil || errJ != nil {
+ return false
+ }
+ return infoI.ModTime().Before(infoJ.ModTime())
+ })
+
+ // Remove oldest directories
+ numToRemove := len(dirs) - l.config.MaxTasks
+ for i := 0; i < numToRemove; i++ {
+ dirPath := filepath.Join(baseDir, dirs[i].Name())
+ if err := os.RemoveAll(dirPath); err != nil {
+ glog.Warningf("Failed to remove old log directory %s: %v", dirPath, err)
+ } else {
+ glog.V(1).Infof("Cleaned up old task log directory: %s", dirPath)
+ }
+ }
+
+ glog.V(1).Infof("Task log cleanup completed: removed %d old directories", numToRemove)
+}
+
+// GetTaskLogMetadata reads metadata from a task log directory
+func GetTaskLogMetadata(logDir string) (*TaskLogMetadata, error) {
+ metadataPath := filepath.Join(logDir, "metadata.json")
+
+ data, err := os.ReadFile(metadataPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read metadata file: %w", err)
+ }
+
+ var metadata TaskLogMetadata
+ if err := json.Unmarshal(data, &metadata); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
+ }
+
+ return &metadata, nil
+}
+
+// ReadTaskLogs reads all log entries from a task log file
+func ReadTaskLogs(logDir string) ([]TaskLogEntry, error) {
+ logPath := filepath.Join(logDir, "task.log")
+
+ file, err := os.Open(logPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open log file: %w", err)
+ }
+ defer file.Close()
+
+ var entries []TaskLogEntry
+ decoder := json.NewDecoder(file)
+
+ for {
+ var entry TaskLogEntry
+ if err := decoder.Decode(&entry); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("failed to decode log entry: %w", err)
+ }
+ entries = append(entries, entry)
+ }
+
+ return entries, nil
+}
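
A rough round-trip sketch for the logger: each task gets a directory named <taskID>_<taskType>_<workerID>_<timestamp> under BaseLogDir, holding task.log (one JSON-encoded TaskLogEntry per line) and metadata.json, which ReadTaskLogs and GetTaskLogMetadata read back. The task ID, worker ID and the /tmp override below are made up for the example.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

func main() {
	cfg := tasks.DefaultTaskLoggerConfig()
	cfg.BaseLogDir = "/tmp/task_logs_demo" // override the /data default for a local run

	logger, err := tasks.NewTaskLogger("task_demo", types.TaskTypeVacuum, "worker_1",
		types.TaskParams{VolumeID: 7, Server: "localhost:8080"}, cfg)
	if err != nil {
		panic(err)
	}
	logger.LogProgress(42.0, "almost half way")
	logger.Close() // finalizes metadata.json with end time, duration and status

	entries, _ := tasks.ReadTaskLogs(logger.GetLogDir())
	meta, _ := tasks.GetTaskLogMetadata(logger.GetLogDir())
	fmt.Println(len(entries), "entries, final status:", meta.Status)
}
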
diff --git a/weed/worker/tasks/ui_base.go b/weed/worker/tasks/ui_base.go
new file mode 100644
index 000000000..ac22c20c4
--- /dev/null
+++ b/weed/worker/tasks/ui_base.go
@@ -0,0 +1,184 @@
+package tasks
+
+import (
+ "reflect"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// BaseUIProvider provides common UIProvider functionality for all tasks
+type BaseUIProvider struct {
+ taskType types.TaskType
+ displayName string
+ description string
+ icon string
+ schemaFunc func() *TaskConfigSchema
+ configFunc func() types.TaskConfig
+ applyTaskPolicyFunc func(policy *worker_pb.TaskPolicy) error
+ applyTaskConfigFunc func(config types.TaskConfig) error
+}
+
+// NewBaseUIProvider creates a new base UI provider
+func NewBaseUIProvider(
+ taskType types.TaskType,
+ displayName string,
+ description string,
+ icon string,
+ schemaFunc func() *TaskConfigSchema,
+ configFunc func() types.TaskConfig,
+ applyTaskPolicyFunc func(policy *worker_pb.TaskPolicy) error,
+ applyTaskConfigFunc func(config types.TaskConfig) error,
+) *BaseUIProvider {
+ return &BaseUIProvider{
+ taskType: taskType,
+ displayName: displayName,
+ description: description,
+ icon: icon,
+ schemaFunc: schemaFunc,
+ configFunc: configFunc,
+ applyTaskPolicyFunc: applyTaskPolicyFunc,
+ applyTaskConfigFunc: applyTaskConfigFunc,
+ }
+}
+
+// GetTaskType returns the task type
+func (ui *BaseUIProvider) GetTaskType() types.TaskType {
+ return ui.taskType
+}
+
+// GetDisplayName returns the human-readable name
+func (ui *BaseUIProvider) GetDisplayName() string {
+ return ui.displayName
+}
+
+// GetDescription returns a description of what this task does
+func (ui *BaseUIProvider) GetDescription() string {
+ return ui.description
+}
+
+// GetIcon returns the icon CSS class for this task type
+func (ui *BaseUIProvider) GetIcon() string {
+ return ui.icon
+}
+
+// GetCurrentConfig returns the current configuration as TaskConfig
+func (ui *BaseUIProvider) GetCurrentConfig() types.TaskConfig {
+ return ui.configFunc()
+}
+
+// ApplyTaskPolicy applies protobuf TaskPolicy configuration
+func (ui *BaseUIProvider) ApplyTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ return ui.applyTaskPolicyFunc(policy)
+}
+
+// ApplyTaskConfig applies TaskConfig interface configuration
+func (ui *BaseUIProvider) ApplyTaskConfig(config types.TaskConfig) error {
+ return ui.applyTaskConfigFunc(config)
+}
+
+// CommonConfigGetter provides a common pattern for getting current configuration
+type CommonConfigGetter[T any] struct {
+ defaultConfig T
+ detectorFunc func() T
+ schedulerFunc func() T
+}
+
+// NewCommonConfigGetter creates a new common config getter
+func NewCommonConfigGetter[T any](
+ defaultConfig T,
+ detectorFunc func() T,
+ schedulerFunc func() T,
+) *CommonConfigGetter[T] {
+ return &CommonConfigGetter[T]{
+ defaultConfig: defaultConfig,
+ detectorFunc: detectorFunc,
+ schedulerFunc: schedulerFunc,
+ }
+}
+
+// GetConfig returns the merged configuration
+func (cg *CommonConfigGetter[T]) GetConfig() T {
+ config := cg.defaultConfig
+
+ // Apply detector values if available
+ if cg.detectorFunc != nil {
+ detectorConfig := cg.detectorFunc()
+ mergeConfigs(&config, detectorConfig)
+ }
+
+ // Apply scheduler values if available
+ if cg.schedulerFunc != nil {
+ schedulerConfig := cg.schedulerFunc()
+ mergeConfigs(&config, schedulerConfig)
+ }
+
+ return config
+}
+
+// mergeConfigs merges non-zero values from source into dest
+func mergeConfigs[T any](dest *T, source T) {
+ destValue := reflect.ValueOf(dest).Elem()
+ sourceValue := reflect.ValueOf(source)
+
+ if destValue.Kind() != reflect.Struct || sourceValue.Kind() != reflect.Struct {
+ return
+ }
+
+ for i := 0; i < destValue.NumField(); i++ {
+ destField := destValue.Field(i)
+ sourceField := sourceValue.Field(i)
+
+ if !destField.CanSet() {
+ continue
+ }
+
+ // Only copy non-zero values
+ if !sourceField.IsZero() {
+ if destField.Type() == sourceField.Type() {
+ destField.Set(sourceField)
+ }
+ }
+ }
+}
+
+// RegisterUIFunc provides a common registration function signature
+type RegisterUIFunc[D, S any] func(uiRegistry *types.UIRegistry, detector D, scheduler S)
+
+// CommonRegisterUI provides a common registration implementation
+func CommonRegisterUI[D, S any](
+ taskType types.TaskType,
+ displayName string,
+ uiRegistry *types.UIRegistry,
+ detector D,
+ scheduler S,
+ schemaFunc func() *TaskConfigSchema,
+ configFunc func() types.TaskConfig,
+ applyTaskPolicyFunc func(policy *worker_pb.TaskPolicy) error,
+ applyTaskConfigFunc func(config types.TaskConfig) error,
+) {
+ // Get metadata from schema
+ schema := schemaFunc()
+ description := "Task configuration"
+ icon := "fas fa-cog"
+
+ if schema != nil {
+ description = schema.Description
+ icon = schema.Icon
+ }
+
+ uiProvider := NewBaseUIProvider(
+ taskType,
+ displayName,
+ description,
+ icon,
+ schemaFunc,
+ configFunc,
+ applyTaskPolicyFunc,
+ applyTaskConfigFunc,
+ )
+
+ uiRegistry.RegisterUI(uiProvider)
+ glog.V(1).Infof("✅ Registered %s task UI provider", taskType)
+}
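
To make the reflection-based merge concrete, a small sketch of mergeConfigs behaviour. The helper is unexported, so the sketch would have to live in the same package (for instance as a test file); the settings struct and its values are invented. Only non-zero fields of the source overwrite the destination; zero values are left alone.

package tasks

import "testing"

type settings struct {
	Threshold float64
	MaxJobs   int
	Label     string
}

func TestMergeConfigsSketch(t *testing.T) {
	dst := settings{Threshold: 0.3, MaxJobs: 2, Label: "default"}
	src := settings{MaxJobs: 5} // zero-valued Threshold and Label are skipped

	mergeConfigs(&dst, src)

	if dst.Threshold != 0.3 || dst.MaxJobs != 5 || dst.Label != "default" {
		t.Fatalf("unexpected merge result: %+v", dst)
	}
}
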
diff --git a/weed/worker/tasks/vacuum/config.go b/weed/worker/tasks/vacuum/config.go
new file mode 100644
index 000000000..fe8c0e8c5
--- /dev/null
+++ b/weed/worker/tasks/vacuum/config.go
@@ -0,0 +1,190 @@
+package vacuum
+
+import (
+ "fmt"
+
+ "github.com/seaweedfs/seaweedfs/weed/admin/config"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+)
+
+// Config extends BaseConfig with vacuum-specific settings
+type Config struct {
+ base.BaseConfig
+ GarbageThreshold float64 `json:"garbage_threshold"`
+ MinVolumeAgeSeconds int `json:"min_volume_age_seconds"`
+ MinIntervalSeconds int `json:"min_interval_seconds"`
+}
+
+// NewDefaultConfig creates a new default vacuum configuration
+func NewDefaultConfig() *Config {
+ return &Config{
+ BaseConfig: base.BaseConfig{
+ Enabled: true,
+ ScanIntervalSeconds: 2 * 60 * 60, // 2 hours
+ MaxConcurrent: 2,
+ },
+ GarbageThreshold: 0.3, // 30%
+ MinVolumeAgeSeconds: 24 * 60 * 60, // 24 hours
+ MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days
+ }
+}
+
+// ToTaskPolicy converts configuration to a TaskPolicy protobuf message
+func (c *Config) ToTaskPolicy() *worker_pb.TaskPolicy {
+ return &worker_pb.TaskPolicy{
+ Enabled: c.Enabled,
+ MaxConcurrent: int32(c.MaxConcurrent),
+ RepeatIntervalSeconds: int32(c.ScanIntervalSeconds),
+ CheckIntervalSeconds: int32(c.ScanIntervalSeconds),
+ TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{
+ VacuumConfig: &worker_pb.VacuumTaskConfig{
+ GarbageThreshold: float64(c.GarbageThreshold),
+ MinVolumeAgeHours: int32(c.MinVolumeAgeSeconds / 3600), // Convert seconds to hours
+ MinIntervalSeconds: int32(c.MinIntervalSeconds),
+ },
+ },
+ }
+}
+
+// FromTaskPolicy loads configuration from a TaskPolicy protobuf message
+func (c *Config) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
+ if policy == nil {
+ return fmt.Errorf("policy is nil")
+ }
+
+ // Set general TaskPolicy fields
+ c.Enabled = policy.Enabled
+ c.MaxConcurrent = int(policy.MaxConcurrent)
+ c.ScanIntervalSeconds = int(policy.RepeatIntervalSeconds) // Direct seconds-to-seconds mapping
+
+ // Set vacuum-specific fields from the task config
+ if vacuumConfig := policy.GetVacuumConfig(); vacuumConfig != nil {
+ c.GarbageThreshold = float64(vacuumConfig.GarbageThreshold)
+ c.MinVolumeAgeSeconds = int(vacuumConfig.MinVolumeAgeHours * 3600) // Convert hours to seconds
+ c.MinIntervalSeconds = int(vacuumConfig.MinIntervalSeconds)
+ }
+
+ return nil
+}
+
+// LoadConfigFromPersistence loads configuration from the persistence layer if available
+func LoadConfigFromPersistence(configPersistence interface{}) *Config {
+ config := NewDefaultConfig()
+
+ // Try to load from persistence if available
+ if persistence, ok := configPersistence.(interface {
+ LoadVacuumTaskPolicy() (*worker_pb.TaskPolicy, error)
+ }); ok {
+ if policy, err := persistence.LoadVacuumTaskPolicy(); err == nil && policy != nil {
+ if err := config.FromTaskPolicy(policy); err == nil {
+ glog.V(1).Infof("Loaded vacuum configuration from persistence")
+ return config
+ }
+ }
+ }
+
+ glog.V(1).Infof("Using default vacuum configuration")
+ return config
+}
+
+// GetConfigSpec returns the configuration schema for vacuum tasks
+func GetConfigSpec() base.ConfigSpec {
+ return base.ConfigSpec{
+ Fields: []*config.Field{
+ {
+ Name: "enabled",
+ JSONName: "enabled",
+ Type: config.FieldTypeBool,
+ DefaultValue: true,
+ Required: false,
+ DisplayName: "Enable Vacuum Tasks",
+ Description: "Whether vacuum tasks should be automatically created",
+ HelpText: "Toggle this to enable or disable automatic vacuum task generation",
+ InputType: "checkbox",
+ CSSClasses: "form-check-input",
+ },
+ {
+ Name: "scan_interval_seconds",
+ JSONName: "scan_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 2 * 60 * 60,
+ MinValue: 10 * 60,
+ MaxValue: 24 * 60 * 60,
+ Required: true,
+ DisplayName: "Scan Interval",
+ Description: "How often to scan for volumes needing vacuum",
+ HelpText: "The system will check for volumes that need vacuuming at this interval",
+ Placeholder: "2",
+ Unit: config.UnitHours,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "max_concurrent",
+ JSONName: "max_concurrent",
+ Type: config.FieldTypeInt,
+ DefaultValue: 2,
+ MinValue: 1,
+ MaxValue: 10,
+ Required: true,
+ DisplayName: "Max Concurrent Tasks",
+ Description: "Maximum number of vacuum tasks that can run simultaneously",
+ HelpText: "Limits the number of vacuum operations running at the same time to control system load",
+ Placeholder: "2 (default)",
+ Unit: config.UnitCount,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "garbage_threshold",
+ JSONName: "garbage_threshold",
+ Type: config.FieldTypeFloat,
+ DefaultValue: 0.3,
+ MinValue: 0.0,
+ MaxValue: 1.0,
+ Required: true,
+ DisplayName: "Garbage Percentage Threshold",
+ Description: "Trigger vacuum when garbage ratio exceeds this percentage",
+ HelpText: "Volumes with more deleted content than this threshold will be vacuumed",
+ Placeholder: "0.30 (30%)",
+ Unit: config.UnitNone,
+ InputType: "number",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "min_volume_age_seconds",
+ JSONName: "min_volume_age_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 24 * 60 * 60,
+ MinValue: 1 * 60 * 60,
+ MaxValue: 7 * 24 * 60 * 60,
+ Required: true,
+ DisplayName: "Minimum Volume Age",
+ Description: "Only vacuum volumes older than this duration",
+ HelpText: "Prevents vacuuming of recently created volumes that may still be actively written to",
+ Placeholder: "24",
+ Unit: config.UnitHours,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ {
+ Name: "min_interval_seconds",
+ JSONName: "min_interval_seconds",
+ Type: config.FieldTypeInterval,
+ DefaultValue: 7 * 24 * 60 * 60,
+ MinValue: 1 * 24 * 60 * 60,
+ MaxValue: 30 * 24 * 60 * 60,
+ Required: true,
+ DisplayName: "Minimum Interval",
+ Description: "Minimum time between vacuum operations on the same volume",
+ HelpText: "Prevents excessive vacuuming of the same volume by enforcing a minimum wait time",
+ Placeholder: "7",
+ Unit: config.UnitDays,
+ InputType: "interval",
+ CSSClasses: "form-control",
+ },
+ },
+ }
+}
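
A quick sketch of the protobuf round trip defined above (the values are arbitrary). Note the asymmetry: MinVolumeAgeSeconds travels as whole hours (MinVolumeAgeHours), so sub-hour values are truncated on the way out, while ScanIntervalSeconds and MinIntervalSeconds are carried as seconds and survive unchanged.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
)

func main() {
	cfg := vacuum.NewDefaultConfig()
	cfg.GarbageThreshold = 0.45
	cfg.MinVolumeAgeSeconds = 36 * 60 * 60 // 36h survives; 90*60 (1.5h) would come back as 1h

	policy := cfg.ToTaskPolicy()

	restored := vacuum.NewDefaultConfig()
	if err := restored.FromTaskPolicy(policy); err != nil {
		panic(err)
	}
	fmt.Println(restored.GarbageThreshold, restored.MinVolumeAgeSeconds) // 0.45 129600
}
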
diff --git a/weed/worker/tasks/vacuum/detection.go b/weed/worker/tasks/vacuum/detection.go
new file mode 100644
index 000000000..7b5a1baf0
--- /dev/null
+++ b/weed/worker/tasks/vacuum/detection.go
@@ -0,0 +1,112 @@
+package vacuum
+
+import (
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
+ "github.com/seaweedfs/seaweedfs/weed/worker/types"
+)
+
+// Detection implements the detection logic for vacuum tasks
+func Detection(metrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo, config base.TaskConfig) ([]*types.TaskDetectionResult, error) {
+ if !config.IsEnabled() {
+ return nil, nil
+ }
+
+ vacuumConfig := config.(*Config)
+ var results []*types.TaskDetectionResult
+ minVolumeAge := time.Duration(vacuumConfig.MinVolumeAgeSeconds) * time.Second
+
+ debugCount := 0
+ skippedDueToGarbage := 0
+ skippedDueToAge := 0
+
+ for _, metric := range metrics {
+ // Check if volume needs vacuum
+ if metric.GarbageRatio >= vacuumConfig.GarbageThreshold && metric.Age >= minVolumeAge {
+ priority := types.TaskPriorityNormal
+ if metric.GarbageRatio > 0.6 {
+ priority = types.TaskPriorityHigh
+ }
+
+ result := &types.TaskDetectionResult{
+ TaskType: types.TaskTypeVacuum,
+ VolumeID: metric.VolumeID,
+ Server: metric.Server,
+ Collection: metric.Collection,
+ Priority: priority,
+ Reason: "Volume has excessive garbage requiring vacuum",
+ ScheduleAt: time.Now(),
+ }
+ results = append(results, result)
+ } else {
+ // Debug why volume was not selected
+ if debugCount < 5 { // Limit debug output to first 5 volumes
+ if metric.GarbageRatio < vacuumConfig.GarbageThreshold {
+ skippedDueToGarbage++
+ }
+ if metric.Age < minVolumeAge {
+ skippedDueToAge++
+ }
+ }
+ debugCount++
+ }
+ }
+
+ // Log debug summary if no tasks were created
+ if len(results) == 0 && len(metrics) > 0 {
+ totalVolumes := len(metrics)
+ glog.Infof("VACUUM: No tasks created for %d volumes. Threshold=%.2f%%, MinAge=%s. Skipped: %d (garbage<threshold), %d (age<minimum)",
+ totalVolumes, vacuumConfig.GarbageThreshold*100, minVolumeAge, skippedDueToGarbage, skippedDueToAge)
+
+ // Show details for first few volumes
+ for i, metric := range metrics {
+ if i >= 3 { // Limit to first 3 volumes
+ break
+ }
+ glog.Infof("VACUUM: Volume %d: garbage=%.2f%% (need ≥%.2f%%), age=%s (need ≥%s)",
+ metric.VolumeID, metric.GarbageRatio*100, vacuumConfig.GarbageThreshold*100,
+ metric.Age.Truncate(time.Minute), minVolumeAge.Truncate(time.Minute))
+ }
+ }
+
+ return results, nil
+}
+
+// Scheduling implements the scheduling logic for vacuum tasks
+func Scheduling(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker, config base.TaskConfig) bool {
+ vacuumConfig := config.(*Config)
+
+ // Count running vacuum tasks
+ runningVacuumCount := 0
+ for _, runningTask := range runningTasks {
+ if runningTask.Type == types.TaskTypeVacuum {
+ runningVacuumCount++
+ }
+ }
+
+ // Check concurrency limit
+ if runningVacuumCount >= vacuumConfig.MaxConcurrent {
+ return false
+ }
+
+ // Check for available workers with vacuum capability
+ for _, worker := range availableWorkers {
+ if worker.CurrentLoad < worker.MaxConcurrent {
+ for _, capability := range worker.Capabilities {
+ if capability == types.TaskTypeVacuum {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+// CreateTask creates a new vacuum task instance
+func CreateTask(params types.TaskParams) (types.TaskInterface, error) {
+ // Create and return the vacuum task using existing Task type
+ return NewTask(params.Server, params.VolumeID), nil
+}
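
For illustration, Detection can be exercised with synthetic metrics as in the sketch below. The volume IDs, servers, ratios and ages are invented, and the call assumes *Config satisfies base.TaskConfig through the embedded BaseConfig, which is how the registration code passes it in. Note the unchecked type assertion in Detection and Scheduling: handing them any other TaskConfig implementation would panic.

package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

func main() {
	cfg := vacuum.NewDefaultConfig() // threshold 0.3, min age 24h

	metrics := []*types.VolumeHealthMetrics{
		{VolumeID: 1, Server: "srv1:8080", GarbageRatio: 0.55, Age: 48 * time.Hour}, // selected, normal priority
		{VolumeID: 2, Server: "srv1:8080", GarbageRatio: 0.10, Age: 48 * time.Hour}, // skipped: not enough garbage
		{VolumeID: 3, Server: "srv2:8080", GarbageRatio: 0.70, Age: 2 * time.Hour},  // skipped: too young
	}

	results, err := vacuum.Detection(metrics, nil, cfg)
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Printf("volume %d on %s: %s\n", r.VolumeID, r.Server, r.Reason)
	}
}
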
diff --git a/weed/worker/tasks/vacuum/ui.go b/weed/worker/tasks/vacuum/ui.go
deleted file mode 100644
index 6f67a801a..000000000
--- a/weed/worker/tasks/vacuum/ui.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package vacuum
-
-import (
- "fmt"
- "html/template"
- "strconv"
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// UIProvider provides the UI for vacuum task configuration
-type UIProvider struct {
- detector *VacuumDetector
- scheduler *VacuumScheduler
-}
-
-// NewUIProvider creates a new vacuum UI provider
-func NewUIProvider(detector *VacuumDetector, scheduler *VacuumScheduler) *UIProvider {
- return &UIProvider{
- detector: detector,
- scheduler: scheduler,
- }
-}
-
-// GetTaskType returns the task type
-func (ui *UIProvider) GetTaskType() types.TaskType {
- return types.TaskTypeVacuum
-}
-
-// GetDisplayName returns the human-readable name
-func (ui *UIProvider) GetDisplayName() string {
- return "Volume Vacuum"
-}
-
-// GetDescription returns a description of what this task does
-func (ui *UIProvider) GetDescription() string {
- return "Reclaims disk space by removing deleted files from volumes"
-}
-
-// GetIcon returns the icon CSS class for this task type
-func (ui *UIProvider) GetIcon() string {
- return "fas fa-broom text-primary"
-}
-
-// VacuumConfig represents the vacuum configuration
-type VacuumConfig struct {
- Enabled bool `json:"enabled"`
- GarbageThreshold float64 `json:"garbage_threshold"`
- ScanIntervalSeconds int `json:"scan_interval_seconds"`
- MaxConcurrent int `json:"max_concurrent"`
- MinVolumeAgeSeconds int `json:"min_volume_age_seconds"`
- MinIntervalSeconds int `json:"min_interval_seconds"`
-}
-
-// Helper functions for duration conversion
-func secondsToDuration(seconds int) time.Duration {
- return time.Duration(seconds) * time.Second
-}
-
-func durationToSeconds(d time.Duration) int {
- return int(d.Seconds())
-}
-
-// formatDurationForUser formats seconds as a user-friendly duration string
-func formatDurationForUser(seconds int) string {
- d := secondsToDuration(seconds)
- if d < time.Minute {
- return fmt.Sprintf("%ds", seconds)
- }
- if d < time.Hour {
- return fmt.Sprintf("%.0fm", d.Minutes())
- }
- if d < 24*time.Hour {
- return fmt.Sprintf("%.1fh", d.Hours())
- }
- return fmt.Sprintf("%.1fd", d.Hours()/24)
-}
-
-// RenderConfigForm renders the configuration form HTML
-func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
- config := ui.getCurrentVacuumConfig()
-
- // Build form using the FormBuilder helper
- form := types.NewFormBuilder()
-
- // Detection Settings
- form.AddCheckboxField(
- "enabled",
- "Enable Vacuum Tasks",
- "Whether vacuum tasks should be automatically created",
- config.Enabled,
- )
-
- form.AddNumberField(
- "garbage_threshold",
- "Garbage Threshold (%)",
- "Trigger vacuum when garbage ratio exceeds this percentage (0.0-1.0)",
- config.GarbageThreshold,
- true,
- )
-
- form.AddDurationField(
- "scan_interval",
- "Scan Interval",
- "How often to scan for volumes needing vacuum",
- secondsToDuration(config.ScanIntervalSeconds),
- true,
- )
-
- form.AddDurationField(
- "min_volume_age",
- "Minimum Volume Age",
- "Only vacuum volumes older than this duration",
- secondsToDuration(config.MinVolumeAgeSeconds),
- true,
- )
-
- // Scheduling Settings
- form.AddNumberField(
- "max_concurrent",
- "Max Concurrent Tasks",
- "Maximum number of vacuum tasks that can run simultaneously",
- float64(config.MaxConcurrent),
- true,
- )
-
- form.AddDurationField(
- "min_interval",
- "Minimum Interval",
- "Minimum time between vacuum operations on the same volume",
- secondsToDuration(config.MinIntervalSeconds),
- true,
- )
-
- // Generate organized form sections using Bootstrap components
- html := `
-<div class="row">
- <div class="col-12">
- <div class="card mb-4">
- <div class="card-header">
- <h5 class="mb-0">
- <i class="fas fa-search me-2"></i>
- Detection Settings
- </h5>
- </div>
- <div class="card-body">
-` + string(form.Build()) + `
- </div>
- </div>
- </div>
-</div>
-
-<script>
-function resetForm() {
- if (confirm('Reset all vacuum settings to defaults?')) {
- // Reset to default values
- document.querySelector('input[name="enabled"]').checked = true;
- document.querySelector('input[name="garbage_threshold"]').value = '0.3';
- document.querySelector('input[name="scan_interval"]').value = '30m';
- document.querySelector('input[name="min_volume_age"]').value = '1h';
- document.querySelector('input[name="max_concurrent"]').value = '2';
- document.querySelector('input[name="min_interval"]').value = '6h';
- }
-}
-</script>
-`
-
- return template.HTML(html), nil
-}
-
-// ParseConfigForm parses form data into configuration
-func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
- config := &VacuumConfig{}
-
- // Parse enabled checkbox
- config.Enabled = len(formData["enabled"]) > 0 && formData["enabled"][0] == "on"
-
- // Parse garbage threshold
- if thresholdStr := formData["garbage_threshold"]; len(thresholdStr) > 0 {
- if threshold, err := strconv.ParseFloat(thresholdStr[0], 64); err != nil {
- return nil, fmt.Errorf("invalid garbage threshold: %w", err)
- } else if threshold < 0 || threshold > 1 {
- return nil, fmt.Errorf("garbage threshold must be between 0.0 and 1.0")
- } else {
- config.GarbageThreshold = threshold
- }
- }
-
- // Parse scan interval
- if intervalStr := formData["scan_interval"]; len(intervalStr) > 0 {
- if interval, err := time.ParseDuration(intervalStr[0]); err != nil {
- return nil, fmt.Errorf("invalid scan interval: %w", err)
- } else {
- config.ScanIntervalSeconds = durationToSeconds(interval)
- }
- }
-
- // Parse min volume age
- if ageStr := formData["min_volume_age"]; len(ageStr) > 0 {
- if age, err := time.ParseDuration(ageStr[0]); err != nil {
- return nil, fmt.Errorf("invalid min volume age: %w", err)
- } else {
- config.MinVolumeAgeSeconds = durationToSeconds(age)
- }
- }
-
- // Parse max concurrent
- if concurrentStr := formData["max_concurrent"]; len(concurrentStr) > 0 {
- if concurrent, err := strconv.Atoi(concurrentStr[0]); err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %w", err)
- } else if concurrent < 1 {
- return nil, fmt.Errorf("max concurrent must be at least 1")
- } else {
- config.MaxConcurrent = concurrent
- }
- }
-
- // Parse min interval
- if intervalStr := formData["min_interval"]; len(intervalStr) > 0 {
- if interval, err := time.ParseDuration(intervalStr[0]); err != nil {
- return nil, fmt.Errorf("invalid min interval: %w", err)
- } else {
- config.MinIntervalSeconds = durationToSeconds(interval)
- }
- }
-
- return config, nil
-}
-
-// GetCurrentConfig returns the current configuration
-func (ui *UIProvider) GetCurrentConfig() interface{} {
- return ui.getCurrentVacuumConfig()
-}
-
-// ApplyConfig applies the new configuration
-func (ui *UIProvider) ApplyConfig(config interface{}) error {
- vacuumConfig, ok := config.(*VacuumConfig)
- if !ok {
- return fmt.Errorf("invalid config type, expected *VacuumConfig")
- }
-
- // Apply to detector
- if ui.detector != nil {
- ui.detector.SetEnabled(vacuumConfig.Enabled)
- ui.detector.SetGarbageThreshold(vacuumConfig.GarbageThreshold)
- ui.detector.SetScanInterval(secondsToDuration(vacuumConfig.ScanIntervalSeconds))
- ui.detector.SetMinVolumeAge(secondsToDuration(vacuumConfig.MinVolumeAgeSeconds))
- }
-
- // Apply to scheduler
- if ui.scheduler != nil {
- ui.scheduler.SetEnabled(vacuumConfig.Enabled)
- ui.scheduler.SetMaxConcurrent(vacuumConfig.MaxConcurrent)
- ui.scheduler.SetMinInterval(secondsToDuration(vacuumConfig.MinIntervalSeconds))
- }
-
- glog.V(1).Infof("Applied vacuum configuration: enabled=%v, threshold=%.1f%%, scan_interval=%s, max_concurrent=%d",
- vacuumConfig.Enabled, vacuumConfig.GarbageThreshold*100, formatDurationForUser(vacuumConfig.ScanIntervalSeconds), vacuumConfig.MaxConcurrent)
-
- return nil
-}
-
-// getCurrentVacuumConfig gets the current configuration from detector and scheduler
-func (ui *UIProvider) getCurrentVacuumConfig() *VacuumConfig {
- config := &VacuumConfig{
- // Default values (fallback if detectors/schedulers are nil)
- Enabled: true,
- GarbageThreshold: 0.3,
- ScanIntervalSeconds: 30 * 60,
- MinVolumeAgeSeconds: 1 * 60 * 60,
- MaxConcurrent: 2,
- MinIntervalSeconds: 6 * 60 * 60,
- }
-
- // Get current values from detector
- if ui.detector != nil {
- config.Enabled = ui.detector.IsEnabled()
- config.GarbageThreshold = ui.detector.GetGarbageThreshold()
- config.ScanIntervalSeconds = durationToSeconds(ui.detector.ScanInterval())
- config.MinVolumeAgeSeconds = durationToSeconds(ui.detector.GetMinVolumeAge())
- }
-
- // Get current values from scheduler
- if ui.scheduler != nil {
- config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
- config.MinIntervalSeconds = durationToSeconds(ui.scheduler.GetMinInterval())
- }
-
- return config
-}
-
-// RegisterUI registers the vacuum UI provider with the UI registry
-func RegisterUI(uiRegistry *types.UIRegistry, detector *VacuumDetector, scheduler *VacuumScheduler) {
- uiProvider := NewUIProvider(detector, scheduler)
- uiRegistry.RegisterUI(uiProvider)
-
- glog.V(1).Infof("✅ Registered vacuum task UI provider")
-}
-
-// Example: How to get the UI provider for external use
-func GetUIProvider(uiRegistry *types.UIRegistry) *UIProvider {
- provider := uiRegistry.GetProvider(types.TaskTypeVacuum)
- if provider == nil {
- return nil
- }
-
- if vacuumProvider, ok := provider.(*UIProvider); ok {
- return vacuumProvider
- }
-
- return nil
-}
diff --git a/weed/worker/tasks/vacuum/vacuum.go b/weed/worker/tasks/vacuum/vacuum.go
index dbfe35cf8..9cd254958 100644
--- a/weed/worker/tasks/vacuum/vacuum.go
+++ b/weed/worker/tasks/vacuum/vacuum.go
@@ -1,60 +1,184 @@
package vacuum
import (
+ "context"
"fmt"
+ "io"
"time"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb"
+ "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
)
// Task implements vacuum operation to reclaim disk space
type Task struct {
*tasks.BaseTask
- server string
- volumeID uint32
+ server string
+ volumeID uint32
+ garbageThreshold float64
}
// NewTask creates a new vacuum task instance
func NewTask(server string, volumeID uint32) *Task {
task := &Task{
- BaseTask: tasks.NewBaseTask(types.TaskTypeVacuum),
- server: server,
- volumeID: volumeID,
+ BaseTask: tasks.NewBaseTask(types.TaskTypeVacuum),
+ server: server,
+ volumeID: volumeID,
+ garbageThreshold: 0.3, // Default 30% threshold
}
return task
}
-// Execute executes the vacuum task
+// Execute performs the vacuum operation
func (t *Task) Execute(params types.TaskParams) error {
- glog.Infof("Starting vacuum task for volume %d on server %s", t.volumeID, t.server)
-
- // Simulate vacuum operation with progress updates
- steps := []struct {
- name string
- duration time.Duration
- progress float64
- }{
- {"Scanning volume", 1 * time.Second, 20},
- {"Identifying deleted files", 2 * time.Second, 50},
- {"Compacting data", 3 * time.Second, 80},
- {"Finalizing vacuum", 1 * time.Second, 100},
+ // Use BaseTask.ExecuteTask to handle logging initialization
+ return t.ExecuteTask(context.Background(), params, t.executeImpl)
+}
+
+// executeImpl is the actual vacuum implementation
+func (t *Task) executeImpl(ctx context.Context, params types.TaskParams) error {
+ t.LogInfo("Starting vacuum for volume %d on server %s", t.volumeID, t.server)
+
+ // Parse garbage threshold from typed parameters
+ if params.TypedParams != nil {
+ if vacuumParams := params.TypedParams.GetVacuumParams(); vacuumParams != nil {
+ t.garbageThreshold = vacuumParams.GarbageThreshold
+ t.LogWithFields("INFO", "Using garbage threshold from parameters", map[string]interface{}{
+ "threshold": t.garbageThreshold,
+ })
+ }
+ }
+
+ // Convert server address to gRPC address and use proper dial option
+ grpcAddress := pb.ServerToGrpcAddress(t.server)
+ var dialOpt grpc.DialOption = grpc.WithTransportCredentials(insecure.NewCredentials())
+ if params.GrpcDialOption != nil {
+ dialOpt = params.GrpcDialOption
+ }
+
+ conn, err := grpc.NewClient(grpcAddress, dialOpt)
+ if err != nil {
+ t.LogError("Failed to connect to volume server %s: %v", t.server, err)
+ return fmt.Errorf("failed to connect to volume server %s: %v", t.server, err)
+ }
+ defer conn.Close()
+
+ client := volume_server_pb.NewVolumeServerClient(conn)
+
+ // Step 1: Check vacuum eligibility
+ t.SetProgress(10.0)
+ t.LogDebug("Checking vacuum eligibility for volume %d", t.volumeID)
+
+ checkResp, err := client.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{
+ VolumeId: t.volumeID,
+ })
+ if err != nil {
+ t.LogError("Vacuum check failed for volume %d: %v", t.volumeID, err)
+ return fmt.Errorf("vacuum check failed for volume %d: %v", t.volumeID, err)
+ }
+
+ // Check if garbage ratio meets threshold
+ if checkResp.GarbageRatio < t.garbageThreshold {
+ t.LogWarning("Volume %d garbage ratio %.2f%% is below threshold %.2f%%, skipping vacuum",
+ t.volumeID, checkResp.GarbageRatio*100, t.garbageThreshold*100)
+ return fmt.Errorf("volume %d garbage ratio %.2f%% is below threshold %.2f%%, skipping vacuum",
+ t.volumeID, checkResp.GarbageRatio*100, t.garbageThreshold*100)
+ }
+
+ t.LogWithFields("INFO", "Volume eligible for vacuum", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "garbage_ratio": checkResp.GarbageRatio,
+ "threshold": t.garbageThreshold,
+ "garbage_percent": checkResp.GarbageRatio * 100,
+ })
+
+ // Step 2: Compact volume
+ t.SetProgress(30.0)
+ t.LogInfo("Starting compact for volume %d", t.volumeID)
+
+ compactStream, err := client.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{
+ VolumeId: t.volumeID,
+ })
+ if err != nil {
+ t.LogError("Vacuum compact failed for volume %d: %v", t.volumeID, err)
+ return fmt.Errorf("vacuum compact failed for volume %d: %v", t.volumeID, err)
}
- for _, step := range steps {
- if t.IsCancelled() {
- return fmt.Errorf("vacuum task cancelled")
+ // Process compact stream and track progress
+ var processedBytes int64
+ var totalBytes int64
+
+ for {
+ resp, err := compactStream.Recv()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.LogError("Vacuum compact stream error for volume %d: %v", t.volumeID, err)
+ return fmt.Errorf("vacuum compact stream error for volume %d: %v", t.volumeID, err)
}
- glog.V(1).Infof("Vacuum task step: %s", step.name)
- t.SetProgress(step.progress)
+ processedBytes = resp.ProcessedBytes
+ if resp.LoadAvg_1M > 0 {
+ totalBytes = int64(resp.LoadAvg_1M) // This is a rough approximation
+ }
+
+ // Update progress based on processed bytes (30% to 70% of total progress)
+ if totalBytes > 0 {
+ compactProgress := float64(processedBytes) / float64(totalBytes)
+ if compactProgress > 1.0 {
+ compactProgress = 1.0
+ }
+ progress := 30.0 + (compactProgress * 40.0) // 30% to 70%
+ t.SetProgress(progress)
+ }
- // Simulate work
- time.Sleep(step.duration)
+ t.LogWithFields("DEBUG", "Volume compact progress", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "processed_bytes": processedBytes,
+ "total_bytes": totalBytes,
+ "compact_progress": fmt.Sprintf("%.1f%%", (float64(processedBytes)/float64(totalBytes))*100),
+ })
}
- glog.Infof("Vacuum task completed for volume %d on server %s", t.volumeID, t.server)
+ // Step 3: Commit vacuum changes
+ t.SetProgress(80.0)
+ t.LogInfo("Committing vacuum for volume %d", t.volumeID)
+
+ commitResp, err := client.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{
+ VolumeId: t.volumeID,
+ })
+ if err != nil {
+ t.LogError("Vacuum commit failed for volume %d: %v", t.volumeID, err)
+ return fmt.Errorf("vacuum commit failed for volume %d: %v", t.volumeID, err)
+ }
+
+ // Step 4: Cleanup temporary files
+ t.SetProgress(90.0)
+ t.LogInfo("Cleaning up vacuum files for volume %d", t.volumeID)
+
+ _, err = client.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{
+ VolumeId: t.volumeID,
+ })
+ if err != nil {
+ // Log warning but don't fail the task
+ t.LogWarning("Vacuum cleanup warning for volume %d: %v", t.volumeID, err)
+ }
+
+ t.SetProgress(100.0)
+
+ newVolumeSize := commitResp.VolumeSize
+ t.LogWithFields("INFO", "Successfully completed vacuum", map[string]interface{}{
+ "volume_id": t.volumeID,
+ "server": t.server,
+ "new_volume_size": newVolumeSize,
+ "garbage_reclaimed": true,
+ })
+
return nil
}
@@ -71,9 +195,20 @@ func (t *Task) Validate(params types.TaskParams) error {
// EstimateTime estimates the time needed for the task
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
- // Base time for vacuum operation
- baseTime := 25 * time.Second
+ // Base time for vacuum operations - varies by volume size and garbage ratio
+ // Typically vacuum is faster than EC encoding
+ baseTime := 5 * time.Minute
- // Could adjust based on volume size or usage patterns
+ // Use default estimation since volume size is not available in typed params
return baseTime
}
+
+// GetProgress returns the current progress
+func (t *Task) GetProgress() float64 {
+ return t.BaseTask.GetProgress()
+}
+
+// Cancel cancels the task
+func (t *Task) Cancel() error {
+ return t.BaseTask.Cancel()
+}
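
Finally, a minimal sketch of driving the rewritten task end to end. The address and volume ID are placeholders and assume a volume server is actually reachable there; with no GrpcDialOption in the params, the task falls back to insecure transport credentials as coded above, and progress steps through 10/30/80/90/100% across the check, compact, commit and cleanup calls.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

func main() {
	task := vacuum.NewTask("localhost:8080", 42) // placeholder server and volume

	// Execute wraps executeImpl in BaseTask.ExecuteTask, so a task logger is
	// created automatically and FinishTask records success or failure.
	if err := task.Execute(types.TaskParams{VolumeID: 42, Server: "localhost:8080"}); err != nil {
		// e.g. "volume 42 garbage ratio 1.20% is below threshold 30.00%, skipping vacuum"
		fmt.Println("vacuum failed:", err.Error())
	}
}
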
diff --git a/weed/worker/tasks/vacuum/vacuum_detector.go b/weed/worker/tasks/vacuum/vacuum_detector.go
deleted file mode 100644
index 6d7230c6c..000000000
--- a/weed/worker/tasks/vacuum/vacuum_detector.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package vacuum
-
-import (
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// VacuumDetector implements vacuum task detection using code instead of schemas
-type VacuumDetector struct {
- enabled bool
- garbageThreshold float64
- minVolumeAge time.Duration
- scanInterval time.Duration
-}
-
-// Compile-time interface assertions
-var (
- _ types.TaskDetector = (*VacuumDetector)(nil)
- _ types.PolicyConfigurableDetector = (*VacuumDetector)(nil)
-)
-
-// NewVacuumDetector creates a new simple vacuum detector
-func NewVacuumDetector() *VacuumDetector {
- return &VacuumDetector{
- enabled: true,
- garbageThreshold: 0.3,
- minVolumeAge: 24 * time.Hour,
- scanInterval: 30 * time.Minute,
- }
-}
-
-// GetTaskType returns the task type
-func (d *VacuumDetector) GetTaskType() types.TaskType {
- return types.TaskTypeVacuum
-}
-
-// ScanForTasks scans for volumes that need vacuum operations
-func (d *VacuumDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
- if !d.enabled {
- return nil, nil
- }
-
- var results []*types.TaskDetectionResult
-
- for _, metric := range volumeMetrics {
- // Check if volume needs vacuum
- if metric.GarbageRatio >= d.garbageThreshold && metric.Age >= d.minVolumeAge {
- // Higher priority for volumes with more garbage
- priority := types.TaskPriorityNormal
- if metric.GarbageRatio > 0.6 {
- priority = types.TaskPriorityHigh
- }
-
- result := &types.TaskDetectionResult{
- TaskType: types.TaskTypeVacuum,
- VolumeID: metric.VolumeID,
- Server: metric.Server,
- Collection: metric.Collection,
- Priority: priority,
- Reason: "Volume has excessive garbage requiring vacuum",
- Parameters: map[string]interface{}{
- "garbage_ratio": metric.GarbageRatio,
- "volume_age": metric.Age.String(),
- },
- ScheduleAt: time.Now(),
- }
- results = append(results, result)
- }
- }
-
- glog.V(2).Infof("Vacuum detector found %d volumes needing vacuum", len(results))
- return results, nil
-}
-
-// ScanInterval returns how often this detector should scan
-func (d *VacuumDetector) ScanInterval() time.Duration {
- return d.scanInterval
-}
-
-// IsEnabled returns whether this detector is enabled
-func (d *VacuumDetector) IsEnabled() bool {
- return d.enabled
-}
-
-// Configuration setters
-
-func (d *VacuumDetector) SetEnabled(enabled bool) {
- d.enabled = enabled
-}
-
-func (d *VacuumDetector) SetGarbageThreshold(threshold float64) {
- d.garbageThreshold = threshold
-}
-
-func (d *VacuumDetector) SetScanInterval(interval time.Duration) {
- d.scanInterval = interval
-}
-
-func (d *VacuumDetector) SetMinVolumeAge(age time.Duration) {
- d.minVolumeAge = age
-}
-
-// GetGarbageThreshold returns the current garbage threshold
-func (d *VacuumDetector) GetGarbageThreshold() float64 {
- return d.garbageThreshold
-}
-
-// GetMinVolumeAge returns the minimum volume age
-func (d *VacuumDetector) GetMinVolumeAge() time.Duration {
- return d.minVolumeAge
-}
-
-// GetScanInterval returns the scan interval
-func (d *VacuumDetector) GetScanInterval() time.Duration {
- return d.scanInterval
-}
-
-// ConfigureFromPolicy configures the detector based on the maintenance policy
-func (d *VacuumDetector) ConfigureFromPolicy(policy interface{}) {
- // Type assert to the maintenance policy type we expect
- if maintenancePolicy, ok := policy.(interface {
- GetVacuumEnabled() bool
- GetVacuumGarbageRatio() float64
- }); ok {
- d.SetEnabled(maintenancePolicy.GetVacuumEnabled())
- d.SetGarbageThreshold(maintenancePolicy.GetVacuumGarbageRatio())
- } else {
- glog.V(1).Infof("Could not configure vacuum detector from policy: unsupported policy type")
- }
-}
diff --git a/weed/worker/tasks/vacuum/vacuum_register.go b/weed/worker/tasks/vacuum/vacuum_register.go
index 7d930a88e..d660c9d42 100644
--- a/weed/worker/tasks/vacuum/vacuum_register.go
+++ b/weed/worker/tasks/vacuum/vacuum_register.go
@@ -2,80 +2,71 @@ package vacuum
import (
"fmt"
+ "time"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
+ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
-// Factory creates vacuum task instances
-type Factory struct {
- *tasks.BaseTaskFactory
-}
+// Global variable to hold the task definition for configuration updates
+var globalTaskDef *base.TaskDefinition
-// NewFactory creates a new vacuum task factory
-func NewFactory() *Factory {
- return &Factory{
- BaseTaskFactory: tasks.NewBaseTaskFactory(
- types.TaskTypeVacuum,
- []string{"vacuum", "storage"},
- "Vacuum operation to reclaim disk space by removing deleted files",
- ),
- }
-}
-
-// Create creates a new vacuum task instance
-func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
- // Validate parameters
- if params.VolumeID == 0 {
- return nil, fmt.Errorf("volume_id is required")
- }
- if params.Server == "" {
- return nil, fmt.Errorf("server is required")
- }
-
- task := NewTask(params.Server, params.VolumeID)
- task.SetEstimatedDuration(task.EstimateTime(params))
+// Auto-register this task when the package is imported
+func init() {
+ RegisterVacuumTask()
- return task, nil
+ // Register config updater
+ tasks.AutoRegisterConfigUpdater(types.TaskTypeVacuum, UpdateConfigFromPersistence)
}
-// Shared detector and scheduler instances
-var (
- sharedDetector *VacuumDetector
- sharedScheduler *VacuumScheduler
-)
+// RegisterVacuumTask registers the vacuum task with the new architecture
+func RegisterVacuumTask() {
+ // Create configuration instance
+ config := NewDefaultConfig()
-// getSharedInstances returns the shared detector and scheduler instances
-func getSharedInstances() (*VacuumDetector, *VacuumScheduler) {
- if sharedDetector == nil {
- sharedDetector = NewVacuumDetector()
- }
- if sharedScheduler == nil {
- sharedScheduler = NewVacuumScheduler()
+ // Create complete task definition
+ taskDef := &base.TaskDefinition{
+ Type: types.TaskTypeVacuum,
+ Name: "vacuum",
+ DisplayName: "Volume Vacuum",
+ Description: "Reclaims disk space by removing deleted files from volumes",
+ Icon: "fas fa-broom text-primary",
+ Capabilities: []string{"vacuum", "storage"},
+
+ Config: config,
+ ConfigSpec: GetConfigSpec(),
+ CreateTask: CreateTask,
+ DetectionFunc: Detection,
+ ScanInterval: 2 * time.Hour,
+ SchedulingFunc: Scheduling,
+ MaxConcurrent: 2,
+ RepeatInterval: 7 * 24 * time.Hour,
}
- return sharedDetector, sharedScheduler
-}
-// GetSharedInstances returns the shared detector and scheduler instances (public access)
-func GetSharedInstances() (*VacuumDetector, *VacuumScheduler) {
- return getSharedInstances()
+ // Store task definition globally for configuration updates
+ globalTaskDef = taskDef
+
+ // Register everything with a single function call!
+ base.RegisterTask(taskDef)
}
-// Auto-register this task when the package is imported
-func init() {
- factory := NewFactory()
- tasks.AutoRegister(types.TaskTypeVacuum, factory)
+// UpdateConfigFromPersistence updates the vacuum configuration from persistence
+func UpdateConfigFromPersistence(configPersistence interface{}) error {
+ if globalTaskDef == nil {
+ return fmt.Errorf("vacuum task not registered")
+ }
- // Get shared instances for all registrations
- detector, scheduler := getSharedInstances()
+ // Load configuration from persistence
+ newConfig := LoadConfigFromPersistence(configPersistence)
+ if newConfig == nil {
+ return fmt.Errorf("failed to load configuration from persistence")
+ }
- // Register with types registry
- tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
- registry.RegisterTask(detector, scheduler)
- })
+ // Update the task definition's config
+ globalTaskDef.Config = newConfig
- // Register with UI registry using the same instances
- tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
- RegisterUI(uiRegistry, detector, scheduler)
- })
+ glog.V(1).Infof("Updated vacuum task configuration from persistence")
+ return nil
}
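For reference, the same declarative registration pattern applied to a hypothetical task type; the base.TaskDefinition fields and the base.RegisterTask call mirror the vacuum registration above, while every Example* identifier is a placeholder rather than an API that exists in the repository.

package example

import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

// RegisterExampleTask sketches the single-call registration used by RegisterVacuumTask.
func RegisterExampleTask() {
	taskDef := &base.TaskDefinition{
		Type:         types.TaskType("example"),
		Name:         "example",
		DisplayName:  "Example Task",
		Description:  "Illustrates the declarative task registration pattern",
		Icon:         "fas fa-cog text-secondary",
		Capabilities: []string{"example"},

		Config:         NewDefaultExampleConfig(), // placeholder config constructor
		ConfigSpec:     GetExampleConfigSpec(),    // placeholder config schema
		CreateTask:     CreateExampleTask,         // placeholder task factory
		DetectionFunc:  ExampleDetection,          // placeholder detection function
		ScanInterval:   time.Hour,
		SchedulingFunc: ExampleScheduling, // placeholder scheduling function
		MaxConcurrent:  1,
		RepeatInterval: 24 * time.Hour,
	}
	base.RegisterTask(taskDef)
}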
diff --git a/weed/worker/tasks/vacuum/vacuum_scheduler.go b/weed/worker/tasks/vacuum/vacuum_scheduler.go
deleted file mode 100644
index 2b67a9f40..000000000
--- a/weed/worker/tasks/vacuum/vacuum_scheduler.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package vacuum
-
-import (
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/worker/types"
-)
-
-// VacuumScheduler implements vacuum task scheduling using code instead of schemas
-type VacuumScheduler struct {
- enabled bool
- maxConcurrent int
- minInterval time.Duration
-}
-
-// Compile-time interface assertions
-var (
- _ types.TaskScheduler = (*VacuumScheduler)(nil)
-)
-
-// NewVacuumScheduler creates a new simple vacuum scheduler
-func NewVacuumScheduler() *VacuumScheduler {
- return &VacuumScheduler{
- enabled: true,
- maxConcurrent: 2,
- minInterval: 6 * time.Hour,
- }
-}
-
-// GetTaskType returns the task type
-func (s *VacuumScheduler) GetTaskType() types.TaskType {
- return types.TaskTypeVacuum
-}
-
-// CanScheduleNow determines if a vacuum task can be scheduled right now
-func (s *VacuumScheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
- // Check if scheduler is enabled
- if !s.enabled {
- return false
- }
-
- // Check concurrent limit
- runningVacuumCount := 0
- for _, runningTask := range runningTasks {
- if runningTask.Type == types.TaskTypeVacuum {
- runningVacuumCount++
- }
- }
-
- if runningVacuumCount >= s.maxConcurrent {
- return false
- }
-
- // Check if there's an available worker with vacuum capability
- for _, worker := range availableWorkers {
- if worker.CurrentLoad < worker.MaxConcurrent {
- for _, capability := range worker.Capabilities {
- if capability == types.TaskTypeVacuum {
- return true
- }
- }
- }
- }
-
- return false
-}
-
-// GetPriority returns the priority for this task
-func (s *VacuumScheduler) GetPriority(task *types.Task) types.TaskPriority {
- // Could adjust priority based on task parameters
- if params, ok := task.Parameters["garbage_ratio"].(float64); ok {
- if params > 0.8 {
- return types.TaskPriorityHigh
- }
- }
- return task.Priority
-}
-
-// GetMaxConcurrent returns max concurrent tasks of this type
-func (s *VacuumScheduler) GetMaxConcurrent() int {
- return s.maxConcurrent
-}
-
-// GetDefaultRepeatInterval returns the default interval to wait before repeating vacuum tasks
-func (s *VacuumScheduler) GetDefaultRepeatInterval() time.Duration {
- return s.minInterval
-}
-
-// IsEnabled returns whether this scheduler is enabled
-func (s *VacuumScheduler) IsEnabled() bool {
- return s.enabled
-}
-
-// Configuration setters
-
-func (s *VacuumScheduler) SetEnabled(enabled bool) {
- s.enabled = enabled
-}
-
-func (s *VacuumScheduler) SetMaxConcurrent(max int) {
- s.maxConcurrent = max
-}
-
-func (s *VacuumScheduler) SetMinInterval(interval time.Duration) {
- s.minInterval = interval
-}
-
-// GetMinInterval returns the minimum interval
-func (s *VacuumScheduler) GetMinInterval() time.Duration {
- return s.minInterval
-}
diff --git a/weed/worker/types/config_types.go b/weed/worker/types/config_types.go
index 8e4113580..5a9e94fd5 100644
--- a/weed/worker/types/config_types.go
+++ b/weed/worker/types/config_types.go
@@ -3,6 +3,8 @@ package types
import (
"sync"
"time"
+
+ "google.golang.org/grpc"
)
// WorkerConfig represents the configuration for a worker
@@ -12,7 +14,9 @@ type WorkerConfig struct {
MaxConcurrent int `json:"max_concurrent"`
HeartbeatInterval time.Duration `json:"heartbeat_interval"`
TaskRequestInterval time.Duration `json:"task_request_interval"`
+ BaseWorkingDir string `json:"base_working_dir,omitempty"`
CustomParameters map[string]interface{} `json:"custom_parameters,omitempty"`
+ GrpcDialOption grpc.DialOption `json:"-"` // Not serializable, for runtime use only
}
// MaintenanceConfig represents the configuration for the maintenance system
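A minimal sketch of how the two new WorkerConfig fields are used at runtime; types.DefaultWorkerConfig is referenced later in worker.go, the working directory path is hypothetical, and an insecure gRPC credential is used purely for illustration. Because GrpcDialOption is tagged json:"-", it never appears in a serialized config.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/worker/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cfg := types.DefaultWorkerConfig()
	cfg.BaseWorkingDir = "/var/lib/seaweedfs-worker" // hypothetical path
	cfg.GrpcDialOption = grpc.WithTransportCredentials(insecure.NewCredentials())

	// The dial option stays runtime-only; only the serializable fields are emitted.
	data, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}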
diff --git a/weed/worker/types/data_types.go b/weed/worker/types/data_types.go
index 4a018563a..c6ba62a18 100644
--- a/weed/worker/types/data_types.go
+++ b/weed/worker/types/data_types.go
@@ -16,6 +16,8 @@ type ClusterInfo struct {
type VolumeHealthMetrics struct {
VolumeID uint32
Server string
+ DiskType string // Disk type (e.g., "hdd", "ssd") or disk path (e.g., "/data1")
+ DiskId uint32 // ID of the disk in Store.Locations array
Collection string
Size uint64
DeletedBytes uint64
diff --git a/weed/worker/types/task_types.go b/weed/worker/types/task_types.go
index b0fdb009f..dc454c211 100644
--- a/weed/worker/types/task_types.go
+++ b/weed/worker/types/task_types.go
@@ -2,6 +2,9 @@ package types
import (
"time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+ "google.golang.org/grpc"
)
// TaskType represents the type of maintenance task
@@ -11,6 +14,7 @@ const (
TaskTypeVacuum TaskType = "vacuum"
TaskTypeErasureCoding TaskType = "erasure_coding"
TaskTypeBalance TaskType = "balance"
+ TaskTypeReplication TaskType = "replication"
)
// TaskStatus represents the status of a maintenance task
@@ -26,53 +30,57 @@ const (
)
// TaskPriority represents the priority of a maintenance task
-type TaskPriority int
+type TaskPriority string
const (
- TaskPriorityLow TaskPriority = 1
- TaskPriorityNormal TaskPriority = 5
- TaskPriorityHigh TaskPriority = 10
+ TaskPriorityLow TaskPriority = "low"
+ TaskPriorityMedium TaskPriority = "medium"
+ TaskPriorityNormal TaskPriority = "normal"
+ TaskPriorityHigh TaskPriority = "high"
+ TaskPriorityCritical TaskPriority = "critical"
)
// Task represents a maintenance task
type Task struct {
- ID string `json:"id"`
- Type TaskType `json:"type"`
- Status TaskStatus `json:"status"`
- Priority TaskPriority `json:"priority"`
- VolumeID uint32 `json:"volume_id,omitempty"`
- Server string `json:"server,omitempty"`
- Collection string `json:"collection,omitempty"`
- WorkerID string `json:"worker_id,omitempty"`
- Progress float64 `json:"progress"`
- Error string `json:"error,omitempty"`
- Parameters map[string]interface{} `json:"parameters,omitempty"`
- CreatedAt time.Time `json:"created_at"`
- ScheduledAt time.Time `json:"scheduled_at"`
- StartedAt *time.Time `json:"started_at,omitempty"`
- CompletedAt *time.Time `json:"completed_at,omitempty"`
- RetryCount int `json:"retry_count"`
- MaxRetries int `json:"max_retries"`
+ ID string `json:"id"`
+ Type TaskType `json:"type"`
+ Status TaskStatus `json:"status"`
+ Priority TaskPriority `json:"priority"`
+ VolumeID uint32 `json:"volume_id,omitempty"`
+ Server string `json:"server,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ WorkerID string `json:"worker_id,omitempty"`
+ Progress float64 `json:"progress"`
+ Error string `json:"error,omitempty"`
+ TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"`
+ CreatedAt time.Time `json:"created_at"`
+ ScheduledAt time.Time `json:"scheduled_at"`
+ StartedAt *time.Time `json:"started_at,omitempty"`
+ CompletedAt *time.Time `json:"completed_at,omitempty"`
+ RetryCount int `json:"retry_count"`
+ MaxRetries int `json:"max_retries"`
}
// TaskParams represents parameters for task execution
type TaskParams struct {
- VolumeID uint32 `json:"volume_id,omitempty"`
- Server string `json:"server,omitempty"`
- Collection string `json:"collection,omitempty"`
- Parameters map[string]interface{} `json:"parameters,omitempty"`
+ VolumeID uint32 `json:"volume_id,omitempty"`
+ Server string `json:"server,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ WorkingDir string `json:"working_dir,omitempty"`
+ TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"`
+ GrpcDialOption grpc.DialOption `json:"-"` // Not serializable, for runtime use only
}
// TaskDetectionResult represents the result of scanning for maintenance needs
type TaskDetectionResult struct {
- TaskType TaskType `json:"task_type"`
- VolumeID uint32 `json:"volume_id,omitempty"`
- Server string `json:"server,omitempty"`
- Collection string `json:"collection,omitempty"`
- Priority TaskPriority `json:"priority"`
- Reason string `json:"reason"`
- Parameters map[string]interface{} `json:"parameters,omitempty"`
- ScheduleAt time.Time `json:"schedule_at"`
+ TaskType TaskType `json:"task_type"`
+ VolumeID uint32 `json:"volume_id,omitempty"`
+ Server string `json:"server,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ Priority TaskPriority `json:"priority"`
+ Reason string `json:"reason"`
+ TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"`
+ ScheduleAt time.Time `json:"schedule_at"`
}
// ClusterReplicationTask represents a cluster replication task parameters
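Because TaskPriority is now a string rather than an int, code that previously compared priorities numerically needs an explicit ordering. A hypothetical helper (not part of this change) sketching one such mapping; the rank chosen for TaskPriorityMedium relative to TaskPriorityNormal is an assumption.

// priorityRank maps the string-valued priorities onto comparable integers.
func priorityRank(p types.TaskPriority) int {
	switch p {
	case types.TaskPriorityCritical:
		return 4
	case types.TaskPriorityHigh:
		return 3
	case types.TaskPriorityMedium, types.TaskPriorityNormal:
		return 2 // assumption: treat medium and normal as equivalent
	case types.TaskPriorityLow:
		return 1
	default:
		return 0
	}
}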
diff --git a/weed/worker/types/task_ui.go b/weed/worker/types/task_ui.go
index e1e2752ba..9294127a8 100644
--- a/weed/worker/types/task_ui.go
+++ b/weed/worker/types/task_ui.go
@@ -1,12 +1,60 @@
package types
import (
- "fmt"
- "html/template"
"time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)
+// Helper function to convert seconds to the most appropriate interval unit
+func secondsToIntervalValueUnit(totalSeconds int) (int, string) {
+ if totalSeconds == 0 {
+ return 0, "minute"
+ }
+
+ // Check if it's evenly divisible by days
+ if totalSeconds%(24*3600) == 0 {
+ return totalSeconds / (24 * 3600), "day"
+ }
+
+ // Check if it's evenly divisible by hours
+ if totalSeconds%3600 == 0 {
+ return totalSeconds / 3600, "hour"
+ }
+
+ // Default to minutes
+ return totalSeconds / 60, "minute"
+}
+
+// Helper function to convert interval value and unit to seconds
+func IntervalValueUnitToSeconds(value int, unit string) int {
+ switch unit {
+ case "day":
+ return value * 24 * 3600
+ case "hour":
+ return value * 3600
+ case "minute":
+ return value * 60
+ default:
+ return value * 60 // Default to minutes
+ }
+}
+
+// TaskConfig defines the interface for task configurations
+// This matches the interfaces used in base package and handlers
+type TaskConfig interface {
+ // Common methods from BaseConfig
+ IsEnabled() bool
+ SetEnabled(enabled bool)
+ Validate() error
+
+ // Protobuf serialization methods - no more interface{}!
+ ToTaskPolicy() *worker_pb.TaskPolicy
+ FromTaskPolicy(policy *worker_pb.TaskPolicy) error
+}
+
// TaskUIProvider defines how tasks provide their configuration UI
+// This interface is simplified to work with schema-driven configuration
type TaskUIProvider interface {
// GetTaskType returns the task type
GetTaskType() TaskType
@@ -20,17 +68,14 @@ type TaskUIProvider interface {
// GetIcon returns the icon CSS class or HTML for this task type
GetIcon() string
- // RenderConfigForm renders the configuration form HTML
- RenderConfigForm(currentConfig interface{}) (template.HTML, error)
+ // GetCurrentConfig returns the current configuration as TaskConfig
+ GetCurrentConfig() TaskConfig
- // ParseConfigForm parses form data into configuration
- ParseConfigForm(formData map[string][]string) (interface{}, error)
+ // ApplyTaskPolicy applies protobuf TaskPolicy configuration
+ ApplyTaskPolicy(policy *worker_pb.TaskPolicy) error
- // GetCurrentConfig returns the current configuration
- GetCurrentConfig() interface{}
-
- // ApplyConfig applies the new configuration
- ApplyConfig(config interface{}) error
+ // ApplyTaskConfig applies TaskConfig interface configuration
+ ApplyTaskConfig(config TaskConfig) error
}
// TaskStats represents runtime statistics for a task type
@@ -87,195 +132,10 @@ type TaskListData struct {
}
type TaskDetailsData struct {
- Task *Task `json:"task"`
- TaskType TaskType `json:"task_type"`
- DisplayName string `json:"display_name"`
- Description string `json:"description"`
- Stats *TaskStats `json:"stats"`
- ConfigForm template.HTML `json:"config_form"`
- LastUpdated time.Time `json:"last_updated"`
-}
-
-// Common form field types for simple form building
-type FormField struct {
- Name string `json:"name"`
- Label string `json:"label"`
- Type string `json:"type"` // text, number, checkbox, select, duration
- Value interface{} `json:"value"`
- Description string `json:"description"`
- Required bool `json:"required"`
- Options []FormOption `json:"options,omitempty"` // For select fields
-}
-
-type FormOption struct {
- Value string `json:"value"`
- Label string `json:"label"`
-}
-
-// Helper for building forms in code
-type FormBuilder struct {
- fields []FormField
-}
-
-// NewFormBuilder creates a new form builder
-func NewFormBuilder() *FormBuilder {
- return &FormBuilder{
- fields: make([]FormField, 0),
- }
-}
-
-// AddTextField adds a text input field
-func (fb *FormBuilder) AddTextField(name, label, description string, value string, required bool) *FormBuilder {
- fb.fields = append(fb.fields, FormField{
- Name: name,
- Label: label,
- Type: "text",
- Value: value,
- Description: description,
- Required: required,
- })
- return fb
-}
-
-// AddNumberField adds a number input field
-func (fb *FormBuilder) AddNumberField(name, label, description string, value float64, required bool) *FormBuilder {
- fb.fields = append(fb.fields, FormField{
- Name: name,
- Label: label,
- Type: "number",
- Value: value,
- Description: description,
- Required: required,
- })
- return fb
-}
-
-// AddCheckboxField adds a checkbox field
-func (fb *FormBuilder) AddCheckboxField(name, label, description string, value bool) *FormBuilder {
- fb.fields = append(fb.fields, FormField{
- Name: name,
- Label: label,
- Type: "checkbox",
- Value: value,
- Description: description,
- Required: false,
- })
- return fb
-}
-
-// AddSelectField adds a select dropdown field
-func (fb *FormBuilder) AddSelectField(name, label, description string, value string, options []FormOption, required bool) *FormBuilder {
- fb.fields = append(fb.fields, FormField{
- Name: name,
- Label: label,
- Type: "select",
- Value: value,
- Description: description,
- Required: required,
- Options: options,
- })
- return fb
-}
-
-// AddDurationField adds a duration input field
-func (fb *FormBuilder) AddDurationField(name, label, description string, value time.Duration, required bool) *FormBuilder {
- fb.fields = append(fb.fields, FormField{
- Name: name,
- Label: label,
- Type: "duration",
- Value: value.String(),
- Description: description,
- Required: required,
- })
- return fb
-}
-
-// Build generates the HTML form fields with Bootstrap styling
-func (fb *FormBuilder) Build() template.HTML {
- html := ""
-
- for _, field := range fb.fields {
- html += fb.renderField(field)
- }
-
- return template.HTML(html)
-}
-
-// renderField renders a single form field with Bootstrap classes
-func (fb *FormBuilder) renderField(field FormField) string {
- html := "<div class=\"mb-3\">\n"
-
- // Special handling for checkbox fields
- if field.Type == "checkbox" {
- checked := ""
- if field.Value.(bool) {
- checked = " checked"
- }
- html += " <div class=\"form-check\">\n"
- html += " <input type=\"checkbox\" class=\"form-check-input\" id=\"" + field.Name + "\" name=\"" + field.Name + "\"" + checked + ">\n"
- html += " <label class=\"form-check-label\" for=\"" + field.Name + "\">" + field.Label + "</label>\n"
- html += " </div>\n"
- // Description for checkbox
- if field.Description != "" {
- html += " <div class=\"form-text text-muted\">" + field.Description + "</div>\n"
- }
- html += "</div>\n"
- return html
- }
-
- // Label for non-checkbox fields
- required := ""
- if field.Required {
- required = " <span class=\"text-danger\">*</span>"
- }
- html += " <label for=\"" + field.Name + "\" class=\"form-label\">" + field.Label + required + "</label>\n"
-
- // Input based on type
- switch field.Type {
- case "text":
- html += " <input type=\"text\" class=\"form-control\" id=\"" + field.Name + "\" name=\"" + field.Name + "\" value=\"" + field.Value.(string) + "\""
- if field.Required {
- html += " required"
- }
- html += ">\n"
-
- case "number":
- html += " <input type=\"number\" class=\"form-control\" id=\"" + field.Name + "\" name=\"" + field.Name + "\" step=\"any\" value=\"" +
- fmt.Sprintf("%v", field.Value) + "\""
- if field.Required {
- html += " required"
- }
- html += ">\n"
-
- case "select":
- html += " <select class=\"form-select\" id=\"" + field.Name + "\" name=\"" + field.Name + "\""
- if field.Required {
- html += " required"
- }
- html += ">\n"
- for _, option := range field.Options {
- selected := ""
- if option.Value == field.Value.(string) {
- selected = " selected"
- }
- html += " <option value=\"" + option.Value + "\"" + selected + ">" + option.Label + "</option>\n"
- }
- html += " </select>\n"
-
- case "duration":
- html += " <input type=\"text\" class=\"form-control\" id=\"" + field.Name + "\" name=\"" + field.Name + "\" value=\"" + field.Value.(string) +
- "\" placeholder=\"e.g., 30m, 2h, 24h\""
- if field.Required {
- html += " required"
- }
- html += ">\n"
- }
-
- // Description for non-checkbox fields
- if field.Description != "" {
- html += " <div class=\"form-text text-muted\">" + field.Description + "</div>\n"
- }
-
- html += "</div>\n"
- return html
+ Task *Task `json:"task"`
+ TaskType TaskType `json:"task_type"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ Stats *TaskStats `json:"stats"`
+ LastUpdated time.Time `json:"last_updated"`
}
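The interval helpers added above round-trip between a raw seconds value and the (value, unit) pair shown in the UI. A short usage sketch, written as if inside package types since secondsToIntervalValueUnit is unexported:

seconds := IntervalValueUnitToSeconds(7, "day") // 7 * 24 * 3600 = 604800
value, unit := secondsToIntervalValueUnit(seconds)
// value == 7, unit == "day"

// Durations that are not a whole number of days or hours fall back to minutes.
value, unit = secondsToIntervalValueUnit(5400)
// value == 90, unit == "minute"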
diff --git a/weed/worker/types/typed_task_interface.go b/weed/worker/types/typed_task_interface.go
new file mode 100644
index 000000000..3dffe510c
--- /dev/null
+++ b/weed/worker/types/typed_task_interface.go
@@ -0,0 +1,121 @@
+package types
+
+import (
+ "errors"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
+)
+
+var (
+ // ErrTaskTypeNotFound is returned when a task type is not registered
+ ErrTaskTypeNotFound = errors.New("task type not found")
+)
+
+// TaskLogger interface for task logging (minimal definition to avoid import cycles)
+type TaskLogger interface {
+ Info(message string, args ...interface{})
+ Warning(message string, args ...interface{})
+ Error(message string, args ...interface{})
+ Debug(message string, args ...interface{})
+ LogWithFields(level string, message string, fields map[string]interface{})
+ Close() error
+}
+
+// TaskLoggerConfig holds configuration for task logging (minimal definition)
+type TaskLoggerConfig struct {
+ BaseLogDir string
+ MaxTasks int
+ MaxLogSizeMB int
+ EnableConsole bool
+}
+
+// TypedTaskInterface defines the interface for tasks using typed protobuf parameters
+type TypedTaskInterface interface {
+ // Execute the task with typed protobuf parameters
+ ExecuteTyped(params *worker_pb.TaskParams) error
+
+ // Validate typed task parameters
+ ValidateTyped(params *worker_pb.TaskParams) error
+
+ // Estimate execution time based on typed parameters
+ EstimateTimeTyped(params *worker_pb.TaskParams) time.Duration
+
+ // Get task type
+ GetType() TaskType
+
+ // Check if task can be cancelled
+ IsCancellable() bool
+
+ // Cancel the task if running
+ Cancel() error
+
+ // Get current progress (0-100)
+ GetProgress() float64
+
+ // Set progress callback for progress updates
+ SetProgressCallback(callback func(float64))
+
+ // Logger configuration and initialization (all typed tasks support this)
+ SetLoggerConfig(config TaskLoggerConfig)
+ InitializeTaskLogger(taskID string, workerID string, params TaskParams) error
+ GetTaskLogger() TaskLogger
+
+ // Logging methods (all typed tasks support this)
+ LogInfo(message string, args ...interface{})
+ LogWarning(message string, args ...interface{})
+ LogError(message string, args ...interface{})
+ LogDebug(message string, args ...interface{})
+ LogWithFields(level string, message string, fields map[string]interface{})
+}
+
+// TypedTaskCreator is a function that creates a new typed task instance
+type TypedTaskCreator func() TypedTaskInterface
+
+// TypedTaskRegistry manages typed task creation
+type TypedTaskRegistry struct {
+ creators map[TaskType]TypedTaskCreator
+}
+
+// NewTypedTaskRegistry creates a new typed task registry
+func NewTypedTaskRegistry() *TypedTaskRegistry {
+ return &TypedTaskRegistry{
+ creators: make(map[TaskType]TypedTaskCreator),
+ }
+}
+
+// RegisterTypedTask registers a typed task creator
+func (r *TypedTaskRegistry) RegisterTypedTask(taskType TaskType, creator TypedTaskCreator) {
+ r.creators[taskType] = creator
+}
+
+// CreateTypedTask creates a new typed task instance
+func (r *TypedTaskRegistry) CreateTypedTask(taskType TaskType) (TypedTaskInterface, error) {
+ creator, exists := r.creators[taskType]
+ if !exists {
+ return nil, ErrTaskTypeNotFound
+ }
+ return creator(), nil
+}
+
+// GetSupportedTypes returns all registered typed task types
+func (r *TypedTaskRegistry) GetSupportedTypes() []TaskType {
+ types := make([]TaskType, 0, len(r.creators))
+ for taskType := range r.creators {
+ types = append(types, taskType)
+ }
+ return types
+}
+
+// Global typed task registry
+var globalTypedTaskRegistry = NewTypedTaskRegistry()
+
+// RegisterGlobalTypedTask registers a typed task globally
+func RegisterGlobalTypedTask(taskType TaskType, creator TypedTaskCreator) {
+ globalTypedTaskRegistry.RegisterTypedTask(taskType, creator)
+}
+
+// GetGlobalTypedTaskRegistry returns the global typed task registry
+func GetGlobalTypedTaskRegistry() *TypedTaskRegistry {
+ return globalTypedTaskRegistry
+}
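A minimal sketch of driving the typed registry defined above; the registry calls are the ones introduced in this file, while newExampleTask and params are hypothetical stand-ins for a real TypedTaskCreator and a planned *worker_pb.TaskParams.

// Registration, normally done from a task package's init function.
types.RegisterGlobalTypedTask(types.TaskTypeVacuum, newExampleTask) // newExampleTask: hypothetical creator

// Creation and execution, roughly as the worker's executeTask path does below.
registry := types.GetGlobalTypedTaskRegistry()
task, err := registry.CreateTypedTask(types.TaskTypeVacuum)
if err != nil {
	return err // ErrTaskTypeNotFound when the type was never registered
}
if err := task.ValidateTyped(params); err != nil { // params: hypothetical *worker_pb.TaskParams
	return err
}
return task.ExecuteTyped(params)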
diff --git a/weed/worker/worker.go b/weed/worker/worker.go
index 3b7899f07..ff6b87808 100644
--- a/weed/worker/worker.go
+++ b/weed/worker/worker.go
@@ -1,12 +1,17 @@
package worker
import (
+ "crypto/rand"
"fmt"
+ "net"
"os"
+ "path/filepath"
+ "strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
@@ -31,6 +36,7 @@ type Worker struct {
tasksFailed int
heartbeatTicker *time.Ticker
requestTicker *time.Ticker
+ taskLogHandler *tasks.TaskLogHandler
}
// AdminClient defines the interface for communicating with the admin server
@@ -41,30 +47,113 @@ type AdminClient interface {
SendHeartbeat(workerID string, status *types.WorkerStatus) error
RequestTask(workerID string, capabilities []types.TaskType) (*types.Task, error)
CompleteTask(taskID string, success bool, errorMsg string) error
+ CompleteTaskWithMetadata(taskID string, success bool, errorMsg string, metadata map[string]string) error
UpdateTaskProgress(taskID string, progress float64) error
IsConnected() bool
}
+// GenerateOrLoadWorkerID generates a unique worker ID or loads an existing one from the working directory
+func GenerateOrLoadWorkerID(workingDir string) (string, error) {
+ const workerIDFile = "worker.id"
+
+ var idFilePath string
+ if workingDir != "" {
+ idFilePath = filepath.Join(workingDir, workerIDFile)
+ } else {
+ // Use current working directory if none specified
+ wd, err := os.Getwd()
+ if err != nil {
+ return "", fmt.Errorf("failed to get working directory: %w", err)
+ }
+ idFilePath = filepath.Join(wd, workerIDFile)
+ }
+
+ // Try to read existing worker ID
+ if data, err := os.ReadFile(idFilePath); err == nil {
+ workerID := strings.TrimSpace(string(data))
+ if workerID != "" {
+ glog.Infof("Loaded existing worker ID from %s: %s", idFilePath, workerID)
+ return workerID, nil
+ }
+ }
+
+ // Generate a new unique worker ID that embeds host information
+ hostname, _ := os.Hostname()
+ if hostname == "" {
+ hostname = "unknown"
+ }
+
+ // Get local IP address for better host identification
+ var hostIP string
+ if addrs, err := net.InterfaceAddrs(); err == nil {
+ for _, addr := range addrs {
+ if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+ if ipnet.IP.To4() != nil {
+ hostIP = ipnet.IP.String()
+ break
+ }
+ }
+ }
+ }
+ if hostIP == "" {
+ hostIP = "noip"
+ }
+
+ // Create host identifier combining hostname and IP
+ hostID := fmt.Sprintf("%s@%s", hostname, hostIP)
+
+ // Generate random component for uniqueness
+ randomBytes := make([]byte, 4)
+ var workerID string
+ if _, err := rand.Read(randomBytes); err != nil {
+ // Fallback to timestamp if crypto/rand fails
+ workerID = fmt.Sprintf("worker-%s-%d", hostID, time.Now().Unix())
+ glog.Infof("Generated fallback worker ID: %s", workerID)
+ } else {
+ // Use random bytes + timestamp for uniqueness
+ randomHex := fmt.Sprintf("%x", randomBytes)
+ timestamp := time.Now().Unix()
+ workerID = fmt.Sprintf("worker-%s-%s-%d", hostID, randomHex, timestamp)
+ glog.Infof("Generated new worker ID: %s", workerID)
+ }
+
+ // Save worker ID to file
+ if err := os.WriteFile(idFilePath, []byte(workerID), 0644); err != nil {
+ glog.Warningf("Failed to save worker ID to %s: %v", idFilePath, err)
+ } else {
+ glog.Infof("Saved worker ID to %s", idFilePath)
+ }
+
+ return workerID, nil
+}
+
// NewWorker creates a new worker instance
func NewWorker(config *types.WorkerConfig) (*Worker, error) {
if config == nil {
config = types.DefaultWorkerConfig()
}
- // Always auto-generate worker ID
- hostname, _ := os.Hostname()
- workerID := fmt.Sprintf("worker-%s-%d", hostname, time.Now().Unix())
+ // Generate or load persistent worker ID
+ workerID, err := GenerateOrLoadWorkerID(config.BaseWorkingDir)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate or load worker ID: %w", err)
+ }
// Use the global registry that already has all tasks registered
registry := tasks.GetGlobalRegistry()
+ // Initialize task log handler
+ logDir := filepath.Join(config.BaseWorkingDir, "task_logs")
+ taskLogHandler := tasks.NewTaskLogHandler(logDir)
+
worker := &Worker{
- id: workerID,
- config: config,
- registry: registry,
- currentTasks: make(map[string]*types.Task),
- stopChan: make(chan struct{}),
- startTime: time.Now(),
+ id: workerID,
+ config: config,
+ registry: registry,
+ currentTasks: make(map[string]*types.Task),
+ stopChan: make(chan struct{}),
+ startTime: time.Now(),
+ taskLogHandler: taskLogHandler,
}
glog.V(1).Infof("Worker created with %d registered task types", len(registry.GetSupportedTypes()))
@@ -72,6 +161,17 @@ func NewWorker(config *types.WorkerConfig) (*Worker, error) {
return worker, nil
}
+// getTaskLoggerConfig returns the task logger configuration with worker's log directory
+func (w *Worker) getTaskLoggerConfig() tasks.TaskLoggerConfig {
+ config := tasks.DefaultTaskLoggerConfig()
+
+ // Use worker's configured log directory (BaseWorkingDir is guaranteed to be non-empty)
+ logDir := filepath.Join(w.config.BaseWorkingDir, "task_logs")
+ config.BaseLogDir = logDir
+
+ return config
+}
+
// ID returns the worker ID
func (w *Worker) ID() string {
return w.id
@@ -90,15 +190,10 @@ func (w *Worker) Start() error {
return fmt.Errorf("admin client is not set")
}
- // Connect to admin server
- if err := w.adminClient.Connect(); err != nil {
- return fmt.Errorf("failed to connect to admin server: %w", err)
- }
-
w.running = true
w.startTime = time.Now()
- // Register with admin server
+ // Prepare worker info for registration
workerInfo := &types.Worker{
ID: w.id,
Capabilities: w.config.Capabilities,
@@ -108,17 +203,33 @@ func (w *Worker) Start() error {
LastHeartbeat: time.Now(),
}
+ // Register worker info with client first (this stores it for use during connection)
if err := w.adminClient.RegisterWorker(workerInfo); err != nil {
- w.running = false
- w.adminClient.Disconnect()
- return fmt.Errorf("failed to register worker: %w", err)
+ glog.V(1).Infof("Worker info stored for registration: %v", err)
+ // This is expected if not connected yet
}
- // Start worker loops
+ // Start connection attempt (will register immediately if successful)
+ glog.Infof("🚀 WORKER STARTING: Worker %s starting with capabilities %v, max concurrent: %d",
+ w.id, w.config.Capabilities, w.config.MaxConcurrent)
+
+ // Try initial connection, but don't fail if it doesn't work immediately
+ if err := w.adminClient.Connect(); err != nil {
+ glog.Warningf("⚠️ INITIAL CONNECTION FAILED: Worker %s initial connection to admin server failed, will keep retrying: %v", w.id, err)
+ // Don't return error - let the reconnection loop handle it
+ } else {
+ glog.Infof("✅ INITIAL CONNECTION SUCCESS: Worker %s successfully connected to admin server", w.id)
+ }
+
+ // Start worker loops regardless of initial connection status
+ // They will handle connection failures gracefully
+ glog.V(1).Infof("🔄 STARTING LOOPS: Worker %s starting background loops", w.id)
go w.heartbeatLoop()
go w.taskRequestLoop()
+ go w.connectionMonitorLoop()
+ go w.messageProcessingLoop()
- glog.Infof("Worker %s started", w.id)
+ glog.Infof("✅ WORKER STARTED: Worker %s started successfully (connection attempts will continue in background)", w.id)
return nil
}
@@ -208,14 +319,25 @@ func (w *Worker) GetStatus() types.WorkerStatus {
// HandleTask handles a task execution
func (w *Worker) HandleTask(task *types.Task) error {
+ glog.V(1).Infof("Worker %s received task %s (type: %s, volume: %d)",
+ w.id, task.ID, task.Type, task.VolumeID)
+
w.mutex.Lock()
- if len(w.currentTasks) >= w.config.MaxConcurrent {
+ currentLoad := len(w.currentTasks)
+ if currentLoad >= w.config.MaxConcurrent {
w.mutex.Unlock()
+ glog.Errorf("❌ TASK REJECTED: Worker %s at capacity (%d/%d) - rejecting task %s",
+ w.id, currentLoad, w.config.MaxConcurrent, task.ID)
return fmt.Errorf("worker is at capacity")
}
+
w.currentTasks[task.ID] = task
+ newLoad := len(w.currentTasks)
w.mutex.Unlock()
+ glog.Infof("✅ TASK ACCEPTED: Worker %s accepted task %s - current load: %d/%d",
+ w.id, task.ID, newLoad, w.config.MaxConcurrent)
+
// Execute task in goroutine
go w.executeTask(task)
@@ -249,40 +371,95 @@ func (w *Worker) SetAdminClient(client AdminClient) {
// executeTask executes a task
func (w *Worker) executeTask(task *types.Task) {
+ startTime := time.Now()
+
defer func() {
w.mutex.Lock()
delete(w.currentTasks, task.ID)
+ currentLoad := len(w.currentTasks)
w.mutex.Unlock()
+
+ duration := time.Since(startTime)
+ glog.Infof("🏁 TASK EXECUTION FINISHED: Worker %s finished executing task %s after %v - current load: %d/%d",
+ w.id, task.ID, duration, currentLoad, w.config.MaxConcurrent)
}()
- glog.Infof("Worker %s executing task %s: %s", w.id, task.ID, task.Type)
+ glog.Infof("🚀 TASK EXECUTION STARTED: Worker %s starting execution of task %s (type: %s, volume: %d, server: %s, collection: %s) at %v",
+ w.id, task.ID, task.Type, task.VolumeID, task.Server, task.Collection, startTime.Format(time.RFC3339))
- // Create task instance
- taskParams := types.TaskParams{
- VolumeID: task.VolumeID,
- Server: task.Server,
- Collection: task.Collection,
- Parameters: task.Parameters,
+ // Report task start to admin server
+ if err := w.adminClient.UpdateTaskProgress(task.ID, 0.0); err != nil {
+ glog.V(1).Infof("Failed to report task start to admin: %v", err)
}
- taskInstance, err := w.registry.CreateTask(task.Type, taskParams)
+ // Determine task-specific working directory (BaseWorkingDir is guaranteed to be non-empty)
+ taskWorkingDir := filepath.Join(w.config.BaseWorkingDir, string(task.Type))
+ glog.V(2).Infof("📁 WORKING DIRECTORY: Task %s using working directory: %s", task.ID, taskWorkingDir)
+
+ // Check if we have typed protobuf parameters
+ if task.TypedParams == nil {
+ w.completeTask(task.ID, false, "task has no typed parameters - task was not properly planned")
+ glog.Errorf("Worker %s rejecting task %s: no typed parameters", w.id, task.ID)
+ return
+ }
+
+ // Use typed task execution (all tasks should be typed)
+ glog.V(1).Infof("Executing task %s with typed protobuf parameters", task.ID)
+
+ typedRegistry := types.GetGlobalTypedTaskRegistry()
+ typedTaskInstance, err := typedRegistry.CreateTypedTask(task.Type)
if err != nil {
- w.completeTask(task.ID, false, fmt.Sprintf("failed to create task: %v", err))
+ w.completeTask(task.ID, false, fmt.Sprintf("typed task not available for %s: %v", task.Type, err))
+ glog.Errorf("Worker %s failed to create typed task %s: %v", w.id, task.ID, err)
return
}
- // Execute task
- err = taskInstance.Execute(taskParams)
+ // Configure task logger directory (all typed tasks support this)
+ tasksLoggerConfig := w.getTaskLoggerConfig()
+ typedLoggerConfig := types.TaskLoggerConfig{
+ BaseLogDir: tasksLoggerConfig.BaseLogDir,
+ MaxTasks: tasksLoggerConfig.MaxTasks,
+ MaxLogSizeMB: tasksLoggerConfig.MaxLogSizeMB,
+ EnableConsole: tasksLoggerConfig.EnableConsole,
+ }
+ typedTaskInstance.SetLoggerConfig(typedLoggerConfig)
+ glog.V(2).Infof("Set typed task logger config for %s: %s", task.ID, typedLoggerConfig.BaseLogDir)
+
+ // Initialize logging (all typed tasks support this)
+ taskParams := types.TaskParams{
+ VolumeID: task.VolumeID,
+ Server: task.Server,
+ Collection: task.Collection,
+ WorkingDir: taskWorkingDir,
+ TypedParams: task.TypedParams,
+ GrpcDialOption: w.config.GrpcDialOption,
+ }
+
+ if err := typedTaskInstance.InitializeTaskLogger(task.ID, w.id, taskParams); err != nil {
+ glog.Warningf("Failed to initialize task logger for %s: %v", task.ID, err)
+ }
+
+ // Set progress callback that reports to admin server
+ typedTaskInstance.SetProgressCallback(func(progress float64) {
+ // Report progress updates to admin server
+ glog.V(2).Infof("Task %s progress: %.1f%%", task.ID, progress)
+ if err := w.adminClient.UpdateTaskProgress(task.ID, progress); err != nil {
+ glog.V(1).Infof("Failed to report task progress to admin: %v", err)
+ }
+ })
+
+ // Execute typed task
+ err = typedTaskInstance.ExecuteTyped(task.TypedParams)
// Report completion
if err != nil {
w.completeTask(task.ID, false, err.Error())
w.tasksFailed++
- glog.Errorf("Worker %s failed to execute task %s: %v", w.id, task.ID, err)
+ glog.Errorf("Worker %s failed to execute typed task %s: %v", w.id, task.ID, err)
} else {
w.completeTask(task.ID, true, "")
w.tasksCompleted++
- glog.Infof("Worker %s completed task %s successfully", w.id, task.ID)
+ glog.Infof("Worker %s completed typed task %s successfully", w.id, task.ID)
}
}
@@ -348,20 +525,29 @@ func (w *Worker) requestTasks() {
w.mutex.RUnlock()
if currentLoad >= w.config.MaxConcurrent {
+ glog.V(3).Infof("🚫 TASK REQUEST SKIPPED: Worker %s at capacity (%d/%d)",
+ w.id, currentLoad, w.config.MaxConcurrent)
return // Already at capacity
}
if w.adminClient != nil {
+ glog.V(3).Infof("📞 REQUESTING TASK: Worker %s requesting task from admin server (current load: %d/%d, capabilities: %v)",
+ w.id, currentLoad, w.config.MaxConcurrent, w.config.Capabilities)
+
task, err := w.adminClient.RequestTask(w.id, w.config.Capabilities)
if err != nil {
- glog.V(2).Infof("Failed to request task: %v", err)
+ glog.V(2).Infof("❌ TASK REQUEST FAILED: Worker %s failed to request task: %v", w.id, err)
return
}
if task != nil {
+ glog.Infof("📨 TASK RESPONSE RECEIVED: Worker %s received task from admin server - ID: %s, Type: %s",
+ w.id, task.ID, task.Type)
if err := w.HandleTask(task); err != nil {
- glog.Errorf("Failed to handle task: %v", err)
+ glog.Errorf("❌ TASK HANDLING FAILED: Worker %s failed to handle task %s: %v", w.id, task.ID, err)
}
+ } else {
+ glog.V(3).Infof("📭 NO TASK AVAILABLE: Worker %s - admin server has no tasks available", w.id)
}
}
}
@@ -383,6 +569,59 @@ func (w *Worker) GetCurrentTasks() map[string]*types.Task {
return tasks
}
+// registerWorker registers the worker with the admin server
+func (w *Worker) registerWorker() {
+ workerInfo := &types.Worker{
+ ID: w.id,
+ Capabilities: w.config.Capabilities,
+ MaxConcurrent: w.config.MaxConcurrent,
+ Status: "active",
+ CurrentLoad: 0,
+ LastHeartbeat: time.Now(),
+ }
+
+ if err := w.adminClient.RegisterWorker(workerInfo); err != nil {
+ glog.Warningf("Failed to register worker (will retry on next heartbeat): %v", err)
+ } else {
+ glog.Infof("Worker %s registered successfully with admin server", w.id)
+ }
+}
+
+// connectionMonitorLoop monitors connection status
+func (w *Worker) connectionMonitorLoop() {
+ glog.V(1).Infof("🔍 CONNECTION MONITOR STARTED: Worker %s connection monitor loop started", w.id)
+ ticker := time.NewTicker(30 * time.Second) // Check every 30 seconds
+ defer ticker.Stop()
+
+ lastConnectionStatus := false
+
+ for {
+ select {
+ case <-w.stopChan:
+ glog.V(1).Infof("🛑 CONNECTION MONITOR STOPPING: Worker %s connection monitor loop stopping", w.id)
+ return
+ case <-ticker.C:
+ // Monitor connection status and log changes
+ currentConnectionStatus := w.adminClient != nil && w.adminClient.IsConnected()
+
+ if currentConnectionStatus != lastConnectionStatus {
+ if currentConnectionStatus {
+ glog.Infof("🔗 CONNECTION RESTORED: Worker %s connection status changed: connected", w.id)
+ } else {
+ glog.Warningf("⚠️ CONNECTION LOST: Worker %s connection status changed: disconnected", w.id)
+ }
+ lastConnectionStatus = currentConnectionStatus
+ } else {
+ if currentConnectionStatus {
+ glog.V(3).Infof("✅ CONNECTION OK: Worker %s connection status: connected", w.id)
+ } else {
+ glog.V(1).Infof("🔌 CONNECTION DOWN: Worker %s connection status: disconnected, reconnection in progress", w.id)
+ }
+ }
+ }
+ }
+}
+
// GetConfig returns the worker configuration
func (w *Worker) GetConfig() *types.WorkerConfig {
return w.config
@@ -408,3 +647,158 @@ func (w *Worker) GetPerformanceMetrics() *types.WorkerPerformance {
SuccessRate: successRate,
}
}
+
+// messageProcessingLoop processes incoming admin messages
+func (w *Worker) messageProcessingLoop() {
+ glog.Infof("🔄 MESSAGE LOOP STARTED: Worker %s message processing loop started", w.id)
+
+ // Get access to the incoming message channel from gRPC client
+ grpcClient, ok := w.adminClient.(*GrpcAdminClient)
+ if !ok {
+ glog.Warningf("⚠️ MESSAGE LOOP UNAVAILABLE: Worker %s admin client is not gRPC client, message processing not available", w.id)
+ return
+ }
+
+ incomingChan := grpcClient.GetIncomingChannel()
+ glog.V(1).Infof("📡 MESSAGE CHANNEL READY: Worker %s connected to incoming message channel", w.id)
+
+ for {
+ select {
+ case <-w.stopChan:
+ glog.Infof("🛑 MESSAGE LOOP STOPPING: Worker %s message processing loop stopping", w.id)
+ return
+ case message := <-incomingChan:
+ if message != nil {
+ glog.V(3).Infof("📥 MESSAGE PROCESSING: Worker %s processing incoming message", w.id)
+ w.processAdminMessage(message)
+ } else {
+ glog.V(3).Infof("📭 NULL MESSAGE: Worker %s received nil message", w.id)
+ }
+ }
+ }
+}
+
+// processAdminMessage processes different types of admin messages
+func (w *Worker) processAdminMessage(message *worker_pb.AdminMessage) {
+ glog.V(4).Infof("📫 ADMIN MESSAGE RECEIVED: Worker %s received admin message: %T", w.id, message.Message)
+
+ switch msg := message.Message.(type) {
+ case *worker_pb.AdminMessage_RegistrationResponse:
+ glog.V(2).Infof("✅ REGISTRATION RESPONSE: Worker %s received registration response", w.id)
+ w.handleRegistrationResponse(msg.RegistrationResponse)
+ case *worker_pb.AdminMessage_HeartbeatResponse:
+ glog.V(3).Infof("💓 HEARTBEAT RESPONSE: Worker %s received heartbeat response", w.id)
+ w.handleHeartbeatResponse(msg.HeartbeatResponse)
+ case *worker_pb.AdminMessage_TaskLogRequest:
+ glog.V(1).Infof("📋 TASK LOG REQUEST: Worker %s received task log request for task %s", w.id, msg.TaskLogRequest.TaskId)
+ w.handleTaskLogRequest(msg.TaskLogRequest)
+ case *worker_pb.AdminMessage_TaskAssignment:
+ taskAssign := msg.TaskAssignment
+ glog.V(1).Infof("Worker %s received direct task assignment %s (type: %s, volume: %d)",
+ w.id, taskAssign.TaskId, taskAssign.TaskType, taskAssign.Params.VolumeId)
+
+ // Convert to task and handle it
+ task := &types.Task{
+ ID: taskAssign.TaskId,
+ Type: types.TaskType(taskAssign.TaskType),
+ Status: types.TaskStatusAssigned,
+ VolumeID: taskAssign.Params.VolumeId,
+ Server: taskAssign.Params.Server,
+ Collection: taskAssign.Params.Collection,
+ Priority: types.TaskPriority(taskAssign.Priority),
+ CreatedAt: time.Unix(taskAssign.CreatedTime, 0),
+ TypedParams: taskAssign.Params,
+ }
+
+ if err := w.HandleTask(task); err != nil {
+ glog.Errorf("❌ DIRECT TASK ASSIGNMENT FAILED: Worker %s failed to handle direct task assignment %s: %v", w.id, task.ID, err)
+ }
+ case *worker_pb.AdminMessage_TaskCancellation:
+ glog.Infof("🛑 TASK CANCELLATION: Worker %s received task cancellation for task %s", w.id, msg.TaskCancellation.TaskId)
+ w.handleTaskCancellation(msg.TaskCancellation)
+ case *worker_pb.AdminMessage_AdminShutdown:
+ glog.Infof("🔄 ADMIN SHUTDOWN: Worker %s received admin shutdown message", w.id)
+ w.handleAdminShutdown(msg.AdminShutdown)
+ default:
+ glog.V(1).Infof("❓ UNKNOWN MESSAGE: Worker %s received unknown admin message type: %T", w.id, message.Message)
+ }
+}
+
+// handleTaskLogRequest processes task log requests from admin server
+func (w *Worker) handleTaskLogRequest(request *worker_pb.TaskLogRequest) {
+ glog.V(1).Infof("Worker %s handling task log request for task %s", w.id, request.TaskId)
+
+ // Use the task log handler to process the request
+ response := w.taskLogHandler.HandleLogRequest(request)
+
+ // Send response back to admin server
+ responseMsg := &worker_pb.WorkerMessage{
+ WorkerId: w.id,
+ Timestamp: time.Now().Unix(),
+ Message: &worker_pb.WorkerMessage_TaskLogResponse{
+ TaskLogResponse: response,
+ },
+ }
+
+ grpcClient, ok := w.adminClient.(*GrpcAdminClient)
+ if !ok {
+ glog.Errorf("Cannot send task log response: admin client is not gRPC client")
+ return
+ }
+
+ select {
+ case grpcClient.outgoing <- responseMsg:
+ glog.V(1).Infof("Task log response sent for task %s", request.TaskId)
+ case <-time.After(5 * time.Second):
+ glog.Errorf("Failed to send task log response for task %s: timeout", request.TaskId)
+ }
+}
+
+// handleTaskCancellation processes task cancellation requests
+func (w *Worker) handleTaskCancellation(cancellation *worker_pb.TaskCancellation) {
+ glog.Infof("Worker %s received task cancellation for task %s", w.id, cancellation.TaskId)
+
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+
+ if task, exists := w.currentTasks[cancellation.TaskId]; exists {
+ // TODO: Implement task cancellation logic
+ glog.Infof("Cancelling task %s", task.ID)
+ } else {
+ glog.Warningf("Cannot cancel task %s: task not found", cancellation.TaskId)
+ }
+}
+
+// handleAdminShutdown processes admin shutdown notifications
+func (w *Worker) handleAdminShutdown(shutdown *worker_pb.AdminShutdown) {
+ glog.Infof("Worker %s received admin shutdown notification: %s", w.id, shutdown.Reason)
+
+ gracefulSeconds := shutdown.GracefulShutdownSeconds
+ if gracefulSeconds > 0 {
+ glog.Infof("Graceful shutdown in %d seconds", gracefulSeconds)
+ time.AfterFunc(time.Duration(gracefulSeconds)*time.Second, func() {
+ w.Stop()
+ })
+ } else {
+ // Immediate shutdown
+ go w.Stop()
+ }
+}
+
+// handleRegistrationResponse processes registration response from admin server
+func (w *Worker) handleRegistrationResponse(response *worker_pb.RegistrationResponse) {
+ glog.V(2).Infof("Worker %s processed registration response: success=%v", w.id, response.Success)
+ if !response.Success {
+ glog.Warningf("Worker %s registration failed: %s", w.id, response.Message)
+ }
+ // Registration responses are typically handled by the gRPC client during connection setup
+ // No additional action needed here
+}
+
+// handleHeartbeatResponse processes heartbeat response from admin server
+func (w *Worker) handleHeartbeatResponse(response *worker_pb.HeartbeatResponse) {
+ glog.V(4).Infof("Worker %s processed heartbeat response", w.id)
+ // Heartbeat responses are mainly for keeping the connection alive
+ // The admin may include configuration updates or status information in the future
+ // For now, just acknowledge receipt
+}
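The AdminClient interface earlier in worker.go gained CompleteTaskWithMetadata alongside CompleteTask. A hedged sketch of reporting a result with extra context; only the method signatures come from the diff, and the metadata keys (borrowed from the vacuum task's log fields) are illustrative rather than an established convention.

// reportVacuumResult is a hypothetical helper inside package worker.
func reportVacuumResult(client AdminClient, taskID string, newSize uint64, execErr error) error {
	if execErr != nil {
		return client.CompleteTask(taskID, false, execErr.Error())
	}
	metadata := map[string]string{
		"new_volume_size":   fmt.Sprintf("%d", newSize),
		"garbage_reclaimed": "true",
	}
	return client.CompleteTaskWithMetadata(taskID, true, "", metadata)
}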