| field | value | date |
|---|---|---|
| author | Chris Lu <chrislusf@users.noreply.github.com> | 2025-08-09 21:47:29 -0700 |
| committer | GitHub <noreply@github.com> | 2025-08-09 21:47:29 -0700 |
| commit | 25bbf4c3d44b1c8a9aa4980e37ed399ec249f771 (patch) | |
| tree | aabb2ce3c6f55e4cf1e26ce2b6989086c17830bc /weed/pb | |
| parent | 3ac2a2e22d863753a6b568596fbe9d76d03023b5 (diff) | |
Admin UI: Fetch task logs (#7114)
* show task details
* loading tasks
* task UI works
* generic rendering
* rendering the export link
* removing placementConflicts from task parameters
* remove TaskSourceLocation
* remove "Server ID" column
* rendering balance task source
* sources and targets
* fix ec task generation
* move info
* render timeline
* simplified worker id
* simplify
* read task logs from worker
* isValidTaskID
* address comments
* Update weed/worker/tasks/balance/execution.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/worker/tasks/erasure_coding/ec_task.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/worker/tasks/task_log_handler.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix shard ids
* plan distributing shard id
* rendering planned shards in task details
* remove Conflicts
* worker logs correctly
* pass in dc and rack
* task logging
* Update weed/admin/maintenance/maintenance_queue.go
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* display log details
* logs have fields now
* sort field keys
* fix link
* fix collection filtering
* avoid hard coded ec shard counts
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Diffstat (limited to 'weed/pb')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | weed/pb/worker.proto | 167 |
| -rw-r--r-- | weed/pb/worker_pb/worker.pb.go | 1140 |
2 files changed, 977 insertions, 330 deletions
diff --git a/weed/pb/worker.proto b/weed/pb/worker.proto
index 811f94591..b9e3d61d0 100644
--- a/weed/pb/worker.proto
+++ b/weed/pb/worker.proto
@@ -94,21 +94,23 @@ message TaskAssignment {
 
 // TaskParams contains task-specific parameters with typed variants
 message TaskParams {
-  string task_id = 12;     // ActiveTopology task ID for lifecycle management
-  uint32 volume_id = 1;
-  string server = 2;
-  string collection = 3;
-  string data_center = 4;
-  string rack = 5;
-  repeated string replicas = 6;
-  uint64 volume_size = 11; // Original volume size in bytes for tracking size changes
+  string task_id = 1;      // ActiveTopology task ID for lifecycle management
+  uint32 volume_id = 2;    // Primary volume ID for the task
+  string collection = 3;   // Collection name
+  string data_center = 4;  // Primary data center
+  string rack = 5;         // Primary rack
+  uint64 volume_size = 6;  // Original volume size in bytes for tracking size changes
+
+  // Unified source and target arrays for all task types
+  repeated TaskSource sources = 7; // Source locations (volume replicas, EC shards, etc.)
+  repeated TaskTarget targets = 8; // Target locations (destinations, new replicas, etc.)
 
   // Typed task parameters
   oneof task_params {
-    VacuumTaskParams vacuum_params = 7;
-    ErasureCodingTaskParams erasure_coding_params = 8;
-    BalanceTaskParams balance_params = 9;
-    ReplicationTaskParams replication_params = 10;
+    VacuumTaskParams vacuum_params = 9;
+    ErasureCodingTaskParams erasure_coding_params = 10;
+    BalanceTaskParams balance_params = 11;
+    ReplicationTaskParams replication_params = 12;
   }
 }
 
@@ -123,54 +125,48 @@ message VacuumTaskParams {
 
 // ErasureCodingTaskParams for EC encoding operations
 message ErasureCodingTaskParams {
-  uint64 estimated_shard_size = 3; // Estimated size per shard
-  int32 data_shards = 4;           // Number of data shards (default: 10)
-  int32 parity_shards = 5;         // Number of parity shards (default: 4)
-  string working_dir = 6;          // Working directory for EC processing
-  string master_client = 7;        // Master server address
-  bool cleanup_source = 8;         // Whether to cleanup source volume after EC
-  repeated string placement_conflicts = 9;  // Any placement rule conflicts
-  repeated ECDestination destinations = 10; // Planned destinations with disk information
-  repeated ExistingECShardLocation existing_shard_locations = 11; // Existing EC shards to cleanup
-}
-
-// ECDestination represents a planned destination for EC shards with disk information
-message ECDestination {
-  string node = 1;            // Target server address
-  uint32 disk_id = 2;         // Target disk ID
-  string rack = 3;            // Target rack for placement tracking
-  string data_center = 4;     // Target data center for placement tracking
-  double placement_score = 5; // Quality score of the placement
+  uint64 estimated_shard_size = 1; // Estimated size per shard
+  int32 data_shards = 2;           // Number of data shards (default: 10)
+  int32 parity_shards = 3;         // Number of parity shards (default: 4)
+  string working_dir = 4;          // Working directory for EC processing
+  string master_client = 5;        // Master server address
+  bool cleanup_source = 6;         // Whether to cleanup source volume after EC
 }
 
-// ExistingECShardLocation represents existing EC shards that need cleanup
-message ExistingECShardLocation {
-  string node = 1;               // Server address with existing shards
-  repeated uint32 shard_ids = 2; // List of shard IDs on this server
+// TaskSource represents a unified source location for any task type
+message TaskSource {
+  string node = 1;               // Source server address
+  uint32 disk_id = 2;            // Source disk ID
+  string rack = 3;               // Source rack for tracking
+  string data_center = 4;        // Source data center for tracking
+  uint32 volume_id = 5;          // Volume ID (for volume operations)
+  repeated uint32 shard_ids = 6; // Shard IDs (for EC shard operations)
+  uint64 estimated_size = 7;     // Estimated size to be processed
+}
+
+// TaskTarget represents a unified target location for any task type
+message TaskTarget {
+  string node = 1;               // Target server address
+  uint32 disk_id = 2;            // Target disk ID
+  string rack = 3;               // Target rack for tracking
+  string data_center = 4;        // Target data center for tracking
+  uint32 volume_id = 5;          // Volume ID (for volume operations)
+  repeated uint32 shard_ids = 6; // Shard IDs (for EC shard operations)
+  uint64 estimated_size = 7;     // Estimated size to be created
 }
+
+
 
 // BalanceTaskParams for volume balancing operations
 message BalanceTaskParams {
-  string dest_node = 1;       // Planned destination node
-  uint64 estimated_size = 2;  // Estimated volume size
-  string dest_rack = 3;       // Destination rack for placement rules
-  string dest_dc = 4;         // Destination data center
-  double placement_score = 5; // Quality score of the planned placement
-  repeated string placement_conflicts = 6; // Any placement rule conflicts
-  bool force_move = 7;        // Force move even with conflicts
-  int32 timeout_seconds = 8;  // Operation timeout
+  bool force_move = 1;       // Force move even with conflicts
+  int32 timeout_seconds = 2; // Operation timeout
 }
 
 // ReplicationTaskParams for adding replicas
 message ReplicationTaskParams {
-  string dest_node = 1;        // Planned destination node for new replica
-  uint64 estimated_size = 2;   // Estimated replica size
-  string dest_rack = 3;        // Destination rack for placement rules
-  string dest_dc = 4;          // Destination data center
-  double placement_score = 5;  // Quality score of the planned placement
-  repeated string placement_conflicts = 6; // Any placement rule conflicts
-  int32 replica_count = 7;     // Target replica count
-  bool verify_consistency = 8; // Verify replica consistency after creation
+  int32 replica_count = 1;     // Target replica count
+  bool verify_consistency = 2; // Verify replica consistency after creation
 }
 
 // TaskUpdate reports task progress
@@ -329,4 +325,75 @@ message BalanceTaskConfig {
 
 // ReplicationTaskConfig contains replication-specific configuration
 message ReplicationTaskConfig {
   int32 target_replica_count = 1; // Target number of replicas
+}
+
+// ========== Task Persistence Messages ==========
+
+// MaintenanceTaskData represents complete task state for persistence
+message MaintenanceTaskData {
+  string id = 1;
+  string type = 2;
+  string priority = 3;
+  string status = 4;
+  uint32 volume_id = 5;
+  string server = 6;
+  string collection = 7;
+  TaskParams typed_params = 8;
+  string reason = 9;
+  int64 created_at = 10;
+  int64 scheduled_at = 11;
+  int64 started_at = 12;
+  int64 completed_at = 13;
+  string worker_id = 14;
+  string error = 15;
+  double progress = 16;
+  int32 retry_count = 17;
+  int32 max_retries = 18;
+
+  // Enhanced fields for detailed task tracking
+  string created_by = 19;
+  string creation_context = 20;
+  repeated TaskAssignmentRecord assignment_history = 21;
+  string detailed_reason = 22;
+  map<string, string> tags = 23;
+  TaskCreationMetrics creation_metrics = 24;
+}
+
+// TaskAssignmentRecord tracks worker assignments for a task
+message TaskAssignmentRecord {
+  string worker_id = 1;
+  string worker_address = 2;
+  int64 assigned_at = 3;
+  int64 unassigned_at = 4; // Optional: when worker was unassigned
+  string reason = 5;       // Reason for assignment/unassignment
+}
+
+// TaskCreationMetrics tracks why and how a task was created
+message TaskCreationMetrics {
+  string trigger_metric = 1;               // Name of metric that triggered creation
+  double metric_value = 2;                 // Value that triggered creation
+  double threshold = 3;                    // Threshold that was exceeded
+  VolumeHealthMetrics volume_metrics = 4;  // Volume health at creation time
+  map<string, string> additional_data = 5; // Additional context data
+}
+
+// VolumeHealthMetrics captures volume state at task creation
+message VolumeHealthMetrics {
+  uint64 total_size = 1;
+  uint64 used_size = 2;
+  uint64 garbage_size = 3;
+  double garbage_ratio = 4;
+  int32 file_count = 5;
+  int32 deleted_file_count = 6;
+  int64 last_modified = 7;
+  int32 replica_count = 8;
+  bool is_ec_volume = 9;
+  string collection = 10;
+}
+
+// TaskStateFile wraps task data with metadata for persistence
+message TaskStateFile {
+  MaintenanceTaskData task = 1;
+  int64 last_updated = 2;
+  string admin_version = 3;
 }
\ No newline at end of file
diff --git a/weed/pb/worker_pb/worker.pb.go b/weed/pb/worker_pb/worker.pb.go
index ff7d60545..7ff5a8a36 100644
--- a/weed/pb/worker_pb/worker.pb.go
+++ b/weed/pb/worker_pb/worker.pb.go
@@ -804,14 +804,15 @@ func (x *TaskAssignment) GetMetadata() map[string]string {
 
 // TaskParams contains task-specific parameters with typed variants
 type TaskParams struct {
     state protoimpl.MessageState `protogen:"open.v1"`
-    TaskId string `protobuf:"bytes,12,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` // ActiveTopology task ID for lifecycle management
-    VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
-    Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"`
-    Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
-    DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
-    Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"`
-    Replicas []string `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"`
-    VolumeSize uint64 `protobuf:"varint,11,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` // Original volume size in bytes for tracking size changes
+    TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` // ActiveTopology task ID for lifecycle management
+    VolumeId uint32 `protobuf:"varint,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Primary volume ID for the task
+    Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` // Collection name
+    DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Primary data center
+    Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"` // Primary rack
+    VolumeSize uint64 `protobuf:"varint,6,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` // Original volume size in bytes for tracking size changes
+    // Unified source and target arrays for all task types
+    Sources []*TaskSource `protobuf:"bytes,7,rep,name=sources,proto3" json:"sources,omitempty"` // Source locations (volume replicas, EC shards, etc.)
+    Targets []*TaskTarget `protobuf:"bytes,8,rep,name=targets,proto3" json:"targets,omitempty"` // Target locations (destinations, new replicas, etc.)
     // Typed task parameters
     //
     // Types that are valid to be assigned to TaskParams:
@@ -869,13 +870,6 @@ func (x *TaskParams) GetVolumeId() uint32 {
     return 0
 }
 
-func (x *TaskParams) GetServer() string {
-    if x != nil {
-        return x.Server
-    }
-    return ""
-}
-
 func (x *TaskParams) GetCollection() string {
     if x != nil {
         return x.Collection
@@ -897,18 +891,25 @@ func (x *TaskParams) GetRack() string {
     return ""
 }
 
-func (x *TaskParams) GetReplicas() []string {
+func (x *TaskParams) GetVolumeSize() uint64 {
     if x != nil {
-        return x.Replicas
+        return x.VolumeSize
+    }
+    return 0
+}
+
+func (x *TaskParams) GetSources() []*TaskSource {
+    if x != nil {
+        return x.Sources
     }
     return nil
 }
 
-func (x *TaskParams) GetVolumeSize() uint64 {
+func (x *TaskParams) GetTargets() []*TaskTarget {
     if x != nil {
-        return x.VolumeSize
+        return x.Targets
     }
-    return 0
+    return nil
 }
 
 func (x *TaskParams) GetTaskParams() isTaskParams_TaskParams {
@@ -959,19 +960,19 @@ type isTaskParams_TaskParams interface {
 }
 
 type TaskParams_VacuumParams struct {
-    VacuumParams *VacuumTaskParams `protobuf:"bytes,7,opt,name=vacuum_params,json=vacuumParams,proto3,oneof"`
+    VacuumParams *VacuumTaskParams `protobuf:"bytes,9,opt,name=vacuum_params,json=vacuumParams,proto3,oneof"`
 }
 
 type TaskParams_ErasureCodingParams struct {
-    ErasureCodingParams *ErasureCodingTaskParams `protobuf:"bytes,8,opt,name=erasure_coding_params,json=erasureCodingParams,proto3,oneof"`
+    ErasureCodingParams *ErasureCodingTaskParams `protobuf:"bytes,10,opt,name=erasure_coding_params,json=erasureCodingParams,proto3,oneof"`
 }
 
 type TaskParams_BalanceParams struct {
-    BalanceParams *BalanceTaskParams `protobuf:"bytes,9,opt,name=balance_params,json=balanceParams,proto3,oneof"`
+    BalanceParams *BalanceTaskParams `protobuf:"bytes,11,opt,name=balance_params,json=balanceParams,proto3,oneof"`
 }
 
 type TaskParams_ReplicationParams struct {
-    ReplicationParams *ReplicationTaskParams `protobuf:"bytes,10,opt,name=replication_params,json=replicationParams,proto3,oneof"`
+    ReplicationParams *ReplicationTaskParams `protobuf:"bytes,12,opt,name=replication_params,json=replicationParams,proto3,oneof"`
 }
 
 func (*TaskParams_VacuumParams) isTaskParams_TaskParams() {}
@@ -1061,18 +1062,15 @@ func (x *VacuumTaskParams) GetVerifyChecksum() bool {
 
 // ErasureCodingTaskParams for EC encoding operations
 type ErasureCodingTaskParams struct {
-    state protoimpl.MessageState `protogen:"open.v1"`
-    EstimatedShardSize uint64 `protobuf:"varint,3,opt,name=estimated_shard_size,json=estimatedShardSize,proto3" json:"estimated_shard_size,omitempty"` // Estimated size per shard
-    DataShards int32 `protobuf:"varint,4,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (default: 10)
-    ParityShards int32 `protobuf:"varint,5,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (default: 4)
-    WorkingDir string `protobuf:"bytes,6,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for EC processing
-    MasterClient string `protobuf:"bytes,7,opt,name=master_client,json=masterClient,proto3" json:"master_client,omitempty"` // Master server address
-    CleanupSource bool `protobuf:"varint,8,opt,name=cleanup_source,json=cleanupSource,proto3" json:"cleanup_source,omitempty"` // Whether to cleanup source volume after EC
-    PlacementConflicts []string `protobuf:"bytes,9,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
-    Destinations []*ECDestination `protobuf:"bytes,10,rep,name=destinations,proto3" json:"destinations,omitempty"` // Planned destinations with disk information
-    ExistingShardLocations []*ExistingECShardLocation `protobuf:"bytes,11,rep,name=existing_shard_locations,json=existingShardLocations,proto3" json:"existing_shard_locations,omitempty"` // Existing EC shards to cleanup
-    unknownFields protoimpl.UnknownFields
-    sizeCache protoimpl.SizeCache
+    state protoimpl.MessageState `protogen:"open.v1"`
+    EstimatedShardSize uint64 `protobuf:"varint,1,opt,name=estimated_shard_size,json=estimatedShardSize,proto3" json:"estimated_shard_size,omitempty"` // Estimated size per shard
+    DataShards int32 `protobuf:"varint,2,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (default: 10)
+    ParityShards int32 `protobuf:"varint,3,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (default: 4)
+    WorkingDir string `protobuf:"bytes,4,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for EC processing
+    MasterClient string `protobuf:"bytes,5,opt,name=master_client,json=masterClient,proto3" json:"master_client,omitempty"` // Master server address
+    CleanupSource bool `protobuf:"varint,6,opt,name=cleanup_source,json=cleanupSource,proto3" json:"cleanup_source,omitempty"` // Whether to cleanup source volume after EC
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }
 
 func (x *ErasureCodingTaskParams) Reset() {
@@ -1147,53 +1145,34 @@ func (x *ErasureCodingTaskParams) GetCleanupSource() bool {
     return false
 }
 
-func (x *ErasureCodingTaskParams) GetPlacementConflicts() []string {
-    if x != nil {
-        return x.PlacementConflicts
-    }
-    return nil
-}
-
-func (x *ErasureCodingTaskParams) GetDestinations() []*ECDestination {
-    if x != nil {
-        return x.Destinations
-    }
-    return nil
-}
-
-func (x *ErasureCodingTaskParams) GetExistingShardLocations() []*ExistingECShardLocation {
-    if x != nil {
-        return x.ExistingShardLocations
-    }
-    return nil
-}
-
-// ECDestination represents a planned destination for EC shards with disk information
-type ECDestination struct {
-    state protoimpl.MessageState `protogen:"open.v1"`
-    Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Target server address
-    DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID
-    Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Target rack for placement tracking
-    DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Target data center for placement tracking
-    PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the placement
-    unknownFields protoimpl.UnknownFields
-    sizeCache protoimpl.SizeCache
+// TaskSource represents a unified source location for any task type
+type TaskSource struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Source server address
+    DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Source disk ID
+    Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Source rack for tracking
+    DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Source data center for tracking
+    VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Volume ID (for volume operations)
+    ShardIds []uint32 `protobuf:"varint,6,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // Shard IDs (for EC shard operations)
+    EstimatedSize uint64 `protobuf:"varint,7,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated size to be processed
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }
 
-func (x *ECDestination) Reset() {
-    *x = ECDestination{}
+func (x *TaskSource) Reset() {
+    *x = TaskSource{}
     mi := &file_worker_proto_msgTypes[11]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
 }
 
-func (x *ECDestination) String() string {
+func (x *TaskSource) String() string {
     return protoimpl.X.MessageStringOf(x)
 }
 
-func (*ECDestination) ProtoMessage() {}
+func (*TaskSource) ProtoMessage() {}
 
-func (x *ECDestination) ProtoReflect() protoreflect.Message {
+func (x *TaskSource) ProtoReflect() protoreflect.Message {
     mi := &file_worker_proto_msgTypes[11]
     if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1205,69 +1184,88 @@ func (x *ECDestination) ProtoReflect() protoreflect.Message {
     return mi.MessageOf(x)
 }
 
-// Deprecated: Use ECDestination.ProtoReflect.Descriptor instead.
-func (*ECDestination) Descriptor() ([]byte, []int) {
+// Deprecated: Use TaskSource.ProtoReflect.Descriptor instead.
+func (*TaskSource) Descriptor() ([]byte, []int) {
     return file_worker_proto_rawDescGZIP(), []int{11}
 }
 
-func (x *ECDestination) GetNode() string {
+func (x *TaskSource) GetNode() string {
     if x != nil {
         return x.Node
     }
     return ""
 }
 
-func (x *ECDestination) GetDiskId() uint32 {
+func (x *TaskSource) GetDiskId() uint32 {
     if x != nil {
         return x.DiskId
     }
     return 0
 }
 
-func (x *ECDestination) GetRack() string {
+func (x *TaskSource) GetRack() string {
     if x != nil {
         return x.Rack
     }
     return ""
 }
 
-func (x *ECDestination) GetDataCenter() string {
+func (x *TaskSource) GetDataCenter() string {
     if x != nil {
         return x.DataCenter
     }
     return ""
 }
 
-func (x *ECDestination) GetPlacementScore() float64 {
+func (x *TaskSource) GetVolumeId() uint32 {
+    if x != nil {
+        return x.VolumeId
+    }
+    return 0
+}
+
+func (x *TaskSource) GetShardIds() []uint32 {
+    if x != nil {
+        return x.ShardIds
+    }
+    return nil
+}
+
+func (x *TaskSource) GetEstimatedSize() uint64 {
     if x != nil {
-        return x.PlacementScore
+        return x.EstimatedSize
     }
     return 0
 }
 
-// ExistingECShardLocation represents existing EC shards that need cleanup
-type ExistingECShardLocation struct {
+// TaskTarget represents a unified target location for any task type
+type TaskTarget struct {
     state protoimpl.MessageState `protogen:"open.v1"`
-    Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Server address with existing shards
-    ShardIds []uint32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // List of shard IDs on this server
+    Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Target server address
+    DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID
+    Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Target rack for tracking
+    DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Target data center for tracking
+    VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Volume ID (for volume operations)
+    ShardIds []uint32 `protobuf:"varint,6,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // Shard IDs (for EC shard operations)
+    EstimatedSize uint64 `protobuf:"varint,7,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated size to be created
     unknownFields protoimpl.UnknownFields
     sizeCache protoimpl.SizeCache
 }
 
-func (x *ExistingECShardLocation) Reset() {
-    *x = ExistingECShardLocation{}
+func (x *TaskTarget) Reset() {
+    *x = TaskTarget{}
     mi := &file_worker_proto_msgTypes[12]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
 }
 
-func (x *ExistingECShardLocation) String() string {
+func (x *TaskTarget) String() string {
     return protoimpl.X.MessageStringOf(x)
 }
 
-func (*ExistingECShardLocation) ProtoMessage() {}
+func (*TaskTarget) ProtoMessage() {}
 
-func (x *ExistingECShardLocation) ProtoReflect() protoreflect.Message {
+func (x *TaskTarget) ProtoReflect() protoreflect.Message {
     mi := &file_worker_proto_msgTypes[12]
     if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1279,38 +1277,67 @@ func (x *ExistingECShardLocation) ProtoReflect() protoreflect.Message {
     return mi.MessageOf(x)
 }
 
-// Deprecated: Use ExistingECShardLocation.ProtoReflect.Descriptor instead.
-func (*ExistingECShardLocation) Descriptor() ([]byte, []int) {
+// Deprecated: Use TaskTarget.ProtoReflect.Descriptor instead.
+func (*TaskTarget) Descriptor() ([]byte, []int) {
    return file_worker_proto_rawDescGZIP(), []int{12}
 }
 
-func (x *ExistingECShardLocation) GetNode() string {
+func (x *TaskTarget) GetNode() string {
     if x != nil {
         return x.Node
     }
     return ""
 }
 
-func (x *ExistingECShardLocation) GetShardIds() []uint32 {
+func (x *TaskTarget) GetDiskId() uint32 {
+    if x != nil {
+        return x.DiskId
+    }
+    return 0
+}
+
+func (x *TaskTarget) GetRack() string {
+    if x != nil {
+        return x.Rack
+    }
+    return ""
+}
+
+func (x *TaskTarget) GetDataCenter() string {
+    if x != nil {
+        return x.DataCenter
+    }
+    return ""
+}
+
+func (x *TaskTarget) GetVolumeId() uint32 {
+    if x != nil {
+        return x.VolumeId
+    }
+    return 0
+}
+
+func (x *TaskTarget) GetShardIds() []uint32 {
     if x != nil {
         return x.ShardIds
     }
     return nil
 }
 
+func (x *TaskTarget) GetEstimatedSize() uint64 {
+    if x != nil {
+        return x.EstimatedSize
+    }
+    return 0
+}
+
 // BalanceTaskParams for volume balancing operations
 type BalanceTaskParams struct {
-    state protoimpl.MessageState `protogen:"open.v1"`
-    DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node
-    EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated volume size
-    DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
-    DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
-    PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
-    PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
-    ForceMove bool `protobuf:"varint,7,opt,name=force_move,json=forceMove,proto3" json:"force_move,omitempty"` // Force move even with conflicts
-    TimeoutSeconds int32 `protobuf:"varint,8,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` // Operation timeout
-    unknownFields protoimpl.UnknownFields
-    sizeCache protoimpl.SizeCache
+    state protoimpl.MessageState `protogen:"open.v1"`
+    ForceMove bool `protobuf:"varint,1,opt,name=force_move,json=forceMove,proto3" json:"force_move,omitempty"` // Force move even with conflicts
+    TimeoutSeconds int32 `protobuf:"varint,2,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` // Operation timeout
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }
 
 func (x *BalanceTaskParams) Reset() {
@@ -1343,48 +1370,6 @@ func (*BalanceTaskParams) Descriptor() ([]byte, []int) {
     return file_worker_proto_rawDescGZIP(), []int{13}
 }
 
-func (x *BalanceTaskParams) GetDestNode() string {
-    if x != nil {
-        return x.DestNode
-    }
-    return ""
-}
-
-func (x *BalanceTaskParams) GetEstimatedSize() uint64 {
-    if x != nil {
-        return x.EstimatedSize
-    }
-    return 0
-}
-
-func (x *BalanceTaskParams) GetDestRack() string {
-    if x != nil {
-        return x.DestRack
-    }
-    return ""
-}
-
-func (x *BalanceTaskParams) GetDestDc() string {
-    if x != nil {
-        return x.DestDc
-    }
-    return ""
-}
-
-func (x *BalanceTaskParams) GetPlacementScore() float64 {
-    if x != nil {
-        return x.PlacementScore
-    }
-    return 0
-}
-
-func (x *BalanceTaskParams) GetPlacementConflicts() []string {
-    if x != nil {
-        return x.PlacementConflicts
-    }
-    return nil
-}
-
 func (x *BalanceTaskParams) GetForceMove() bool {
     if x != nil {
         return x.ForceMove
@@ -1401,17 +1386,11 @@ func (x *BalanceTaskParams) GetTimeoutSeconds() int32 {
 
 // ReplicationTaskParams for adding replicas
 type ReplicationTaskParams struct {
-    state protoimpl.MessageState `protogen:"open.v1"`
-    DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node for new replica
-    EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated replica size
-    DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
-    DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
-    PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
-    PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
-    ReplicaCount int32 `protobuf:"varint,7,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // Target replica count
-    VerifyConsistency bool `protobuf:"varint,8,opt,name=verify_consistency,json=verifyConsistency,proto3" json:"verify_consistency,omitempty"` // Verify replica consistency after creation
-    unknownFields protoimpl.UnknownFields
-    sizeCache protoimpl.SizeCache
+    state protoimpl.MessageState `protogen:"open.v1"`
+    ReplicaCount int32 `protobuf:"varint,1,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // Target replica count
+    VerifyConsistency bool `protobuf:"varint,2,opt,name=verify_consistency,json=verifyConsistency,proto3" json:"verify_consistency,omitempty"` // Verify replica consistency after creation
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }
 
 func (x *ReplicationTaskParams) Reset() {
@@ -1444,48 +1423,6 @@ func (*ReplicationTaskParams) Descriptor() ([]byte, []int) {
     return file_worker_proto_rawDescGZIP(), []int{14}
 }
 
-func (x *ReplicationTaskParams) GetDestNode() string {
-    if x != nil {
-        return x.DestNode
-    }
-    return ""
-}
-
-func (x *ReplicationTaskParams) GetEstimatedSize() uint64 {
-    if x != nil {
-        return x.EstimatedSize
-    }
-    return 0
-}
-
-func (x *ReplicationTaskParams) GetDestRack() string {
-    if x != nil {
-        return x.DestRack
-    }
-    return ""
-}
-
-func (x *ReplicationTaskParams) GetDestDc() string {
-    if x != nil {
-        return x.DestDc
-    }
-    return ""
-}
-
-func (x *ReplicationTaskParams) GetPlacementScore() float64 {
-    if x != nil {
-        return x.PlacementScore
-    }
-    return 0
-}
-
-func (x *ReplicationTaskParams) GetPlacementConflicts() []string {
-    if x != nil {
-        return x.PlacementConflicts
-    }
-    return nil
-}
-
 func (x *ReplicationTaskParams) GetReplicaCount() int32 {
     if x != nil {
         return x.ReplicaCount
@@ -2812,6 +2749,568 @@ func (x *ReplicationTaskConfig) GetTargetReplicaCount() int32 {
     return 0
 }
 
+// MaintenanceTaskData represents complete task state for persistence
+type MaintenanceTaskData struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+    Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+    Priority string `protobuf:"bytes,3,opt,name=priority,proto3" json:"priority,omitempty"`
+    Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+    VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+    Server string `protobuf:"bytes,6,opt,name=server,proto3" json:"server,omitempty"`
+    Collection string `protobuf:"bytes,7,opt,name=collection,proto3" json:"collection,omitempty"`
+    TypedParams *TaskParams `protobuf:"bytes,8,opt,name=typed_params,json=typedParams,proto3" json:"typed_params,omitempty"`
+    Reason string `protobuf:"bytes,9,opt,name=reason,proto3" json:"reason,omitempty"`
+    CreatedAt int64 `protobuf:"varint,10,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+    ScheduledAt int64 `protobuf:"varint,11,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"`
+    StartedAt int64 `protobuf:"varint,12,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
+    CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
+    WorkerId string `protobuf:"bytes,14,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
+    Error string `protobuf:"bytes,15,opt,name=error,proto3" json:"error,omitempty"`
+    Progress float64 `protobuf:"fixed64,16,opt,name=progress,proto3" json:"progress,omitempty"`
+    RetryCount int32 `protobuf:"varint,17,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"`
+    MaxRetries int32 `protobuf:"varint,18,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"`
+    // Enhanced fields for detailed task tracking
+    CreatedBy string `protobuf:"bytes,19,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"`
+    CreationContext string `protobuf:"bytes,20,opt,name=creation_context,json=creationContext,proto3" json:"creation_context,omitempty"`
+    AssignmentHistory []*TaskAssignmentRecord `protobuf:"bytes,21,rep,name=assignment_history,json=assignmentHistory,proto3" json:"assignment_history,omitempty"`
+    DetailedReason string `protobuf:"bytes,22,opt,name=detailed_reason,json=detailedReason,proto3" json:"detailed_reason,omitempty"`
+    Tags map[string]string `protobuf:"bytes,23,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+    CreationMetrics *TaskCreationMetrics `protobuf:"bytes,24,opt,name=creation_metrics,json=creationMetrics,proto3" json:"creation_metrics,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
+}
+
+func (x *MaintenanceTaskData) Reset() {
+    *x = MaintenanceTaskData{}
+    mi := &file_worker_proto_msgTypes[31]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
+}
+
+func (x *MaintenanceTaskData) String() string {
+    return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MaintenanceTaskData) ProtoMessage() {}
+
+func (x *MaintenanceTaskData) ProtoReflect() protoreflect.Message {
+    mi := &file_worker_proto_msgTypes[31]
+    if x != nil {
+        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+        if ms.LoadMessageInfo() == nil {
+            ms.StoreMessageInfo(mi)
+        }
+        return ms
+    }
+    return mi.MessageOf(x)
+}
+
+// Deprecated: Use MaintenanceTaskData.ProtoReflect.Descriptor instead.
+func (*MaintenanceTaskData) Descriptor() ([]byte, []int) {
+    return file_worker_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *MaintenanceTaskData) GetId() string {
+    if x != nil {
+        return x.Id
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetType() string {
+    if x != nil {
+        return x.Type
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetPriority() string {
+    if x != nil {
+        return x.Priority
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetStatus() string {
+    if x != nil {
+        return x.Status
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetVolumeId() uint32 {
+    if x != nil {
+        return x.VolumeId
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetServer() string {
+    if x != nil {
+        return x.Server
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetCollection() string {
+    if x != nil {
+        return x.Collection
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetTypedParams() *TaskParams {
+    if x != nil {
+        return x.TypedParams
+    }
+    return nil
+}
+
+func (x *MaintenanceTaskData) GetReason() string {
+    if x != nil {
+        return x.Reason
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetCreatedAt() int64 {
+    if x != nil {
+        return x.CreatedAt
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetScheduledAt() int64 {
+    if x != nil {
+        return x.ScheduledAt
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetStartedAt() int64 {
+    if x != nil {
+        return x.StartedAt
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetCompletedAt() int64 {
+    if x != nil {
+        return x.CompletedAt
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetWorkerId() string {
+    if x != nil {
+        return x.WorkerId
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetError() string {
+    if x != nil {
+        return x.Error
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetProgress() float64 {
+    if x != nil {
+        return x.Progress
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetRetryCount() int32 {
+    if x != nil {
+        return x.RetryCount
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetMaxRetries() int32 {
+    if x != nil {
+        return x.MaxRetries
+    }
+    return 0
+}
+
+func (x *MaintenanceTaskData) GetCreatedBy() string {
+    if x != nil {
+        return x.CreatedBy
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetCreationContext() string {
+    if x != nil {
+        return x.CreationContext
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetAssignmentHistory() []*TaskAssignmentRecord {
+    if x != nil {
+        return x.AssignmentHistory
+    }
+    return nil
+}
+
+func (x *MaintenanceTaskData) GetDetailedReason() string {
+    if x != nil {
+        return x.DetailedReason
+    }
+    return ""
+}
+
+func (x *MaintenanceTaskData) GetTags() map[string]string {
+    if x != nil {
+        return x.Tags
+    }
+    return nil
+}
+
+func (x *MaintenanceTaskData) GetCreationMetrics() *TaskCreationMetrics {
+    if x != nil {
+        return x.CreationMetrics
+    }
+    return nil
+}
+
+// TaskAssignmentRecord tracks worker assignments for a task
+type TaskAssignmentRecord struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
+    WorkerAddress string `protobuf:"bytes,2,opt,name=worker_address,json=workerAddress,proto3" json:"worker_address,omitempty"`
+    AssignedAt int64 `protobuf:"varint,3,opt,name=assigned_at,json=assignedAt,proto3" json:"assigned_at,omitempty"`
+    UnassignedAt int64 `protobuf:"varint,4,opt,name=unassigned_at,json=unassignedAt,proto3" json:"unassigned_at,omitempty"` // Optional: when worker was unassigned
+    Reason string `protobuf:"bytes,5,opt,name=reason,proto3" json:"reason,omitempty"` // Reason for assignment/unassignment
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskAssignmentRecord) Reset() {
+    *x = TaskAssignmentRecord{}
+    mi := &file_worker_proto_msgTypes[32]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskAssignmentRecord) String() string {
+    return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskAssignmentRecord) ProtoMessage() {}
+
+func (x *TaskAssignmentRecord) ProtoReflect() protoreflect.Message {
+    mi := &file_worker_proto_msgTypes[32]
+    if x != nil {
+        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+        if ms.LoadMessageInfo() == nil {
+            ms.StoreMessageInfo(mi)
+        }
+        return ms
+    }
+    return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskAssignmentRecord.ProtoReflect.Descriptor instead.
+func (*TaskAssignmentRecord) Descriptor() ([]byte, []int) {
+    return file_worker_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *TaskAssignmentRecord) GetWorkerId() string {
+    if x != nil {
+        return x.WorkerId
+    }
+    return ""
+}
+
+func (x *TaskAssignmentRecord) GetWorkerAddress() string {
+    if x != nil {
+        return x.WorkerAddress
+    }
+    return ""
+}
+
+func (x *TaskAssignmentRecord) GetAssignedAt() int64 {
+    if x != nil {
+        return x.AssignedAt
+    }
+    return 0
+}
+
+func (x *TaskAssignmentRecord) GetUnassignedAt() int64 {
+    if x != nil {
+        return x.UnassignedAt
+    }
+    return 0
+}
+
+func (x *TaskAssignmentRecord) GetReason() string {
+    if x != nil {
+        return x.Reason
+    }
+    return ""
+}
+
+// TaskCreationMetrics tracks why and how a task was created
+type TaskCreationMetrics struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    TriggerMetric string `protobuf:"bytes,1,opt,name=trigger_metric,json=triggerMetric,proto3" json:"trigger_metric,omitempty"` // Name of metric that triggered creation
+    MetricValue float64 `protobuf:"fixed64,2,opt,name=metric_value,json=metricValue,proto3" json:"metric_value,omitempty"` // Value that triggered creation
+    Threshold float64 `protobuf:"fixed64,3,opt,name=threshold,proto3" json:"threshold,omitempty"` // Threshold that was exceeded
+    VolumeMetrics *VolumeHealthMetrics `protobuf:"bytes,4,opt,name=volume_metrics,json=volumeMetrics,proto3" json:"volume_metrics,omitempty"` // Volume health at creation time
+    AdditionalData map[string]string `protobuf:"bytes,5,rep,name=additional_data,json=additionalData,proto3" json:"additional_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Additional context data
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskCreationMetrics) Reset() {
+    *x = TaskCreationMetrics{}
+    mi := &file_worker_proto_msgTypes[33]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskCreationMetrics) String() string {
+    return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskCreationMetrics) ProtoMessage() {}
+
+func (x *TaskCreationMetrics) ProtoReflect() protoreflect.Message {
+    mi := &file_worker_proto_msgTypes[33]
+    if x != nil {
+        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+        if ms.LoadMessageInfo() == nil {
+            ms.StoreMessageInfo(mi)
+        }
+        return ms
+    }
+    return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskCreationMetrics.ProtoReflect.Descriptor instead.
+func (*TaskCreationMetrics) Descriptor() ([]byte, []int) {
+    return file_worker_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *TaskCreationMetrics) GetTriggerMetric() string {
+    if x != nil {
+        return x.TriggerMetric
+    }
+    return ""
+}
+
+func (x *TaskCreationMetrics) GetMetricValue() float64 {
+    if x != nil {
+        return x.MetricValue
+    }
+    return 0
+}
+
+func (x *TaskCreationMetrics) GetThreshold() float64 {
+    if x != nil {
+        return x.Threshold
+    }
+    return 0
+}
+
+func (x *TaskCreationMetrics) GetVolumeMetrics() *VolumeHealthMetrics {
+    if x != nil {
+        return x.VolumeMetrics
+    }
+    return nil
+}
+
+func (x *TaskCreationMetrics) GetAdditionalData() map[string]string {
+    if x != nil {
+        return x.AdditionalData
+    }
+    return nil
+}
+
+// VolumeHealthMetrics captures volume state at task creation
+type VolumeHealthMetrics struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    TotalSize uint64 `protobuf:"varint,1,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+    UsedSize uint64 `protobuf:"varint,2,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
+    GarbageSize uint64 `protobuf:"varint,3,opt,name=garbage_size,json=garbageSize,proto3" json:"garbage_size,omitempty"`
+    GarbageRatio float64 `protobuf:"fixed64,4,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"`
+    FileCount int32 `protobuf:"varint,5,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+    DeletedFileCount int32 `protobuf:"varint,6,opt,name=deleted_file_count,json=deletedFileCount,proto3" json:"deleted_file_count,omitempty"`
+    LastModified int64 `protobuf:"varint,7,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
+    ReplicaCount int32 `protobuf:"varint,8,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"`
+    IsEcVolume bool `protobuf:"varint,9,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
+    Collection string `protobuf:"bytes,10,opt,name=collection,proto3" json:"collection,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
+}
+
+func (x *VolumeHealthMetrics) Reset() {
+    *x = VolumeHealthMetrics{}
+    mi := &file_worker_proto_msgTypes[34]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
+}
+
+func (x *VolumeHealthMetrics) String() string {
+    return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeHealthMetrics) ProtoMessage() {}
+
+func (x *VolumeHealthMetrics) ProtoReflect() protoreflect.Message {
+    mi := &file_worker_proto_msgTypes[34]
+    if x != nil {
+        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+        if ms.LoadMessageInfo() == nil {
+            ms.StoreMessageInfo(mi)
+        }
+        return ms
+    }
+    return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeHealthMetrics.ProtoReflect.Descriptor instead.
+func (*VolumeHealthMetrics) Descriptor() ([]byte, []int) {
+    return file_worker_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *VolumeHealthMetrics) GetTotalSize() uint64 {
+    if x != nil {
+        return x.TotalSize
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetUsedSize() uint64 {
+    if x != nil {
+        return x.UsedSize
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetGarbageSize() uint64 {
+    if x != nil {
+        return x.GarbageSize
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetGarbageRatio() float64 {
+    if x != nil {
+        return x.GarbageRatio
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetFileCount() int32 {
+    if x != nil {
+        return x.FileCount
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetDeletedFileCount() int32 {
+    if x != nil {
+        return x.DeletedFileCount
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetLastModified() int64 {
+    if x != nil {
+        return x.LastModified
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetReplicaCount() int32 {
+    if x != nil {
+        return x.ReplicaCount
+    }
+    return 0
+}
+
+func (x *VolumeHealthMetrics) GetIsEcVolume() bool {
+    if x != nil {
+        return x.IsEcVolume
+    }
+    return false
+}
+
+func (x *VolumeHealthMetrics) GetCollection() string {
+    if x != nil {
+        return x.Collection
+    }
+    return ""
+}
+
+// TaskStateFile wraps task data with metadata for persistence
+type TaskStateFile struct {
+    state protoimpl.MessageState `protogen:"open.v1"`
+    Task *MaintenanceTaskData `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"`
+    LastUpdated int64 `protobuf:"varint,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+    AdminVersion string `protobuf:"bytes,3,opt,name=admin_version,json=adminVersion,proto3" json:"admin_version,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
+}
+
+func (x *TaskStateFile) Reset() {
+    *x = TaskStateFile{}
+    mi := &file_worker_proto_msgTypes[35]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
+}
+
+func (x *TaskStateFile) String() string {
+    return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskStateFile) ProtoMessage() {}
+
+func (x *TaskStateFile) ProtoReflect() protoreflect.Message {
+    mi := &file_worker_proto_msgTypes[35]
+    if x != nil {
+        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+        if ms.LoadMessageInfo() == nil {
+            ms.StoreMessageInfo(mi)
+        }
+        return ms
+    }
+    return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskStateFile.ProtoReflect.Descriptor instead.
+func (*TaskStateFile) Descriptor() ([]byte, []int) {
+    return file_worker_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *TaskStateFile) GetTask() *MaintenanceTaskData {
+    if x != nil {
+        return x.Task
+    }
+    return nil
+}
+
+func (x *TaskStateFile) GetLastUpdated() int64 {
+    if x != nil {
+        return x.LastUpdated
+    }
+    return 0
+}
+
+func (x *TaskStateFile) GetAdminVersion() string {
+    if x != nil {
+        return x.AdminVersion
+    }
+    return ""
+}
+
 var File_worker_proto protoreflect.FileDescriptor
 
 const file_worker_proto_rawDesc = "" +
@@ -2877,26 +3376,26 @@ const file_worker_proto_rawDesc = "" +
     "\bmetadata\x18\x06 \x03(\v2'.worker_pb.TaskAssignment.MetadataEntryR\bmetadata\x1a;\n" +
     "\rMetadataEntry\x12\x10\n" +
     "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
-    "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb3\x04\n" +
+    "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xe1\x04\n" +
     "\n" +
     "TaskParams\x12\x17\n" +
-    "\atask_id\x18\f \x01(\tR\x06taskId\x12\x1b\n" +
-    "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x16\n" +
-    "\x06server\x18\x02 \x01(\tR\x06server\x12\x1e\n" +
+    "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
+    "\tvolume_id\x18\x02 \x01(\rR\bvolumeId\x12\x1e\n" +
     "\n" +
     "collection\x18\x03 \x01(\tR\n" +
     "collection\x12\x1f\n" +
     "\vdata_center\x18\x04 \x01(\tR\n" +
     "dataCenter\x12\x12\n" +
-    "\x04rack\x18\x05 \x01(\tR\x04rack\x12\x1a\n" +
-    "\breplicas\x18\x06 \x03(\tR\breplicas\x12\x1f\n" +
-    "\vvolume_size\x18\v \x01(\x04R\n" +
-    "volumeSize\x12B\n" +
-    "\rvacuum_params\x18\a \x01(\v2\x1b.worker_pb.VacuumTaskParamsH\x00R\fvacuumParams\x12X\n" +
-    "\x15erasure_coding_params\x18\b \x01(\v2\".worker_pb.ErasureCodingTaskParamsH\x00R\x13erasureCodingParams\x12E\n" +
-    "\x0ebalance_params\x18\t \x01(\v2\x1c.worker_pb.BalanceTaskParamsH\x00R\rbalanceParams\x12Q\n" +
-    "\x12replication_params\x18\n" +
-    " \x01(\v2 .worker_pb.ReplicationTaskParamsH\x00R\x11replicationParamsB\r\n" +
+    "\x04rack\x18\x05 \x01(\tR\x04rack\x12\x1f\n" +
+    "\vvolume_size\x18\x06 \x01(\x04R\n" +
+    "volumeSize\x12/\n" +
+    "\asources\x18\a \x03(\v2\x15.worker_pb.TaskSourceR\asources\x12/\n" +
+    "\atargets\x18\b \x03(\v2\x15.worker_pb.TaskTargetR\atargets\x12B\n" +
+    "\rvacuum_params\x18\t \x01(\v2\x1b.worker_pb.VacuumTaskParamsH\x00R\fvacuumParams\x12X\n" +
+    "\x15erasure_coding_params\x18\n" +
+    " \x01(\v2\".worker_pb.ErasureCodingTaskParamsH\x00R\x13erasureCodingParams\x12E\n" +
+    "\x0ebalance_params\x18\v \x01(\v2\x1c.worker_pb.BalanceTaskParamsH\x00R\rbalanceParams\x12Q\n" +
+    "\x12replication_params\x18\f \x01(\v2 .worker_pb.ReplicationTaskParamsH\x00R\x11replicationParamsB\r\n" +
     "\vtask_params\"\xcb\x01\n" +
     "\x10VacuumTaskParams\x12+\n" +
     "\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12!\n" +
@@ -2905,49 +3404,43 @@ const file_worker_proto_rawDesc = "" +
     "batch_size\x18\x03 \x01(\x05R\tbatchSize\x12\x1f\n" +
     "\vworking_dir\x18\x04 \x01(\tR\n" +
     "workingDir\x12'\n" +
-    "\x0fverify_checksum\x18\x05 \x01(\bR\x0everifyChecksum\"\xcb\x03\n" +
+    "\x0fverify_checksum\x18\x05 \x01(\bR\x0everifyChecksum\"\xfe\x01\n" +
     "\x17ErasureCodingTaskParams\x120\n" +
-    "\x14estimated_shard_size\x18\x03 \x01(\x04R\x12estimatedShardSize\x12\x1f\n" +
-    "\vdata_shards\x18\x04 \x01(\x05R\n" +
+    "\x14estimated_shard_size\x18\x01 \x01(\x04R\x12estimatedShardSize\x12\x1f\n" +
+    "\vdata_shards\x18\x02 \x01(\x05R\n" +
     "dataShards\x12#\n" +
-    "\rparity_shards\x18\x05 \x01(\x05R\fparityShards\x12\x1f\n" +
-    "\vworking_dir\x18\x06 \x01(\tR\n" +
+    "\rparity_shards\x18\x03 \x01(\x05R\fparityShards\x12\x1f\n" +
+    "\vworking_dir\x18\x04 \x01(\tR\n" +
     "workingDir\x12#\n" +
-    "\rmaster_client\x18\a \x01(\tR\fmasterClient\x12%\n" +
-    "\x0ecleanup_source\x18\b \x01(\bR\rcleanupSource\x12/\n" +
-    "\x13placement_conflicts\x18\t \x03(\tR\x12placementConflicts\x12<\n" +
-    "\fdestinations\x18\n" +
-    " \x03(\v2\x18.worker_pb.ECDestinationR\fdestinations\x12\\\n" +
-    "\x18existing_shard_locations\x18\v \x03(\v2\".worker_pb.ExistingECShardLocationR\x16existingShardLocations\"\x9a\x01\n" +
-    "\rECDestination\x12\x12\n" +
-    "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" +
-    "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" +
-    "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" +
-    "\vdata_center\x18\x04 \x01(\tR\n" +
-    "dataCenter\x12'\n" +
-    "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\"J\n" +
-    "\x17ExistingECShardLocation\x12\x12\n" +
-    "\x04node\x18\x01 \x01(\tR\x04node\x12\x1b\n" +
-    "\tshard_ids\x18\x02 \x03(\rR\bshardIds\"\xaf\x02\n" +
-    "\x11BalanceTaskParams\x12\x1b\n" +
-    "\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
-    "\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
-    "\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
-    "\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
-    "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
-    "\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12\x1d\n" +
-    "\n" +
-    "force_move\x18\a \x01(\bR\tforceMove\x12'\n" +
-    "\x0ftimeout_seconds\x18\b \x01(\x05R\x0etimeoutSeconds\"\xbf\x02\n" +
-    "\x15ReplicationTaskParams\x12\x1b\n" +
-    "\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
-    "\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
-    "\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
-    "\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
-    "\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
-    "\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12#\n" +
-    "\rreplica_count\x18\a \x01(\x05R\freplicaCount\x12-\n" +
-    "\x12verify_consistency\x18\b \x01(\bR\x11verifyConsistency\"\x8e\x02\n" +
+    "\rmaster_client\x18\x05 \x01(\tR\fmasterClient\x12%\n" +
+    "\x0ecleanup_source\x18\x06 \x01(\bR\rcleanupSource\"\xcf\x01\n" +
+    "\n" +
+    "TaskSource\x12\x12\n" +
+    "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" +
+    "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" +
+    "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" +
+    "\vdata_center\x18\x04 \x01(\tR\n" +
+    "dataCenter\x12\x1b\n" +
+    "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x1b\n" +
+    "\tshard_ids\x18\x06 \x03(\rR\bshardIds\x12%\n" +
+    "\x0eestimated_size\x18\a \x01(\x04R\restimatedSize\"\xcf\x01\n" +
+    "\n" +
+    "TaskTarget\x12\x12\n" +
+    "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" +
+    "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" +
+    "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" +
+    "\vdata_center\x18\x04 \x01(\tR\n" +
+    "dataCenter\x12\x1b\n" +
+    "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x1b\n" +
+    "\tshard_ids\x18\x06 \x03(\rR\bshardIds\x12%\n" +
+    "\x0eestimated_size\x18\a \x01(\x04R\restimatedSize\"[\n" +
+    "\x11BalanceTaskParams\x12\x1d\n" +
+    "\n" +
+    "force_move\x18\x01 \x01(\bR\tforceMove\x12'\n" +
+    "\x0ftimeout_seconds\x18\x02 \x01(\x05R\x0etimeoutSeconds\"k\n" +
+    "\x15ReplicationTaskParams\x12#\n" +
+    "\rreplica_count\x18\x01 \x01(\x05R\freplicaCount\x12-\n" +
+    "\x12verify_consistency\x18\x02 \x01(\bR\x11verifyConsistency\"\x8e\x02\n" +
     "\n" +
     "TaskUpdate\x12\x17\n" +
     "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
@@ -3076,7 +3569,80 @@ const file_worker_proto_rawDesc = "" +
     "\x13imbalance_threshold\x18\x01 \x01(\x01R\x12imbalanceThreshold\x12(\n" +
     "\x10min_server_count\x18\x02 \x01(\x05R\x0eminServerCount\"I\n" +
     "\x15ReplicationTaskConfig\x120\n" +
-    "\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount2V\n" +
+    "\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount\"\xae\a\n" +
+    "\x13MaintenanceTaskData\x12\x0e\n" +
+    "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" +
+    "\x04type\x18\x02 \x01(\tR\x04type\x12\x1a\n" +
+    "\bpriority\x18\x03 \x01(\tR\bpriority\x12\x16\n" +
+    "\x06status\x18\x04 \x01(\tR\x06status\x12\x1b\n" +
+    "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x16\n" +
+    "\x06server\x18\x06 \x01(\tR\x06server\x12\x1e\n" +
+    "\n" +
+    "collection\x18\a \x01(\tR\n" +
+    "collection\x128\n" +
+    "\ftyped_params\x18\b \x01(\v2\x15.worker_pb.TaskParamsR\vtypedParams\x12\x16\n" +
+    "\x06reason\x18\t \x01(\tR\x06reason\x12\x1d\n" +
+    "\n" +
+    "created_at\x18\n" +
+    " \x01(\x03R\tcreatedAt\x12!\n" +
+    "\fscheduled_at\x18\v \x01(\x03R\vscheduledAt\x12\x1d\n" +
+    "\n" +
+    "started_at\x18\f \x01(\x03R\tstartedAt\x12!\n" +
+    "\fcompleted_at\x18\r \x01(\x03R\vcompletedAt\x12\x1b\n" +
+    "\tworker_id\x18\x0e \x01(\tR\bworkerId\x12\x14\n" +
+    "\x05error\x18\x0f \x01(\tR\x05error\x12\x1a\n" +
+    "\bprogress\x18\x10 \x01(\x01R\bprogress\x12\x1f\n" +
+    "\vretry_count\x18\x11 \x01(\x05R\n" +
+    "retryCount\x12\x1f\n" +
+    "\vmax_retries\x18\x12 \x01(\x05R\n" +
+    "maxRetries\x12\x1d\n" +
+    "\n" +
+    "created_by\x18\x13 \x01(\tR\tcreatedBy\x12)\n" +
+    "\x10creation_context\x18\x14 \x01(\tR\x0fcreationContext\x12N\n" +
+    "\x12assignment_history\x18\x15 \x03(\v2\x1f.worker_pb.TaskAssignmentRecordR\x11assignmentHistory\x12'\n" +
+    "\x0fdetailed_reason\x18\x16 \x01(\tR\x0edetailedReason\x12<\n" +
+    "\x04tags\x18\x17 \x03(\v2(.worker_pb.MaintenanceTaskData.TagsEntryR\x04tags\x12I\n" +
+    "\x10creation_metrics\x18\x18 \x01(\v2\x1e.worker_pb.TaskCreationMetricsR\x0fcreationMetrics\x1a7\n" +
+    "\tTagsEntry\x12\x10\n" +
+    "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+    "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb8\x01\n" +
+    "\x14TaskAssignmentRecord\x12\x1b\n" +
+    "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12%\n" +
+    "\x0eworker_address\x18\x02 \x01(\tR\rworkerAddress\x12\x1f\n" +
+    "\vassigned_at\x18\x03 \x01(\x03R\n" +
+    "assignedAt\x12#\n" +
+    "\runassigned_at\x18\x04 \x01(\x03R\funassignedAt\x12\x16\n" +
+    "\x06reason\x18\x05 \x01(\tR\x06reason\"\xe4\x02\n" +
+    "\x13TaskCreationMetrics\x12%\n" +
+    "\x0etrigger_metric\x18\x01 \x01(\tR\rtriggerMetric\x12!\n" +
+    "\fmetric_value\x18\x02 \x01(\x01R\vmetricValue\x12\x1c\n" +
+    "\tthreshold\x18\x03 \x01(\x01R\tthreshold\x12E\n" +
+    "\x0evolume_metrics\x18\x04 \x01(\v2\x1e.worker_pb.VolumeHealthMetricsR\rvolumeMetrics\x12[\n" +
+    "\x0fadditional_data\x18\x05 \x03(\v22.worker_pb.TaskCreationMetrics.AdditionalDataEntryR\x0eadditionalData\x1aA\n" +
+    "\x13AdditionalDataEntry\x12\x10\n" +
+    "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+    "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf2\x02\n" +
+    "\x13VolumeHealthMetrics\x12\x1d\n" +
+    "\n" +
+    "total_size\x18\x01 \x01(\x04R\ttotalSize\x12\x1b\n" +
+    "\tused_size\x18\x02 \x01(\x04R\busedSize\x12!\n" +
+    "\fgarbage_size\x18\x03 \x01(\x04R\vgarbageSize\x12#\n" +
+    "\rgarbage_ratio\x18\x04 \x01(\x01R\fgarbageRatio\x12\x1d\n" +
+    "\n" +
+    "file_count\x18\x05 \x01(\x05R\tfileCount\x12,\n" +
+    "\x12deleted_file_count\x18\x06 \x01(\x05R\x10deletedFileCount\x12#\n" +
+    "\rlast_modified\x18\a \x01(\x03R\flastModified\x12#\n" +
+    "\rreplica_count\x18\b \x01(\x05R\freplicaCount\x12 \n" +
+    "\fis_ec_volume\x18\t \x01(\bR\n" +
+    "isEcVolume\x12\x1e\n" +
+    "\n" +
+    "collection\x18\n" +
+    " \x01(\tR\n" +
+    "collection\"\x8b\x01\n" +
+    "\rTaskStateFile\x122\n" +
+    "\x04task\x18\x01 \x01(\v2\x1e.worker_pb.MaintenanceTaskDataR\x04task\x12!\n" +
+    "\flast_updated\x18\x02 \x01(\x03R\vlastUpdated\x12#\n" +
+    "\radmin_version\x18\x03 \x01(\tR\fadminVersion2V\n" +
     "\rWorkerService\x12E\n" +
     "\fWorkerStream\x12\x18.worker_pb.WorkerMessage\x1a\x17.worker_pb.AdminMessage(\x010\x01B2Z0github.com/seaweedfs/seaweedfs/weed/pb/worker_pbb\x06proto3"
@@ -3092,7 +3658,7 @@ func file_worker_proto_rawDescGZIP() []byte {
     return file_worker_proto_rawDescData
 }
 
-var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 38)
+var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 45)
 var file_worker_proto_goTypes = []any{
     (*WorkerMessage)(nil), // 0: worker_pb.WorkerMessage
     (*AdminMessage)(nil),  // 1: worker_pb.AdminMessage
@@ -3105,8 +3671,8 @@ var file_worker_proto_goTypes = []any{
     (*TaskParams)(nil),              // 8: worker_pb.TaskParams
     (*VacuumTaskParams)(nil),        // 9: worker_pb.VacuumTaskParams
     (*ErasureCodingTaskParams)(nil), // 10: worker_pb.ErasureCodingTaskParams
-    (*ECDestination)(nil),           // 11: worker_pb.ECDestination
-    (*ExistingECShardLocation)(nil), // 12: worker_pb.ExistingECShardLocation
+    (*TaskSource)(nil),              // 11: worker_pb.TaskSource
+    (*TaskTarget)(nil),              // 12: worker_pb.TaskTarget
     (*BalanceTaskParams)(nil),       // 13: worker_pb.BalanceTaskParams
    (*ReplicationTaskParams)(nil),   // 14: worker_pb.ReplicationTaskParams
     (*TaskUpdate)(nil),              // 15: worker_pb.TaskUpdate
@@ -3125,13 +3691,20 @@ var file_worker_proto_goTypes = []any{
     (*ErasureCodingTaskConfig)(nil), // 28: worker_pb.ErasureCodingTaskConfig
     (*BalanceTaskConfig)(nil),       // 29: worker_pb.BalanceTaskConfig
     (*ReplicationTaskConfig)(nil),   // 30: worker_pb.ReplicationTaskConfig
-    nil, // 31: worker_pb.WorkerRegistration.MetadataEntry
-    nil, // 32: worker_pb.TaskAssignment.MetadataEntry
-    nil, // 33: worker_pb.TaskUpdate.MetadataEntry
-    nil, // 34: worker_pb.TaskComplete.ResultMetadataEntry
-    nil, // 35: worker_pb.TaskLogMetadata.CustomDataEntry
-    nil, // 36: worker_pb.TaskLogEntry.FieldsEntry
-    nil, // 37: worker_pb.MaintenancePolicy.TaskPoliciesEntry
+    (*MaintenanceTaskData)(nil),     // 31: worker_pb.MaintenanceTaskData
+    (*TaskAssignmentRecord)(nil),    // 32: worker_pb.TaskAssignmentRecord
+    (*TaskCreationMetrics)(nil),     // 33: worker_pb.TaskCreationMetrics
+    (*VolumeHealthMetrics)(nil),     // 34: worker_pb.VolumeHealthMetrics
+    (*TaskStateFile)(nil),           // 35: worker_pb.TaskStateFile
+    nil, // 36: worker_pb.WorkerRegistration.MetadataEntry
+    nil, // 37: worker_pb.TaskAssignment.MetadataEntry
+    nil, // 38: worker_pb.TaskUpdate.MetadataEntry
+    nil, // 39: worker_pb.TaskComplete.ResultMetadataEntry
+    nil, // 40: worker_pb.TaskLogMetadata.CustomDataEntry
+    nil, // 41: worker_pb.TaskLogEntry.FieldsEntry
+    nil, // 42: worker_pb.MaintenancePolicy.TaskPoliciesEntry
+    nil, // 43: worker_pb.MaintenanceTaskData.TagsEntry
+    nil, // 44: worker_pb.TaskCreationMetrics.AdditionalDataEntry
 }
 var file_worker_proto_depIdxs = []int32{
     2, // 0: worker_pb.WorkerMessage.registration:type_name -> worker_pb.WorkerRegistration
@@ -3147,35 +3720,42 @@ var file_worker_proto_depIdxs = []int32{
     17, // 10: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation
     19, // 11: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown
     20, // 12: worker_pb.AdminMessage.task_log_request:type_name -> worker_pb.TaskLogRequest
-    31, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
+    36, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
8, // 14: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams - 32, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry - 9, // 16: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams - 10, // 17: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams - 13, // 18: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams - 14, // 19: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams - 11, // 20: worker_pb.ErasureCodingTaskParams.destinations:type_name -> worker_pb.ECDestination - 12, // 21: worker_pb.ErasureCodingTaskParams.existing_shard_locations:type_name -> worker_pb.ExistingECShardLocation - 33, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry - 34, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry + 37, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry + 11, // 16: worker_pb.TaskParams.sources:type_name -> worker_pb.TaskSource + 12, // 17: worker_pb.TaskParams.targets:type_name -> worker_pb.TaskTarget + 9, // 18: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams + 10, // 19: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams + 13, // 20: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams + 14, // 21: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams + 38, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry + 39, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry 22, // 24: worker_pb.TaskLogResponse.metadata:type_name -> worker_pb.TaskLogMetadata 23, // 25: worker_pb.TaskLogResponse.log_entries:type_name -> worker_pb.TaskLogEntry - 35, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry - 36, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry + 40, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry + 41, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry 25, // 28: worker_pb.MaintenanceConfig.policy:type_name -> worker_pb.MaintenancePolicy - 37, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry + 42, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry 27, // 30: worker_pb.TaskPolicy.vacuum_config:type_name -> worker_pb.VacuumTaskConfig 28, // 31: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig 29, // 32: worker_pb.TaskPolicy.balance_config:type_name -> worker_pb.BalanceTaskConfig 30, // 33: worker_pb.TaskPolicy.replication_config:type_name -> worker_pb.ReplicationTaskConfig - 26, // 34: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy - 0, // 35: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage - 1, // 36: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage - 36, // [36:37] is the sub-list for method output_type - 35, // [35:36] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // 
[0:35] is the sub-list for field type_name + 8, // 34: worker_pb.MaintenanceTaskData.typed_params:type_name -> worker_pb.TaskParams + 32, // 35: worker_pb.MaintenanceTaskData.assignment_history:type_name -> worker_pb.TaskAssignmentRecord + 43, // 36: worker_pb.MaintenanceTaskData.tags:type_name -> worker_pb.MaintenanceTaskData.TagsEntry + 33, // 37: worker_pb.MaintenanceTaskData.creation_metrics:type_name -> worker_pb.TaskCreationMetrics + 34, // 38: worker_pb.TaskCreationMetrics.volume_metrics:type_name -> worker_pb.VolumeHealthMetrics + 44, // 39: worker_pb.TaskCreationMetrics.additional_data:type_name -> worker_pb.TaskCreationMetrics.AdditionalDataEntry + 31, // 40: worker_pb.TaskStateFile.task:type_name -> worker_pb.MaintenanceTaskData + 26, // 41: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy + 0, // 42: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage + 1, // 43: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage + 43, // [43:44] is the sub-list for method output_type + 42, // [42:43] is the sub-list for method input_type + 42, // [42:42] is the sub-list for extension type_name + 42, // [42:42] is the sub-list for extension extendee + 0, // [0:42] is the sub-list for field type_name } func init() { file_worker_proto_init() } @@ -3218,7 +3798,7 @@ func file_worker_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)), NumEnums: 0, - NumMessages: 38, + NumMessages: 45, NumExtensions: 0, NumServices: 1, }, |
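A note for anyone auditing the regenerated tables: file_worker_proto_goTypes enumerates every generated Go type, and each file_worker_proto_depIdxs entry is an index into that slice, one per message-typed field, followed by the stream's input and output types, with the five trailing entries marking the sub-list boundaries. Reading entry 16 above, the value 11 records that worker_pb.TaskParams.sources resolves to goTypes[11], i.e. *TaskSource. Inserting the five new messages (MaintenanceTaskData through TaskStateFile) at indices 31-35, plus their two map entries, is also why the map-entry placeholders shift from 31-37 to 36-44 and NumMessages moves from 38 to 45.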
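To make the schema change concrete, here is a minimal sketch of how a caller might populate the new unified source/target arrays for an EC task. The message and field names (TaskParams, TaskSource, TaskTarget, the TaskParams_ErasureCodingParams oneof wrapper) follow standard protoc-gen-go output for the descriptor above; every concrete value (addresses, IDs, shard assignments) is hypothetical.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

func main() {
	// Hypothetical values throughout; only the message and field names
	// come from the regenerated descriptor above.
	params := &worker_pb.TaskParams{
		TaskId:     "ec-task-0001", // hypothetical ActiveTopology task ID
		VolumeId:   42,
		Collection: "pics",
		DataCenter: "dc1",
		Rack:       "rack-a",
		VolumeSize: 1 << 30,
		// Source replicas of the volume being encoded.
		Sources: []*worker_pb.TaskSource{
			{Node: "10.0.0.1:8080", DiskId: 0, Rack: "rack-a", DataCenter: "dc1", VolumeId: 42},
		},
		// Planned destinations; ShardIds carries the planned shard
		// distribution per target, replacing the old ECDestination list.
		Targets: []*worker_pb.TaskTarget{
			{Node: "10.0.0.2:8080", DiskId: 1, Rack: "rack-b", DataCenter: "dc1", ShardIds: []uint32{0, 1, 2, 3, 4, 5, 6}},
			{Node: "10.0.0.3:8080", DiskId: 0, Rack: "rack-c", DataCenter: "dc1", ShardIds: []uint32{7, 8, 9, 10, 11, 12, 13}},
		},
		TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
			ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
				DataShards:   10, // illustrative defaults
				ParityShards: 4,
			},
		},
	}
	for _, t := range params.GetTargets() {
		fmt.Printf("%s disk %d gets shards %v\n", t.GetNode(), t.GetDiskId(), t.GetShardIds())
	}
}

Because sources and targets now live on TaskParams itself, the per-type parameter messages shrink to only what is genuinely type-specific, which is exactly what the slimmed-down BalanceTaskParams and ReplicationTaskParams descriptors above show.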
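Likewise, the new MaintenanceTaskData / TaskStateFile pair gives the admin side a typed record of a task, including its typed_params, assignment history, tags, and creation metrics. Below is a minimal persistence sketch, assuming a text-proto encoding for readability (the actual admin code may serialize differently); field names follow the generated Go bindings, and all values are illustrative.

package main

import (
	"os"
	"time"

	"google.golang.org/protobuf/encoding/prototext"

	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)

// saveTaskState wraps the task data in a TaskStateFile and writes it out.
func saveTaskState(path string, task *worker_pb.MaintenanceTaskData) error {
	state := &worker_pb.TaskStateFile{
		Task:         task,
		LastUpdated:  time.Now().Unix(),
		AdminVersion: "unknown", // hypothetical version string
	}
	data, err := prototext.MarshalOptions{Multiline: true}.Marshal(state)
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0o644)
}

func main() {
	task := &worker_pb.MaintenanceTaskData{
		Id:       "ec-task-0001", // hypothetical
		Type:     "erasure_coding",
		Priority: "normal",
		Status:   "pending",
		VolumeId: 42,
		Tags:     map[string]string{"trigger": "fullness"}, // hypothetical tag
	}
	if err := saveTaskState("/tmp/task_state.pb.txt", task); err != nil {
		panic(err)
	}
}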
