author     Chris Lu <chrislusf@users.noreply.github.com>  2025-07-16 23:39:27 -0700
committer  GitHub <noreply@github.com>  2025-07-16 23:39:27 -0700
commit     69553e5ba6d46ed924b0c3adc3f8d9666550999a (patch)
tree       7711c4d9fe1919d2c6eaa841779bcde6e24b0248
parent     a524b4f485ce5aa2f234c742bd7d1e75386f569b (diff)
convert error formatting to %w everywhere (#6995)
-rw-r--r--  weed/admin/dash/admin_server.go | 28
-rw-r--r--  weed/admin/dash/bucket_management.go | 8
-rw-r--r--  weed/admin/dash/cluster_topology.go | 2
-rw-r--r--  weed/admin/dash/config_persistence.go | 30
-rw-r--r--  weed/admin/dash/mq_management.go | 10
-rw-r--r--  weed/admin/dash/topic_retention.go | 8
-rw-r--r--  weed/admin/dash/user_management.go | 22
-rw-r--r--  weed/admin/dash/worker_grpc_server.go | 2
-rw-r--r--  weed/admin/handlers/file_browser_handlers.go | 20
-rw-r--r--  weed/admin/maintenance/maintenance_manager.go | 2
-rw-r--r--  weed/admin/maintenance/maintenance_scanner.go | 2
-rw-r--r--  weed/admin/maintenance/maintenance_worker.go | 6
-rw-r--r--  weed/command/admin.go | 8
-rw-r--r--  weed/command/filer_copy.go | 16
-rw-r--r--  weed/command/filer_meta_backup.go | 4
-rw-r--r--  weed/command/filer_remote_gateway_buckets.go | 4
-rw-r--r--  weed/command/filer_remote_sync_dir.go | 4
-rw-r--r--  weed/command/fix.go | 4
-rw-r--r--  weed/command/master_follower.go | 2
-rw-r--r--  weed/command/mount_std.go | 2
-rw-r--r--  weed/credential/filer_etc/filer_etc_identity.go | 18
-rw-r--r--  weed/credential/migration.go | 6
-rw-r--r--  weed/credential/postgres/postgres_identity.go | 66
-rw-r--r--  weed/credential/postgres/postgres_policy.go | 18
-rw-r--r--  weed/credential/postgres/postgres_store.go | 12
-rw-r--r--  weed/filer/abstract_sql/abstract_sql_store_kv.go | 8
-rw-r--r--  weed/filer/arangodb/arangodb_store_kv.go | 4
-rw-r--r--  weed/filer/cassandra/cassandra_store_kv.go | 2
-rw-r--r--  weed/filer/cassandra2/cassandra_store_kv.go | 2
-rw-r--r--  weed/filer/elastic/v7/elastic_store.go | 8
-rw-r--r--  weed/filer/elastic/v7/elastic_store_kv.go | 6
-rw-r--r--  weed/filer/etcd/etcd_store.go | 4
-rw-r--r--  weed/filer/etcd/etcd_store_kv.go | 6
-rw-r--r--  weed/filer/filechunk_manifest.go | 2
-rw-r--r--  weed/filer/filer_delete_entry.go | 4
-rw-r--r--  weed/filer/filer_notify.go | 6
-rw-r--r--  weed/filer/filer_notify_append.go | 2
-rw-r--r--  weed/filer/filer_notify_read.go | 8
-rw-r--r--  weed/filer/hbase/hbase_store.go | 2
-rw-r--r--  weed/filer/leveldb/leveldb_store_kv.go | 6
-rw-r--r--  weed/filer/leveldb3/leveldb3_store_kv.go | 6
-rw-r--r--  weed/filer/meta_aggregator.go | 4
-rw-r--r--  weed/filer/mongodb/mongodb_store_kv.go | 4
-rw-r--r--  weed/filer/mysql/mysql_store.go | 2
-rw-r--r--  weed/filer/redis/universal_redis_store_kv.go | 4
-rw-r--r--  weed/filer/redis2/universal_redis_store_kv.go | 4
-rw-r--r--  weed/filer/redis3/universal_redis_store_kv.go | 4
-rw-r--r--  weed/filer/redis_lua/universal_redis_store_kv.go | 4
-rw-r--r--  weed/filer/remote_mapping.go | 16
-rw-r--r--  weed/filer/rocksdb/rocksdb_store.go | 2
-rw-r--r--  weed/filer/rocksdb/rocksdb_store_kv.go | 6
-rw-r--r--  weed/filer/s3iam_conf.go | 4
-rw-r--r--  weed/filer/stream.go | 2
-rw-r--r--  weed/filer/tarantool/tarantool_store.go | 4
-rw-r--r--  weed/filer/tarantool/tarantool_store_kv.go | 4
-rw-r--r--  weed/filer/ydb/ydb_store.go | 6
-rw-r--r--  weed/glog/glog_file.go | 2
-rw-r--r--  weed/iamapi/iamapi_server.go | 2
-rw-r--r--  weed/mount/weedfs_write.go | 2
-rw-r--r--  weed/mq/broker/broker_connect.go | 4
-rw-r--r--  weed/mq/broker/broker_grpc_configure.go | 2
-rw-r--r--  weed/mq/broker/broker_grpc_lookup.go | 8
-rw-r--r--  weed/mq/broker/broker_grpc_pub.go | 2
-rw-r--r--  weed/mq/broker/broker_grpc_pub_balancer.go | 2
-rw-r--r--  weed/mq/broker/broker_topic_conf_read_write.go | 4
-rw-r--r--  weed/mq/client/agent_client/publish_session.go | 6
-rw-r--r--  weed/mq/client/agent_client/subscribe_session.go | 4
-rw-r--r--  weed/mq/client/pub_client/publish.go | 2
-rw-r--r--  weed/mq/client/pub_client/scheduler.go | 10
-rw-r--r--  weed/mq/client/sub_client/on_each_partition.go | 4
-rw-r--r--  weed/mq/logstore/log_to_parquet.go | 26
-rw-r--r--  weed/mq/logstore/read_log_from_disk.go | 4
-rw-r--r--  weed/mq/logstore/read_parquet_to_log.go | 6
-rw-r--r--  weed/mq/schema/to_parquet_schema.go | 2
-rw-r--r--  weed/mq/topic/local_partition.go | 2
-rw-r--r--  weed/mq/topic/topic.go | 10
-rw-r--r--  weed/notification/aws_sqs/aws_sqs_pub.go | 2
-rw-r--r--  weed/notification/webhook/http.go | 8
-rw-r--r--  weed/notification/webhook/webhook_queue.go | 4
-rw-r--r--  weed/operation/assign_file_id.go | 4
-rw-r--r--  weed/operation/chunked_file.go | 2
-rw-r--r--  weed/operation/upload_content.go | 10
-rw-r--r--  weed/pb/filer_pb/filer_client_bfs.go | 4
-rw-r--r--  weed/pb/filer_pb/filer_pb_helper.go | 6
-rw-r--r--  weed/pb/filer_pb_tail.go | 6
-rw-r--r--  weed/pb/grpc_client_server.go | 4
-rw-r--r--  weed/remote_storage/gcs/gcs_storage_client.go | 2
-rw-r--r--  weed/remote_storage/s3/aliyun.go | 2
-rw-r--r--  weed/remote_storage/s3/backblaze.go | 2
-rw-r--r--  weed/remote_storage/s3/baidu.go | 2
-rw-r--r--  weed/remote_storage/s3/contabo.go | 2
-rw-r--r--  weed/remote_storage/s3/filebase.go | 2
-rw-r--r--  weed/remote_storage/s3/s3_storage_client.go | 8
-rw-r--r--  weed/remote_storage/s3/storj.go | 2
-rw-r--r--  weed/remote_storage/s3/tencent.go | 2
-rw-r--r--  weed/remote_storage/s3/wasabi.go | 2
-rw-r--r--  weed/replication/replicator.go | 2
-rw-r--r--  weed/replication/sink/filersink/fetch_write.go | 4
-rw-r--r--  weed/replication/sink/s3sink/s3_sink.go | 2
-rw-r--r--  weed/replication/sub/notification_aws_sqs.go | 2
-rw-r--r--  weed/replication/sub/notification_kafka.go | 2
-rw-r--r--  weed/s3api/auth_credentials.go | 4
-rw-r--r--  weed/s3api/cors/cors.go | 12
-rw-r--r--  weed/s3api/policy_engine/engine.go | 4
-rw-r--r--  weed/s3api/policy_engine/types.go | 4
-rw-r--r--  weed/s3api/s3api_bucket_config.go | 4
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go | 4
-rw-r--r--  weed/s3api/s3api_circuit_breaker.go | 4
-rw-r--r--  weed/s3api/s3api_object_handlers_copy.go | 32
-rw-r--r--  weed/s3api/s3api_object_handlers_list.go | 2
-rw-r--r--  weed/s3api/s3api_object_handlers_put.go | 4
-rw-r--r--  weed/s3api/s3api_object_retention.go | 10
-rw-r--r--  weed/s3api/s3api_object_versioning.go | 6
-rw-r--r--  weed/server/common.go | 14
-rw-r--r--  weed/server/filer_grpc_server.go | 2
-rw-r--r--  weed/server/filer_grpc_server_sub_meta.go | 8
-rw-r--r--  weed/server/filer_grpc_server_traverse_meta.go | 2
-rw-r--r--  weed/server/filer_server_handlers_write_cipher.go | 4
-rw-r--r--  weed/server/raft_hashicorp.go | 16
-rw-r--r--  weed/server/volume_grpc_copy.go | 4
-rw-r--r--  weed/server/volume_grpc_erasure_coding.go | 2
-rw-r--r--  weed/server/volume_grpc_remote.go | 2
-rw-r--r--  weed/server/volume_grpc_tail.go | 2
-rw-r--r--  weed/server/volume_server_handlers_write.go | 2
-rw-r--r--  weed/server/webdav_server.go | 4
-rw-r--r--  weed/sftpd/sftp_file_reader.go | 4
-rw-r--r--  weed/sftpd/sftp_filer.go | 12
-rw-r--r--  weed/sftpd/sftp_server.go | 2
-rw-r--r--  weed/sftpd/sftp_service.go | 6
-rw-r--r--  weed/sftpd/user/filestore.go | 10
-rw-r--r--  weed/shell/command_cluster_raft_add.go | 2
-rw-r--r--  weed/shell/command_cluster_raft_ps.go | 2
-rw-r--r--  weed/shell/command_cluster_raft_remove.go | 2
-rw-r--r--  weed/shell/command_ec_common.go | 2
-rw-r--r--  weed/shell/command_ec_encode.go | 10
-rw-r--r--  weed/shell/command_mq_topic_list.go | 2
-rw-r--r--  weed/shell/command_remote_cache.go | 4
-rw-r--r--  weed/shell/command_remote_meta_sync.go | 2
-rw-r--r--  weed/shell/command_remote_mount.go | 6
-rw-r--r--  weed/shell/command_remote_mount_buckets.go | 2
-rw-r--r--  weed/shell/command_remote_uncache.go | 2
-rw-r--r--  weed/shell/command_remote_unmount.go | 4
-rw-r--r--  weed/shell/command_s3_bucket_create.go | 2
-rw-r--r--  weed/shell/command_s3_bucket_delete.go | 2
-rw-r--r--  weed/shell/command_s3_bucket_list.go | 6
-rw-r--r--  weed/shell/command_s3_bucket_quota.go | 2
-rw-r--r--  weed/shell/command_s3_bucket_quota_check.go | 4
-rw-r--r--  weed/shell/command_s3_clean_uploads.go | 6
-rw-r--r--  weed/shell/command_volume_check_disk.go | 2
-rw-r--r--  weed/shell/command_volume_configure_replication.go | 2
-rw-r--r--  weed/shell/command_volume_fsck.go | 16
-rw-r--r--  weed/storage/disk_location_ec.go | 4
-rw-r--r--  weed/storage/erasure_coding/ec_encoder.go | 24
-rw-r--r--  weed/storage/erasure_coding/ec_volume.go | 2
-rw-r--r--  weed/storage/erasure_coding/ec_volume_delete.go | 6
-rw-r--r--  weed/storage/needle/needle.go | 4
-rw-r--r--  weed/storage/needle_map/memdb.go | 2
-rw-r--r--  weed/storage/needle_map_leveldb.go | 4
-rw-r--r--  weed/storage/store_ec.go | 8
-rw-r--r--  weed/storage/volume_checking.go | 2
-rw-r--r--  weed/storage/volume_info/volume_info.go | 4
-rw-r--r--  weed/storage/volume_loading.go | 2
-rw-r--r--  weed/storage/volume_read.go | 6
-rw-r--r--  weed/storage/volume_write.go | 2
-rw-r--r--  weed/telemetry/client.go | 6
-rw-r--r--  weed/topology/cluster_commands.go | 4
-rw-r--r--  weed/topology/store_replicate.go | 2
-rw-r--r--  weed/wdclient/net2/managed_connection.go | 2
-rw-r--r--  weed/wdclient/resource_pool/simple_resource_pool.go | 2
-rw-r--r--  weed/worker/client.go | 18
-rw-r--r--  weed/worker/tasks/balance/ui.go | 10
-rw-r--r--  weed/worker/tasks/erasure_coding/ui.go | 10
-rw-r--r--  weed/worker/tasks/vacuum/ui.go | 10
-rw-r--r--  weed/worker/worker.go | 4
174 files changed, 524 insertions, 524 deletions
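
The change is mechanical but not cosmetic: with the %w verb, fmt.Errorf wraps the underlying error so callers can still reach it via errors.Is, errors.As, and errors.Unwrap, whereas %v only copies the error's text into a new, opaque error. A minimal standalone sketch (not code from this repository) of the difference:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func readConfigV(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %v flattens the error into text; the original value is lost.
		return fmt.Errorf("failed to read config file: %v", err)
	}
	return nil
}

func readConfigW(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %w wraps the error; callers can still inspect the cause.
		return fmt.Errorf("failed to read config file: %w", err)
	}
	return nil
}

func main() {
	errV := readConfigV("/no/such/file")
	errW := readConfigW("/no/such/file")

	fmt.Println(errors.Is(errV, fs.ErrNotExist)) // false: the chain was broken by %v
	fmt.Println(errors.Is(errW, fs.ErrNotExist)) // true: %w preserves the chain
}

Running it prints false for the %v version and true for the %w version; the wrapped messages look identical, only the recoverable chain differs.
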
diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go
index 156adca75..9f97677e3 100644
--- a/weed/admin/dash/admin_server.go
+++ b/weed/admin/dash/admin_server.go
@@ -215,7 +215,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
- return nil, fmt.Errorf("failed to get volume information: %v", err)
+ return nil, fmt.Errorf("failed to get volume information: %w", err)
}
// Get filer configuration to determine FilerGroup
@@ -232,7 +232,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
- return nil, fmt.Errorf("failed to get filer configuration: %v", err)
+ return nil, fmt.Errorf("failed to get filer configuration: %w", err)
}
// Now list buckets from the filer and match with collection data
@@ -330,7 +330,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
- return nil, fmt.Errorf("failed to list Object Store buckets: %v", err)
+ return nil, fmt.Errorf("failed to list Object Store buckets: %w", err)
}
return buckets, nil
@@ -355,7 +355,7 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error
Name: bucketName,
})
if err != nil {
- return fmt.Errorf("bucket not found: %v", err)
+ return fmt.Errorf("bucket not found: %w", err)
}
details.Bucket.CreatedAt = time.Unix(bucketResp.Entry.Attributes.Crtime, 0)
@@ -488,7 +488,7 @@ func (s *AdminServer) DeleteS3Bucket(bucketName string) error {
IgnoreRecursiveError: false,
})
if err != nil {
- return fmt.Errorf("failed to delete bucket: %v", err)
+ return fmt.Errorf("failed to delete bucket: %w", err)
}
return nil
@@ -687,7 +687,7 @@ func (s *AdminServer) GetClusterFilers() (*ClusterFilersData, error) {
})
if err != nil {
- return nil, fmt.Errorf("failed to get filer nodes from master: %v", err)
+ return nil, fmt.Errorf("failed to get filer nodes from master: %w", err)
}
return &ClusterFilersData{
@@ -729,7 +729,7 @@ func (s *AdminServer) GetClusterBrokers() (*ClusterBrokersData, error) {
})
if err != nil {
- return nil, fmt.Errorf("failed to get broker nodes from master: %v", err)
+ return nil, fmt.Errorf("failed to get broker nodes from master: %w", err)
}
return &ClusterBrokersData{
@@ -1170,7 +1170,7 @@ func (as *AdminServer) getMaintenanceConfig() (*maintenance.MaintenanceConfigDat
func (as *AdminServer) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error {
// Save configuration to persistent storage
if err := as.configPersistence.SaveMaintenanceConfig(config); err != nil {
- return fmt.Errorf("failed to save maintenance configuration: %v", err)
+ return fmt.Errorf("failed to save maintenance configuration: %w", err)
}
// Update maintenance manager if available
@@ -1357,7 +1357,7 @@ func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partition
// Find broker leader to create the topic
brokerLeader, err := s.findBrokerLeader()
if err != nil {
- return fmt.Errorf("failed to find broker leader: %v", err)
+ return fmt.Errorf("failed to find broker leader: %w", err)
}
// Create retention configuration
@@ -1391,7 +1391,7 @@ func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partition
})
if err != nil {
- return fmt.Errorf("failed to create topic: %v", err)
+ return fmt.Errorf("failed to create topic: %w", err)
}
glog.V(0).Infof("Created topic %s.%s with %d partitions (retention: enabled=%v, seconds=%d)",
@@ -1421,7 +1421,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
})
if err != nil {
- return fmt.Errorf("failed to get broker nodes from master: %v", err)
+ return fmt.Errorf("failed to get broker nodes from master: %w", err)
}
if brokerAddress == "" {
@@ -1431,7 +1431,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
// Create gRPC connection
conn, err := grpc.Dial(brokerAddress, s.grpcDialOption)
if err != nil {
- return fmt.Errorf("failed to connect to broker: %v", err)
+ return fmt.Errorf("failed to connect to broker: %w", err)
}
defer conn.Close()
@@ -1448,7 +1448,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
},
})
if err != nil {
- return fmt.Errorf("failed to get current topic configuration: %v", err)
+ return fmt.Errorf("failed to get current topic configuration: %w", err)
}
// Create the topic configuration request, preserving all existing settings
@@ -1480,7 +1480,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
// Send the configuration request with preserved settings
_, err = client.ConfigureTopic(ctx, configRequest)
if err != nil {
- return fmt.Errorf("failed to update topic retention: %v", err)
+ return fmt.Errorf("failed to update topic retention: %w", err)
}
glog.V(0).Infof("Updated topic %s.%s retention (enabled: %v, seconds: %d) while preserving %d partitions",
diff --git a/weed/admin/dash/bucket_management.go b/weed/admin/dash/bucket_management.go
index 09a8449a5..faa19ec99 100644
--- a/weed/admin/dash/bucket_management.go
+++ b/weed/admin/dash/bucket_management.go
@@ -251,7 +251,7 @@ func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaE
Name: bucketName,
})
if err != nil {
- return fmt.Errorf("bucket not found: %v", err)
+ return fmt.Errorf("bucket not found: %w", err)
}
bucketEntry := lookupResp.Entry
@@ -275,7 +275,7 @@ func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaE
Entry: bucketEntry,
})
if err != nil {
- return fmt.Errorf("failed to update bucket quota: %v", err)
+ return fmt.Errorf("failed to update bucket quota: %w", err)
}
return nil
@@ -308,7 +308,7 @@ func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes
})
// Ignore error if directory already exists
if err != nil && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "existing entry") {
- return fmt.Errorf("failed to create /buckets directory: %v", err)
+ return fmt.Errorf("failed to create /buckets directory: %w", err)
}
// Check if bucket already exists
@@ -368,7 +368,7 @@ func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes
},
})
if err != nil {
- return fmt.Errorf("failed to create bucket directory: %v", err)
+ return fmt.Errorf("failed to create bucket directory: %w", err)
}
return nil
diff --git a/weed/admin/dash/cluster_topology.go b/weed/admin/dash/cluster_topology.go
index 9abc8ac2a..2bac7145e 100644
--- a/weed/admin/dash/cluster_topology.go
+++ b/weed/admin/dash/cluster_topology.go
@@ -25,7 +25,7 @@ func (s *AdminServer) GetClusterTopology() (*ClusterTopology, error) {
if err != nil {
currentMaster := s.masterClient.GetMaster(context.Background())
glog.Errorf("Failed to connect to master server %s: %v", currentMaster, err)
- return nil, fmt.Errorf("gRPC topology request failed: %v", err)
+ return nil, fmt.Errorf("gRPC topology request failed: %w", err)
}
// Cache the result
diff --git a/weed/admin/dash/config_persistence.go b/weed/admin/dash/config_persistence.go
index 93d9f6a09..a2f74f4e7 100644
--- a/weed/admin/dash/config_persistence.go
+++ b/weed/admin/dash/config_persistence.go
@@ -40,18 +40,18 @@ func (cp *ConfigPersistence) SaveMaintenanceConfig(config *MaintenanceConfig) er
// Create directory if it doesn't exist
if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
- return fmt.Errorf("failed to create config directory: %v", err)
+ return fmt.Errorf("failed to create config directory: %w", err)
}
// Marshal configuration to JSON
configData, err := json.MarshalIndent(config, "", " ")
if err != nil {
- return fmt.Errorf("failed to marshal maintenance config: %v", err)
+ return fmt.Errorf("failed to marshal maintenance config: %w", err)
}
// Write to file
if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to write maintenance config file: %v", err)
+ return fmt.Errorf("failed to write maintenance config file: %w", err)
}
glog.V(1).Infof("Saved maintenance configuration to %s", configPath)
@@ -76,13 +76,13 @@ func (cp *ConfigPersistence) LoadMaintenanceConfig() (*MaintenanceConfig, error)
// Read file
configData, err := os.ReadFile(configPath)
if err != nil {
- return nil, fmt.Errorf("failed to read maintenance config file: %v", err)
+ return nil, fmt.Errorf("failed to read maintenance config file: %w", err)
}
// Unmarshal JSON
var config MaintenanceConfig
if err := json.Unmarshal(configData, &config); err != nil {
- return nil, fmt.Errorf("failed to unmarshal maintenance config: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal maintenance config: %w", err)
}
glog.V(1).Infof("Loaded maintenance configuration from %s", configPath)
@@ -99,18 +99,18 @@ func (cp *ConfigPersistence) SaveAdminConfig(config map[string]interface{}) erro
// Create directory if it doesn't exist
if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
- return fmt.Errorf("failed to create config directory: %v", err)
+ return fmt.Errorf("failed to create config directory: %w", err)
}
// Marshal configuration to JSON
configData, err := json.MarshalIndent(config, "", " ")
if err != nil {
- return fmt.Errorf("failed to marshal admin config: %v", err)
+ return fmt.Errorf("failed to marshal admin config: %w", err)
}
// Write to file
if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to write admin config file: %v", err)
+ return fmt.Errorf("failed to write admin config file: %w", err)
}
glog.V(1).Infof("Saved admin configuration to %s", configPath)
@@ -135,13 +135,13 @@ func (cp *ConfigPersistence) LoadAdminConfig() (map[string]interface{}, error) {
// Read file
configData, err := os.ReadFile(configPath)
if err != nil {
- return nil, fmt.Errorf("failed to read admin config file: %v", err)
+ return nil, fmt.Errorf("failed to read admin config file: %w", err)
}
// Unmarshal JSON
var config map[string]interface{}
if err := json.Unmarshal(configData, &config); err != nil {
- return nil, fmt.Errorf("failed to unmarshal admin config: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal admin config: %w", err)
}
glog.V(1).Infof("Loaded admin configuration from %s", configPath)
@@ -164,7 +164,7 @@ func (cp *ConfigPersistence) ListConfigFiles() ([]string, error) {
files, err := os.ReadDir(cp.dataDir)
if err != nil {
- return nil, fmt.Errorf("failed to read config directory: %v", err)
+ return nil, fmt.Errorf("failed to read config directory: %w", err)
}
var configFiles []string
@@ -196,11 +196,11 @@ func (cp *ConfigPersistence) BackupConfig(filename string) error {
// Copy file
configData, err := os.ReadFile(configPath)
if err != nil {
- return fmt.Errorf("failed to read config file: %v", err)
+ return fmt.Errorf("failed to read config file: %w", err)
}
if err := os.WriteFile(backupPath, configData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to create backup: %v", err)
+ return fmt.Errorf("failed to create backup: %w", err)
}
glog.V(1).Infof("Created backup of %s as %s", filename, backupName)
@@ -221,13 +221,13 @@ func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error {
// Read backup file
backupData, err := os.ReadFile(backupPath)
if err != nil {
- return fmt.Errorf("failed to read backup file: %v", err)
+ return fmt.Errorf("failed to read backup file: %w", err)
}
// Write to config file
configPath := filepath.Join(cp.dataDir, filename)
if err := os.WriteFile(configPath, backupData, ConfigFilePermissions); err != nil {
- return fmt.Errorf("failed to restore config: %v", err)
+ return fmt.Errorf("failed to restore config: %w", err)
}
glog.V(1).Infof("Restored %s from backup %s", filename, backupName)
diff --git a/weed/admin/dash/mq_management.go b/weed/admin/dash/mq_management.go
index d47547f6b..5e513af1e 100644
--- a/weed/admin/dash/mq_management.go
+++ b/weed/admin/dash/mq_management.go
@@ -154,7 +154,7 @@ func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetail
// Find broker leader
brokerLeader, err := s.findBrokerLeader()
if err != nil {
- return nil, fmt.Errorf("failed to find broker leader: %v", err)
+ return nil, fmt.Errorf("failed to find broker leader: %w", err)
}
var topicDetails *TopicDetailsData
@@ -172,7 +172,7 @@ func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetail
},
})
if err != nil {
- return fmt.Errorf("failed to get topic configuration: %v", err)
+ return fmt.Errorf("failed to get topic configuration: %w", err)
}
// Initialize topic details
@@ -297,7 +297,7 @@ func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]Co
if err == io.EOF {
break
}
- return fmt.Errorf("failed to receive version entries: %v", err)
+ return fmt.Errorf("failed to receive version entries: %w", err)
}
// Only process directories that are versions (start with "v")
@@ -398,7 +398,7 @@ func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]Co
})
if err != nil {
- return nil, fmt.Errorf("failed to get consumer group offsets: %v", err)
+ return nil, fmt.Errorf("failed to get consumer group offsets: %w", err)
}
return offsets, nil
@@ -544,7 +544,7 @@ func (s *AdminServer) findBrokerLeader() (string, error) {
})
if err != nil {
- return "", fmt.Errorf("failed to list brokers: %v", err)
+ return "", fmt.Errorf("failed to list brokers: %w", err)
}
if len(brokers) == 0 {
diff --git a/weed/admin/dash/topic_retention.go b/weed/admin/dash/topic_retention.go
index 06a9e9ad6..fed4893a4 100644
--- a/weed/admin/dash/topic_retention.go
+++ b/weed/admin/dash/topic_retention.go
@@ -34,7 +34,7 @@ func (p *TopicRetentionPurger) PurgeExpiredTopicData() error {
// Get all topics with retention enabled
topics, err := p.getTopicsWithRetention()
if err != nil {
- return fmt.Errorf("failed to get topics with retention: %v", err)
+ return fmt.Errorf("failed to get topics with retention: %w", err)
}
glog.V(1).Infof("Found %d topics with retention enabled", len(topics))
@@ -67,7 +67,7 @@ func (p *TopicRetentionPurger) getTopicsWithRetention() ([]TopicRetentionConfig,
// Find broker leader to get topics
brokerLeader, err := p.adminServer.findBrokerLeader()
if err != nil {
- return nil, fmt.Errorf("failed to find broker leader: %v", err)
+ return nil, fmt.Errorf("failed to find broker leader: %w", err)
}
// Get all topics from the broker
@@ -147,7 +147,7 @@ func (p *TopicRetentionPurger) purgeTopicData(topicRetention TopicRetentionConfi
if err == io.EOF {
break
}
- return fmt.Errorf("failed to receive version entries: %v", err)
+ return fmt.Errorf("failed to receive version entries: %w", err)
}
// Only process directories that are versions (start with "v")
@@ -257,7 +257,7 @@ func (p *TopicRetentionPurger) deleteDirectoryRecursively(client filer_pb.Seawee
if err == io.EOF {
break
}
- return fmt.Errorf("failed to receive entries: %v", err)
+ return fmt.Errorf("failed to receive entries: %w", err)
}
entryPath := filepath.Join(dirPath, resp.Entry.Name)
diff --git a/weed/admin/dash/user_management.go b/weed/admin/dash/user_management.go
index 1ed0b071e..747c398d7 100644
--- a/weed/admin/dash/user_management.go
+++ b/weed/admin/dash/user_management.go
@@ -53,7 +53,7 @@ func (s *AdminServer) CreateObjectStoreUser(req CreateUserRequest) (*ObjectStore
if err == credential.ErrUserAlreadyExists {
return nil, fmt.Errorf("user %s already exists", req.Username)
}
- return nil, fmt.Errorf("failed to create user: %v", err)
+ return nil, fmt.Errorf("failed to create user: %w", err)
}
// Return created user
@@ -82,7 +82,7 @@ func (s *AdminServer) UpdateObjectStoreUser(username string, req UpdateUserReque
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
- return nil, fmt.Errorf("failed to get user: %v", err)
+ return nil, fmt.Errorf("failed to get user: %w", err)
}
// Create updated identity
@@ -112,7 +112,7 @@ func (s *AdminServer) UpdateObjectStoreUser(username string, req UpdateUserReque
// Update user using credential manager
err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity)
if err != nil {
- return nil, fmt.Errorf("failed to update user: %v", err)
+ return nil, fmt.Errorf("failed to update user: %w", err)
}
// Return updated user
@@ -145,7 +145,7 @@ func (s *AdminServer) DeleteObjectStoreUser(username string) error {
if err == credential.ErrUserNotFound {
return fmt.Errorf("user %s not found", username)
}
- return fmt.Errorf("failed to delete user: %v", err)
+ return fmt.Errorf("failed to delete user: %w", err)
}
return nil
@@ -165,7 +165,7 @@ func (s *AdminServer) GetObjectStoreUserDetails(username string) (*UserDetails,
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
- return nil, fmt.Errorf("failed to get user: %v", err)
+ return nil, fmt.Errorf("failed to get user: %w", err)
}
details := &UserDetails{
@@ -204,7 +204,7 @@ func (s *AdminServer) CreateAccessKey(username string) (*AccessKeyInfo, error) {
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
- return nil, fmt.Errorf("failed to get user: %v", err)
+ return nil, fmt.Errorf("failed to get user: %w", err)
}
// Generate new access key
@@ -219,7 +219,7 @@ func (s *AdminServer) CreateAccessKey(username string) (*AccessKeyInfo, error) {
// Create access key using credential manager
err = s.credentialManager.CreateAccessKey(ctx, username, credential)
if err != nil {
- return nil, fmt.Errorf("failed to create access key: %v", err)
+ return nil, fmt.Errorf("failed to create access key: %w", err)
}
return &AccessKeyInfo{
@@ -246,7 +246,7 @@ func (s *AdminServer) DeleteAccessKey(username, accessKeyId string) error {
if err == credential.ErrAccessKeyNotFound {
return fmt.Errorf("access key %s not found for user %s", accessKeyId, username)
}
- return fmt.Errorf("failed to delete access key: %v", err)
+ return fmt.Errorf("failed to delete access key: %w", err)
}
return nil
@@ -266,7 +266,7 @@ func (s *AdminServer) GetUserPolicies(username string) ([]string, error) {
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
- return nil, fmt.Errorf("failed to get user: %v", err)
+ return nil, fmt.Errorf("failed to get user: %w", err)
}
return identity.Actions, nil
@@ -286,7 +286,7 @@ func (s *AdminServer) UpdateUserPolicies(username string, actions []string) erro
if err == credential.ErrUserNotFound {
return fmt.Errorf("user %s not found", username)
}
- return fmt.Errorf("failed to get user: %v", err)
+ return fmt.Errorf("failed to get user: %w", err)
}
// Create updated identity with new actions
@@ -300,7 +300,7 @@ func (s *AdminServer) UpdateUserPolicies(username string, actions []string) erro
// Update user using credential manager
err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity)
if err != nil {
- return fmt.Errorf("failed to update user policies: %v", err)
+ return fmt.Errorf("failed to update user policies: %w", err)
}
return nil
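
The methods above still compare err directly against sentinels such as credential.ErrUserNotFound before wrapping; once the wrapped error propagates further up, callers need errors.Is rather than == to detect the same condition. A small illustrative sketch with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

// ErrUserNotFound stands in for a sentinel like credential.ErrUserNotFound.
var ErrUserNotFound = errors.New("user not found")

// getUser is a hypothetical lower layer that wraps the sentinel with %w.
func getUser(name string) error {
	return fmt.Errorf("failed to get user: %w", ErrUserNotFound)
}

func main() {
	err := getUser("alice")

	// Direct comparison fails once the sentinel is wrapped...
	fmt.Println(err == ErrUserNotFound) // false

	// ...but errors.Is walks the %w chain and still matches.
	fmt.Println(errors.Is(err, ErrUserNotFound)) // true
}
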
diff --git a/weed/admin/dash/worker_grpc_server.go b/weed/admin/dash/worker_grpc_server.go
index c824cc388..36f97261a 100644
--- a/weed/admin/dash/worker_grpc_server.go
+++ b/weed/admin/dash/worker_grpc_server.go
@@ -133,7 +133,7 @@ func (s *WorkerGrpcServer) WorkerStream(stream worker_pb.WorkerService_WorkerStr
// Wait for initial registration message
msg, err := stream.Recv()
if err != nil {
- return fmt.Errorf("failed to receive registration message: %v", err)
+ return fmt.Errorf("failed to receive registration message: %w", err)
}
registration := msg.GetRegistration()
diff --git a/weed/admin/handlers/file_browser_handlers.go b/weed/admin/handlers/file_browser_handlers.go
index c644cc942..f19aa3e1b 100644
--- a/weed/admin/handlers/file_browser_handlers.go
+++ b/weed/admin/handlers/file_browser_handlers.go
@@ -307,19 +307,19 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Validate and sanitize the filer address
if err := h.validateFilerAddress(filerAddress); err != nil {
- return fmt.Errorf("invalid filer address: %v", err)
+ return fmt.Errorf("invalid filer address: %w", err)
}
// Validate and sanitize the file path
cleanFilePath, err := h.validateAndCleanFilePath(filePath)
if err != nil {
- return fmt.Errorf("invalid file path: %v", err)
+ return fmt.Errorf("invalid file path: %w", err)
}
// Open the file
file, err := fileHeader.Open()
if err != nil {
- return fmt.Errorf("failed to open file: %v", err)
+ return fmt.Errorf("failed to open file: %w", err)
}
defer file.Close()
@@ -330,19 +330,19 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Create form file field
part, err := writer.CreateFormFile("file", fileHeader.Filename)
if err != nil {
- return fmt.Errorf("failed to create form file: %v", err)
+ return fmt.Errorf("failed to create form file: %w", err)
}
// Copy file content to form
_, err = io.Copy(part, file)
if err != nil {
- return fmt.Errorf("failed to copy file content: %v", err)
+ return fmt.Errorf("failed to copy file content: %w", err)
}
// Close the writer to finalize the form
err = writer.Close()
if err != nil {
- return fmt.Errorf("failed to close multipart writer: %v", err)
+ return fmt.Errorf("failed to close multipart writer: %w", err)
}
// Create the upload URL with validated components
@@ -351,7 +351,7 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Create HTTP request
req, err := http.NewRequest("POST", uploadURL, &body)
if err != nil {
- return fmt.Errorf("failed to create request: %v", err)
+ return fmt.Errorf("failed to create request: %w", err)
}
// Set content type with boundary
@@ -361,7 +361,7 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
client := &http.Client{Timeout: 60 * time.Second} // Increased timeout for larger files
resp, err := client.Do(req)
if err != nil {
- return fmt.Errorf("failed to upload file: %v", err)
+ return fmt.Errorf("failed to upload file: %w", err)
}
defer resp.Body.Close()
@@ -383,7 +383,7 @@ func (h *FileBrowserHandlers) validateFilerAddress(address string) error {
// Parse the address to validate it's a proper host:port format
host, port, err := net.SplitHostPort(address)
if err != nil {
- return fmt.Errorf("invalid address format: %v", err)
+ return fmt.Errorf("invalid address format: %w", err)
}
// Validate host is not empty
@@ -398,7 +398,7 @@ func (h *FileBrowserHandlers) validateFilerAddress(address string) error {
portNum, err := strconv.Atoi(port)
if err != nil {
- return fmt.Errorf("invalid port number: %v", err)
+ return fmt.Errorf("invalid port number: %w", err)
}
if portNum < 1 || portNum > 65535 {
diff --git a/weed/admin/maintenance/maintenance_manager.go b/weed/admin/maintenance/maintenance_manager.go
index 17d1eef6d..5d87d817e 100644
--- a/weed/admin/maintenance/maintenance_manager.go
+++ b/weed/admin/maintenance/maintenance_manager.go
@@ -53,7 +53,7 @@ func (mm *MaintenanceManager) Start() error {
// Validate configuration durations to prevent ticker panics
if err := mm.validateConfig(); err != nil {
- return fmt.Errorf("invalid maintenance configuration: %v", err)
+ return fmt.Errorf("invalid maintenance configuration: %w", err)
}
mm.running = true
diff --git a/weed/admin/maintenance/maintenance_scanner.go b/weed/admin/maintenance/maintenance_scanner.go
index 4d7cda125..271765ef8 100644
--- a/weed/admin/maintenance/maintenance_scanner.go
+++ b/weed/admin/maintenance/maintenance_scanner.go
@@ -35,7 +35,7 @@ func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult,
// Get volume health metrics
volumeMetrics, err := ms.getVolumeHealthMetrics()
if err != nil {
- return nil, fmt.Errorf("failed to get volume health metrics: %v", err)
+ return nil, fmt.Errorf("failed to get volume health metrics: %w", err)
}
// Use task system for all task types
diff --git a/weed/admin/maintenance/maintenance_worker.go b/weed/admin/maintenance/maintenance_worker.go
index 8a87a8403..ab2157f24 100644
--- a/weed/admin/maintenance/maintenance_worker.go
+++ b/weed/admin/maintenance/maintenance_worker.go
@@ -159,7 +159,7 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
// Create task instance using the registry
taskInstance, err := mws.taskRegistry.CreateTask(taskType, taskParams)
if err != nil {
- return fmt.Errorf("failed to create task instance: %v", err)
+ return fmt.Errorf("failed to create task instance: %w", err)
}
// Update progress to show task has started
@@ -168,7 +168,7 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
// Execute the task
err = taskInstance.Execute(taskParams)
if err != nil {
- return fmt.Errorf("task execution failed: %v", err)
+ return fmt.Errorf("task execution failed: %w", err)
}
// Update progress to show completion
@@ -405,7 +405,7 @@ func (mwc *MaintenanceWorkerCommand) Run() error {
// Start the worker service
err := mwc.workerService.Start()
if err != nil {
- return fmt.Errorf("failed to start maintenance worker: %v", err)
+ return fmt.Errorf("failed to start maintenance worker: %w", err)
}
// Wait for interrupt signal
diff --git a/weed/command/admin.go b/weed/command/admin.go
index 31d4ed087..6ac42330c 100644
--- a/weed/command/admin.go
+++ b/weed/command/admin.go
@@ -186,7 +186,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
sessionKeyBytes := make([]byte, 32)
_, err := rand.Read(sessionKeyBytes)
if err != nil {
- return fmt.Errorf("failed to generate session key: %v", err)
+ return fmt.Errorf("failed to generate session key: %w", err)
}
store := cookie.NewStore(sessionKeyBytes)
r.Use(sessions.Sessions("admin-session", store))
@@ -234,7 +234,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
// Start worker gRPC server for worker connections
err = adminServer.StartWorkerGrpcServer(*options.port)
if err != nil {
- return fmt.Errorf("failed to start worker gRPC server: %v", err)
+ return fmt.Errorf("failed to start worker gRPC server: %w", err)
}
// Set up cleanup for gRPC server
@@ -304,7 +304,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
defer cancel()
if err := server.Shutdown(shutdownCtx); err != nil {
- return fmt.Errorf("admin server forced to shutdown: %v", err)
+ return fmt.Errorf("admin server forced to shutdown: %w", err)
}
return nil
@@ -328,7 +328,7 @@ func expandHomeDir(path string) (string, error) {
// Get current user
currentUser, err := user.Current()
if err != nil {
- return "", fmt.Errorf("failed to get current user: %v", err)
+ return "", fmt.Errorf("failed to get current user: %w", err)
}
// Handle different tilde patterns
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 547e46c4f..38e4eb7b9 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -268,7 +268,7 @@ func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
}
if shouldCopy, err := worker.checkExistingFileFirst(task, f); err != nil {
- return fmt.Errorf("check existing file: %v", err)
+ return fmt.Errorf("check existing file: %w", err)
} else if !shouldCopy {
if *worker.options.verbose {
fmt.Printf("skipping copied file: %v\n", f.Name())
@@ -395,7 +395,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
}
if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil {
- return fmt.Errorf("update fh: %v", err)
+ return fmt.Errorf("update fh: %w", err)
}
return nil
}); err != nil {
@@ -428,7 +428,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
uploader, err := operation.NewUploader()
if err != nil {
- uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
+ uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err)
return
}
@@ -456,7 +456,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
)
if err != nil {
- uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
+ uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err)
return
}
if uploadResult.Error != "" {
@@ -489,7 +489,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
manifestedChunks, manifestErr := filer.MaybeManifestize(worker.saveDataAsChunk, chunks)
if manifestErr != nil {
- return fmt.Errorf("create manifest: %v", manifestErr)
+ return fmt.Errorf("create manifest: %w", manifestErr)
}
if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
@@ -512,7 +512,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
}
if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil {
- return fmt.Errorf("update fh: %v", err)
+ return fmt.Errorf("update fh: %w", err)
}
return nil
}); err != nil {
@@ -546,7 +546,7 @@ func detectMimeType(f *os.File) string {
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
- return nil, fmt.Errorf("upload data: %v", uploaderErr)
+ return nil, fmt.Errorf("upload data: %w", uploaderErr)
}
finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
@@ -573,7 +573,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
)
if flushErr != nil {
- return nil, fmt.Errorf("upload data: %v", flushErr)
+ return nil, fmt.Errorf("upload data: %w", flushErr)
}
if uploadResult.Error != "" {
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go
index e8c4680ba..f77f758ab 100644
--- a/weed/command/filer_meta_backup.go
+++ b/weed/command/filer_meta_backup.go
@@ -133,14 +133,14 @@ func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) {
println("+", parentPath.Child(entry.Name))
if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil {
- saveErr = fmt.Errorf("insert entry error: %v\n", err)
+ saveErr = fmt.Errorf("insert entry error: %w\n", err)
return
}
})
if traverseErr != nil {
- return fmt.Errorf("traverse: %v", traverseErr)
+ return fmt.Errorf("traverse: %w", traverseErr)
}
return saveErr
}
diff --git a/weed/command/filer_remote_gateway_buckets.go b/weed/command/filer_remote_gateway_buckets.go
index aa3869171..5c7e0ae21 100644
--- a/weed/command/filer_remote_gateway_buckets.go
+++ b/weed/command/filer_remote_gateway_buckets.go
@@ -23,7 +23,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
// read filer remote storage mount mappings
if detectErr := option.collectRemoteStorageConf(); detectErr != nil {
- return fmt.Errorf("read mount info: %v", detectErr)
+ return fmt.Errorf("read mount info: %w", detectErr)
}
eachEntryFunc, err := option.makeBucketedEventProcessor(filerSource)
@@ -168,7 +168,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
newMappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
- return fmt.Errorf("unmarshal mappings: %v", readErr)
+ return fmt.Errorf("unmarshal mappings: %w", readErr)
}
option.mappings = newMappings
}
diff --git a/weed/command/filer_remote_sync_dir.go b/weed/command/filer_remote_sync_dir.go
index c85ba03af..5011ca36e 100644
--- a/weed/command/filer_remote_sync_dir.go
+++ b/weed/command/filer_remote_sync_dir.go
@@ -25,7 +25,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
// read filer remote storage mount mappings
_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir)
if detectErr != nil {
- return fmt.Errorf("read mount info: %v", detectErr)
+ return fmt.Errorf("read mount info: %w", detectErr)
}
eachEntryFunc, err := option.makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)
@@ -99,7 +99,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
- return fmt.Errorf("unmarshal mappings: %v", readErr)
+ return fmt.Errorf("unmarshal mappings: %w", readErr)
}
if remoteLoc, found := mappings.Mappings[mountedDir]; found {
if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
diff --git a/weed/command/fix.go b/weed/command/fix.go
index e0cbc58a4..2b7b425f3 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -170,7 +170,7 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
}
if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil {
- err := fmt.Errorf("scan .dat File: %v", err)
+ err := fmt.Errorf("scan .dat File: %w", err)
if *fixIgnoreError {
glog.Error(err)
} else {
@@ -179,7 +179,7 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
}
if err := SaveToIdx(scanner, indexFileName); err != nil {
- err := fmt.Errorf("save to .idx File: %v", err)
+ err := fmt.Errorf("save to .idx File: %w", err)
if *fixIgnoreError {
glog.Error(err)
} else {
diff --git a/weed/command/master_follower.go b/weed/command/master_follower.go
index 43707946d..55b046092 100644
--- a/weed/command/master_follower.go
+++ b/weed/command/master_follower.go
@@ -92,7 +92,7 @@ func startMasterFollower(masterOptions MasterOptions) {
err = pb.WithOneOfGrpcMasterClients(false, masters, grpcDialOption, func(client master_pb.SeaweedClient) error {
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get master grpc address %v configuration: %v", masters, err)
+ return fmt.Errorf("get master grpc address %v configuration: %w", masters, err)
}
masterOptions.defaultReplication = &resp.DefaultReplication
masterOptions.volumeSizeLimitMB = aws.Uint(uint(resp.VolumeSizeLimitMB))
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 677dca793..588d38ce4 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -78,7 +78,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
err = pb.WithOneOfGrpcFilerClients(false, filerAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get filer grpc address %v configuration: %v", filerAddresses, err)
+ return fmt.Errorf("get filer grpc address %v configuration: %w", filerAddresses, err)
}
cipher = resp.Cipher
return nil
diff --git a/weed/credential/filer_etc/filer_etc_identity.go b/weed/credential/filer_etc/filer_etc_identity.go
index 103c988ff..f57c7c3ac 100644
--- a/weed/credential/filer_etc/filer_etc_identity.go
+++ b/weed/credential/filer_etc/filer_etc_identity.go
@@ -34,7 +34,7 @@ func (store *FilerEtcStore) SaveConfiguration(ctx context.Context, config *iam_p
return store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
var buf bytes.Buffer
if err := filer.ProtoToText(&buf, config); err != nil {
- return fmt.Errorf("failed to marshal configuration: %v", err)
+ return fmt.Errorf("failed to marshal configuration: %w", err)
}
return filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile, buf.Bytes())
})
@@ -44,7 +44,7 @@ func (store *FilerEtcStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
// Load existing configuration
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
// Check if user already exists
@@ -64,7 +64,7 @@ func (store *FilerEtcStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
func (store *FilerEtcStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return nil, fmt.Errorf("failed to load configuration: %v", err)
+ return nil, fmt.Errorf("failed to load configuration: %w", err)
}
for _, identity := range config.Identities {
@@ -79,7 +79,7 @@ func (store *FilerEtcStore) GetUser(ctx context.Context, username string) (*iam_
func (store *FilerEtcStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
// Find and update the user
@@ -96,7 +96,7 @@ func (store *FilerEtcStore) UpdateUser(ctx context.Context, username string, ide
func (store *FilerEtcStore) DeleteUser(ctx context.Context, username string) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
// Find and remove the user
@@ -113,7 +113,7 @@ func (store *FilerEtcStore) DeleteUser(ctx context.Context, username string) err
func (store *FilerEtcStore) ListUsers(ctx context.Context) ([]string, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return nil, fmt.Errorf("failed to load configuration: %v", err)
+ return nil, fmt.Errorf("failed to load configuration: %w", err)
}
var usernames []string
@@ -127,7 +127,7 @@ func (store *FilerEtcStore) ListUsers(ctx context.Context) ([]string, error) {
func (store *FilerEtcStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return nil, fmt.Errorf("failed to load configuration: %v", err)
+ return nil, fmt.Errorf("failed to load configuration: %w", err)
}
for _, identity := range config.Identities {
@@ -144,7 +144,7 @@ func (store *FilerEtcStore) GetUserByAccessKey(ctx context.Context, accessKey st
func (store *FilerEtcStore) CreateAccessKey(ctx context.Context, username string, cred *iam_pb.Credential) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
// Find the user and add the credential
@@ -168,7 +168,7 @@ func (store *FilerEtcStore) CreateAccessKey(ctx context.Context, username string
func (store *FilerEtcStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
// Find the user and remove the credential
diff --git a/weed/credential/migration.go b/weed/credential/migration.go
index b286bce62..41d0e3840 100644
--- a/weed/credential/migration.go
+++ b/weed/credential/migration.go
@@ -31,7 +31,7 @@ func MigrateCredentials(fromStoreName, toStoreName CredentialStoreTypeName, conf
glog.Infof("Loading configuration from %s store...", fromStoreName)
config, err := fromCM.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration from source store: %v", err)
+ return fmt.Errorf("failed to load configuration from source store: %w", err)
}
if config == nil || len(config.Identities) == 0 {
@@ -94,7 +94,7 @@ func ExportCredentials(storeName CredentialStoreTypeName, configuration util.Con
// Load configuration
config, err := cm.LoadConfiguration(ctx)
if err != nil {
- return nil, fmt.Errorf("failed to load configuration: %v", err)
+ return nil, fmt.Errorf("failed to load configuration: %w", err)
}
return config, nil
@@ -164,7 +164,7 @@ func ValidateCredentials(storeName CredentialStoreTypeName, configuration util.C
// Load configuration
config, err := cm.LoadConfiguration(ctx)
if err != nil {
- return fmt.Errorf("failed to load configuration: %v", err)
+ return fmt.Errorf("failed to load configuration: %w", err)
}
if config == nil || len(config.Identities) == 0 {
diff --git a/weed/credential/postgres/postgres_identity.go b/weed/credential/postgres/postgres_identity.go
index ea3627c50..11908b0d8 100644
--- a/weed/credential/postgres/postgres_identity.go
+++ b/weed/credential/postgres/postgres_identity.go
@@ -20,7 +20,7 @@ func (store *PostgresStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
// Query all users
rows, err := store.db.QueryContext(ctx, "SELECT username, email, account_data, actions FROM users")
if err != nil {
- return nil, fmt.Errorf("failed to query users: %v", err)
+ return nil, fmt.Errorf("failed to query users: %w", err)
}
defer rows.Close()
@@ -29,7 +29,7 @@ func (store *PostgresStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
var accountDataJSON, actionsJSON []byte
if err := rows.Scan(&username, &email, &accountDataJSON, &actionsJSON); err != nil {
- return nil, fmt.Errorf("failed to scan user row: %v", err)
+ return nil, fmt.Errorf("failed to scan user row: %w", err)
}
identity := &iam_pb.Identity{
@@ -84,16 +84,16 @@ func (store *PostgresStore) SaveConfiguration(ctx context.Context, config *iam_p
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
- return fmt.Errorf("failed to begin transaction: %v", err)
+ return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
// Clear existing data
if _, err := tx.ExecContext(ctx, "DELETE FROM credentials"); err != nil {
- return fmt.Errorf("failed to clear credentials: %v", err)
+ return fmt.Errorf("failed to clear credentials: %w", err)
}
if _, err := tx.ExecContext(ctx, "DELETE FROM users"); err != nil {
- return fmt.Errorf("failed to clear users: %v", err)
+ return fmt.Errorf("failed to clear users: %w", err)
}
// Insert all identities
@@ -147,7 +147,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
var count int
err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", identity.Name).Scan(&count)
if err != nil {
- return fmt.Errorf("failed to check user existence: %v", err)
+ return fmt.Errorf("failed to check user existence: %w", err)
}
if count > 0 {
return credential.ErrUserAlreadyExists
@@ -156,7 +156,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
- return fmt.Errorf("failed to begin transaction: %v", err)
+ return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
@@ -165,7 +165,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
if identity.Account != nil {
accountDataJSON, err = json.Marshal(identity.Account)
if err != nil {
- return fmt.Errorf("failed to marshal account data: %v", err)
+ return fmt.Errorf("failed to marshal account data: %w", err)
}
}
@@ -174,7 +174,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
if identity.Actions != nil {
actionsJSON, err = json.Marshal(identity.Actions)
if err != nil {
- return fmt.Errorf("failed to marshal actions: %v", err)
+ return fmt.Errorf("failed to marshal actions: %w", err)
}
}
@@ -183,7 +183,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
"INSERT INTO users (username, email, account_data, actions) VALUES ($1, $2, $3, $4)",
identity.Name, "", accountDataJSON, actionsJSON)
if err != nil {
- return fmt.Errorf("failed to insert user: %v", err)
+ return fmt.Errorf("failed to insert user: %w", err)
}
// Insert credentials
@@ -192,7 +192,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
identity.Name, cred.AccessKey, cred.SecretKey)
if err != nil {
- return fmt.Errorf("failed to insert credential: %v", err)
+ return fmt.Errorf("failed to insert credential: %w", err)
}
}
@@ -214,7 +214,7 @@ func (store *PostgresStore) GetUser(ctx context.Context, username string) (*iam_
if err == sql.ErrNoRows {
return nil, credential.ErrUserNotFound
}
- return nil, fmt.Errorf("failed to query user: %v", err)
+ return nil, fmt.Errorf("failed to query user: %w", err)
}
identity := &iam_pb.Identity{
@@ -224,28 +224,28 @@ func (store *PostgresStore) GetUser(ctx context.Context, username string) (*iam_
// Parse account data
if len(accountDataJSON) > 0 {
if err := json.Unmarshal(accountDataJSON, &identity.Account); err != nil {
- return nil, fmt.Errorf("failed to unmarshal account data: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal account data: %w", err)
}
}
// Parse actions
if len(actionsJSON) > 0 {
if err := json.Unmarshal(actionsJSON, &identity.Actions); err != nil {
- return nil, fmt.Errorf("failed to unmarshal actions: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal actions: %w", err)
}
}
// Query credentials
rows, err := store.db.QueryContext(ctx, "SELECT access_key, secret_key FROM credentials WHERE username = $1", username)
if err != nil {
- return nil, fmt.Errorf("failed to query credentials: %v", err)
+ return nil, fmt.Errorf("failed to query credentials: %w", err)
}
defer rows.Close()
for rows.Next() {
var accessKey, secretKey string
if err := rows.Scan(&accessKey, &secretKey); err != nil {
- return nil, fmt.Errorf("failed to scan credential: %v", err)
+ return nil, fmt.Errorf("failed to scan credential: %w", err)
}
identity.Credentials = append(identity.Credentials, &iam_pb.Credential{
@@ -265,7 +265,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
- return fmt.Errorf("failed to begin transaction: %v", err)
+ return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
@@ -273,7 +273,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
var count int
err = tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
- return fmt.Errorf("failed to check user existence: %v", err)
+ return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
@@ -284,7 +284,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
if identity.Account != nil {
accountDataJSON, err = json.Marshal(identity.Account)
if err != nil {
- return fmt.Errorf("failed to marshal account data: %v", err)
+ return fmt.Errorf("failed to marshal account data: %w", err)
}
}
@@ -293,7 +293,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
if identity.Actions != nil {
actionsJSON, err = json.Marshal(identity.Actions)
if err != nil {
- return fmt.Errorf("failed to marshal actions: %v", err)
+ return fmt.Errorf("failed to marshal actions: %w", err)
}
}
@@ -302,13 +302,13 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
"UPDATE users SET email = $2, account_data = $3, actions = $4, updated_at = CURRENT_TIMESTAMP WHERE username = $1",
username, "", accountDataJSON, actionsJSON)
if err != nil {
- return fmt.Errorf("failed to update user: %v", err)
+ return fmt.Errorf("failed to update user: %w", err)
}
// Delete existing credentials
_, err = tx.ExecContext(ctx, "DELETE FROM credentials WHERE username = $1", username)
if err != nil {
- return fmt.Errorf("failed to delete existing credentials: %v", err)
+ return fmt.Errorf("failed to delete existing credentials: %w", err)
}
// Insert new credentials
@@ -317,7 +317,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
username, cred.AccessKey, cred.SecretKey)
if err != nil {
- return fmt.Errorf("failed to insert credential: %v", err)
+ return fmt.Errorf("failed to insert credential: %w", err)
}
}
@@ -331,12 +331,12 @@ func (store *PostgresStore) DeleteUser(ctx context.Context, username string) err
result, err := store.db.ExecContext(ctx, "DELETE FROM users WHERE username = $1", username)
if err != nil {
- return fmt.Errorf("failed to delete user: %v", err)
+ return fmt.Errorf("failed to delete user: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
- return fmt.Errorf("failed to get rows affected: %v", err)
+ return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@@ -353,7 +353,7 @@ func (store *PostgresStore) ListUsers(ctx context.Context) ([]string, error) {
rows, err := store.db.QueryContext(ctx, "SELECT username FROM users ORDER BY username")
if err != nil {
- return nil, fmt.Errorf("failed to query users: %v", err)
+ return nil, fmt.Errorf("failed to query users: %w", err)
}
defer rows.Close()
@@ -361,7 +361,7 @@ func (store *PostgresStore) ListUsers(ctx context.Context) ([]string, error) {
for rows.Next() {
var username string
if err := rows.Scan(&username); err != nil {
- return nil, fmt.Errorf("failed to scan username: %v", err)
+ return nil, fmt.Errorf("failed to scan username: %w", err)
}
usernames = append(usernames, username)
}
@@ -380,7 +380,7 @@ func (store *PostgresStore) GetUserByAccessKey(ctx context.Context, accessKey st
if err == sql.ErrNoRows {
return nil, credential.ErrAccessKeyNotFound
}
- return nil, fmt.Errorf("failed to query access key: %v", err)
+ return nil, fmt.Errorf("failed to query access key: %w", err)
}
return store.GetUser(ctx, username)
@@ -395,7 +395,7 @@ func (store *PostgresStore) CreateAccessKey(ctx context.Context, username string
var count int
err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
- return fmt.Errorf("failed to check user existence: %v", err)
+ return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
@@ -406,7 +406,7 @@ func (store *PostgresStore) CreateAccessKey(ctx context.Context, username string
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
username, cred.AccessKey, cred.SecretKey)
if err != nil {
- return fmt.Errorf("failed to insert credential: %v", err)
+ return fmt.Errorf("failed to insert credential: %w", err)
}
return nil
@@ -421,12 +421,12 @@ func (store *PostgresStore) DeleteAccessKey(ctx context.Context, username string
"DELETE FROM credentials WHERE username = $1 AND access_key = $2",
username, accessKey)
if err != nil {
- return fmt.Errorf("failed to delete access key: %v", err)
+ return fmt.Errorf("failed to delete access key: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
- return fmt.Errorf("failed to get rows affected: %v", err)
+ return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@@ -434,7 +434,7 @@ func (store *PostgresStore) DeleteAccessKey(ctx context.Context, username string
var count int
err = store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
- return fmt.Errorf("failed to check user existence: %v", err)
+ return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
diff --git a/weed/credential/postgres/postgres_policy.go b/weed/credential/postgres/postgres_policy.go
index 4e50e0771..061646f7f 100644
--- a/weed/credential/postgres/postgres_policy.go
+++ b/weed/credential/postgres/postgres_policy.go
@@ -18,7 +18,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_
rows, err := store.db.QueryContext(ctx, "SELECT name, document FROM policies")
if err != nil {
- return nil, fmt.Errorf("failed to query policies: %v", err)
+ return nil, fmt.Errorf("failed to query policies: %w", err)
}
defer rows.Close()
@@ -27,7 +27,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_
var documentJSON []byte
if err := rows.Scan(&name, &documentJSON); err != nil {
- return nil, fmt.Errorf("failed to scan policy row: %v", err)
+ return nil, fmt.Errorf("failed to scan policy row: %w", err)
}
var document policy_engine.PolicyDocument
@@ -49,14 +49,14 @@ func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, docum
documentJSON, err := json.Marshal(document)
if err != nil {
- return fmt.Errorf("failed to marshal policy document: %v", err)
+ return fmt.Errorf("failed to marshal policy document: %w", err)
}
_, err = store.db.ExecContext(ctx,
"INSERT INTO policies (name, document) VALUES ($1, $2) ON CONFLICT (name) DO UPDATE SET document = $2, updated_at = CURRENT_TIMESTAMP",
name, documentJSON)
if err != nil {
- return fmt.Errorf("failed to insert policy: %v", err)
+ return fmt.Errorf("failed to insert policy: %w", err)
}
return nil
@@ -70,19 +70,19 @@ func (store *PostgresStore) UpdatePolicy(ctx context.Context, name string, docum
documentJSON, err := json.Marshal(document)
if err != nil {
- return fmt.Errorf("failed to marshal policy document: %v", err)
+ return fmt.Errorf("failed to marshal policy document: %w", err)
}
result, err := store.db.ExecContext(ctx,
"UPDATE policies SET document = $2, updated_at = CURRENT_TIMESTAMP WHERE name = $1",
name, documentJSON)
if err != nil {
- return fmt.Errorf("failed to update policy: %v", err)
+ return fmt.Errorf("failed to update policy: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
- return fmt.Errorf("failed to get rows affected: %v", err)
+ return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@@ -100,12 +100,12 @@ func (store *PostgresStore) DeletePolicy(ctx context.Context, name string) error
result, err := store.db.ExecContext(ctx, "DELETE FROM policies WHERE name = $1", name)
if err != nil {
- return fmt.Errorf("failed to delete policy: %v", err)
+ return fmt.Errorf("failed to delete policy: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
- return fmt.Errorf("failed to get rows affected: %v", err)
+ return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
diff --git a/weed/credential/postgres/postgres_store.go b/weed/credential/postgres/postgres_store.go
index 40d200668..c5fa6e727 100644
--- a/weed/credential/postgres/postgres_store.go
+++ b/weed/credential/postgres/postgres_store.go
@@ -58,13 +58,13 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
db, err := sql.Open("postgres", connStr)
if err != nil {
- return fmt.Errorf("failed to open database: %v", err)
+ return fmt.Errorf("failed to open database: %w", err)
}
// Test connection
if err := db.Ping(); err != nil {
db.Close()
- return fmt.Errorf("failed to ping database: %v", err)
+ return fmt.Errorf("failed to ping database: %w", err)
}
// Set connection pool settings
@@ -77,7 +77,7 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
// Create tables if they don't exist
if err := store.createTables(); err != nil {
db.Close()
- return fmt.Errorf("failed to create tables: %v", err)
+ return fmt.Errorf("failed to create tables: %w", err)
}
store.configured = true
@@ -124,15 +124,15 @@ func (store *PostgresStore) createTables() error {
// Execute table creation
if _, err := store.db.Exec(usersTable); err != nil {
- return fmt.Errorf("failed to create users table: %v", err)
+ return fmt.Errorf("failed to create users table: %w", err)
}
if _, err := store.db.Exec(credentialsTable); err != nil {
- return fmt.Errorf("failed to create credentials table: %v", err)
+ return fmt.Errorf("failed to create credentials table: %w", err)
}
if _, err := store.db.Exec(policiesTable); err != nil {
- return fmt.Errorf("failed to create policies table: %v", err)
+ return fmt.Errorf("failed to create policies table: %w", err)
}
return nil
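
Note on the pattern applied throughout these hunks: with %w, fmt.Errorf records the wrapped error so callers can inspect it with errors.Is and errors.As, whereas %v only folds the error's text into the message. A minimal sketch of the difference, using sql.ErrNoRows as a stand-in for a failed query (illustrative only, not code from this change):

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

// getUser stands in for a store method whose query failed with sql.ErrNoRows.
func getUser() error {
	err := sql.ErrNoRows
	return fmt.Errorf("failed to query user: %w", err)
}

func main() {
	err := getUser()
	// With %w the sentinel is still reachable through the wrap chain;
	// had %v been used, this check would report false.
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true
}
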
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
index afc8474b7..ad9e6ab41 100644
--- a/weed/filer/abstract_sql/abstract_sql_store_kv.go
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -15,7 +15,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
- return fmt.Errorf("findDB: %v", err)
+ return fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)
@@ -50,7 +50,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
- return nil, fmt.Errorf("findDB: %v", err)
+ return nil, fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)
@@ -63,7 +63,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
}
if err != nil {
- return nil, fmt.Errorf("kv get: %v", err)
+ return nil, fmt.Errorf("kv get: %w", err)
}
return
@@ -73,7 +73,7 @@ func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err er
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
- return fmt.Errorf("findDB: %v", err)
+ return fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)
diff --git a/weed/filer/arangodb/arangodb_store_kv.go b/weed/filer/arangodb/arangodb_store_kv.go
index 8d50c2b2c..ae768c7fb 100644
--- a/weed/filer/arangodb/arangodb_store_kv.go
+++ b/weed/filer/arangodb/arangodb_store_kv.go
@@ -18,7 +18,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte)
exists, err := store.kvCollection.DocumentExists(ctx, model.Key)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
if exists {
_, err = store.kvCollection.UpdateDocument(ctx, model.Key, model)
@@ -26,7 +26,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte)
_, err = store.kvCollection.CreateDocument(ctx, model)
}
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go
index 79ce3d2a0..eea59ffbd 100644
--- a/weed/filer/cassandra/cassandra_store_kv.go
+++ b/weed/filer/cassandra/cassandra_store_kv.go
@@ -44,7 +44,7 @@ func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err erro
if err := store.session.Query(
"DELETE FROM filemeta WHERE directory=? AND name=?",
dir, name).Exec(); err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/cassandra2/cassandra_store_kv.go b/weed/filer/cassandra2/cassandra_store_kv.go
index 3b8c3d51a..e11193b8b 100644
--- a/weed/filer/cassandra2/cassandra_store_kv.go
+++ b/weed/filer/cassandra2/cassandra_store_kv.go
@@ -45,7 +45,7 @@ func (store *Cassandra2Store) KvDelete(ctx context.Context, key []byte) (err err
if err := store.session.Query(
"DELETE FROM filemeta WHERE dirhash=? AND directory=? AND name=?",
util.HashStringToLong(dir), dir, name).Exec(); err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
index 0bfe04c90..5b88025e4 100644
--- a/weed/filer/elastic/v7/elastic_store.go
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -78,7 +78,7 @@ func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err e
ctx := context.Background()
store.client, err = elastic.NewClient(options...)
if err != nil {
- return fmt.Errorf("init elastic %v", err)
+ return fmt.Errorf("init elastic %w", err)
}
if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok {
_, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx)
@@ -114,7 +114,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
- return fmt.Errorf("insert entry marshal %v", err)
+ return fmt.Errorf("insert entry marshal %w", err)
}
_, err = store.client.Index().
Index(index).
@@ -124,7 +124,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
Do(ctx)
if err != nil {
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
- return fmt.Errorf("insert entry %v", err)
+ return fmt.Errorf("insert entry %w", err)
}
return nil
}
@@ -194,7 +194,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
}
}
glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
- return fmt.Errorf("delete entry %v", err)
+ return fmt.Errorf("delete entry %w", err)
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go
index e3d9d192b..6986ea7ef 100644
--- a/weed/filer/elastic/v7/elastic_store_kv.go
+++ b/weed/filer/elastic/v7/elastic_store_kv.go
@@ -26,7 +26,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
}
}
glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
- return fmt.Errorf("delete key %v", err)
+ return fmt.Errorf("delete key %w", err)
}
func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
@@ -53,7 +53,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
val, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
- return fmt.Errorf("insert key %v", err)
+ return fmt.Errorf("insert key %w", err)
}
_, err = store.client.Index().
Index(indexKV).
@@ -62,7 +62,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
BodyJson(string(val)).
Do(ctx)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
}
diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
index 7cff4bc25..d300a7048 100644
--- a/weed/filer/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -48,7 +48,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
timeoutStr := configuration.GetString(prefix + "timeout")
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
- return fmt.Errorf("parse etcd store timeout: %v", err)
+ return fmt.Errorf("parse etcd store timeout: %w", err)
}
store.timeout = timeout
@@ -66,7 +66,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
var err error
tlsConfig, err = tlsInfo.ClientConfig()
if err != nil {
- return fmt.Errorf("TLS client configuration error: %v", err)
+ return fmt.Errorf("TLS client configuration error: %w", err)
}
}
diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go
index 1616cfd82..e2536ba1c 100644
--- a/weed/filer/etcd/etcd_store_kv.go
+++ b/weed/filer/etcd/etcd_store_kv.go
@@ -11,7 +11,7 @@ func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (er
_, err = store.client.Put(ctx, store.etcdKeyPrefix+string(key), string(value))
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -22,7 +22,7 @@ func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, er
resp, err := store.client.Get(ctx, store.etcdKeyPrefix+string(key))
if err != nil {
- return nil, fmt.Errorf("kv get: %v", err)
+ return nil, fmt.Errorf("kv get: %w", err)
}
if len(resp.Kvs) == 0 {
@@ -37,7 +37,7 @@ func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.client.Delete(ctx, store.etcdKeyPrefix+string(key))
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index 884be591e..e8de430f0 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -220,7 +220,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer
Chunks: dataChunks,
})
if serErr != nil {
- return nil, fmt.Errorf("serializing manifest: %v", serErr)
+ return nil, fmt.Errorf("serializing manifest: %w", serErr)
}
minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index a2a6faedc..cfe07ec5a 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -113,7 +113,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
- return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ return fmt.Errorf("filer store delete: %w", storeDeletionErr)
}
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
@@ -127,7 +127,7 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou
glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
- return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ return fmt.Errorf("filer store delete: %w", storeDeletionErr)
}
if !entry.IsDirectory() {
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 4c99da72a..4ad84f2e6 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -120,7 +120,7 @@ func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition,
if visitErr == io.EOF {
return
}
- err = fmt.Errorf("reading from persisted logs: %v", visitErr)
+ err = fmt.Errorf("reading from persisted logs: %w", visitErr)
return
}
var logEntry *filer_pb.LogEntry
@@ -130,12 +130,12 @@ func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition,
if visitErr == io.EOF {
break
}
- err = fmt.Errorf("read next from persisted logs: %v", visitErr)
+ err = fmt.Errorf("read next from persisted logs: %w", visitErr)
return
}
isDone, visitErr = eachLogEntryFn(logEntry)
if visitErr != nil {
- err = fmt.Errorf("process persisted log entry: %v", visitErr)
+ err = fmt.Errorf("process persisted log entry: %w", visitErr)
return
}
lastTsNs = logEntry.TsNs
diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go
index 699a4b70e..9150f92d6 100644
--- a/weed/filer/filer_notify_append.go
+++ b/weed/filer/filer_notify_append.go
@@ -60,7 +60,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
assignResult, err := operation.Assign(context.Background(), f.GetMaster, f.GrpcDialOption, assignRequest)
if err != nil {
- return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+ return nil, nil, fmt.Errorf("AssignVolume: %w", err)
}
if assignResult.Error != "" {
return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
diff --git a/weed/filer/filer_notify_read.go b/weed/filer/filer_notify_read.go
index e67b283dd..d25412d0d 100644
--- a/weed/filer/filer_notify_read.go
+++ b/weed/filer/filer_notify_read.go
@@ -33,7 +33,7 @@ func (f *Filer) collectPersistedLogBuffer(startPosition log_buffer.MessagePositi
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
if listDayErr != nil {
- return nil, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ return nil, fmt.Errorf("fail to list log by day: %w", listDayErr)
}
return NewOrderedLogVisitor(f, startPosition, stopTsNs, dayEntries)
@@ -45,7 +45,7 @@ func (f *Filer) HasPersistedLogFiles(startPosition log_buffer.MessagePosition) (
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 1, "", "", "")
if listDayErr != nil {
- return false, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ return false, fmt.Errorf("fail to list log by day: %w", listDayErr)
}
if len(dayEntries) == 0 {
return false, nil
@@ -118,7 +118,7 @@ func (o *OrderedLogVisitor) GetNext() (logEntry *filer_pb.LogEntry, err error) {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
} else {
- return nil, fmt.Errorf("failed to get next log entry: %v", nextErr)
+ return nil, fmt.Errorf("failed to get next log entry: %w", nextErr)
}
} else {
heap.Push(o.pq, &LogEntryItem{
@@ -245,7 +245,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
} else {
- return fmt.Errorf("failed to get next log entry for %v: %v", entryName, err)
+ return fmt.Errorf("failed to get next log entry for %v: %w", entryName, err)
}
} else {
heap.Push(v.pq, &LogEntryItem{
diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go
index 599f7d1d6..8642146e6 100644
--- a/weed/filer/hbase/hbase_store.go
+++ b/weed/filer/hbase/hbase_store.go
@@ -48,7 +48,7 @@ func (store *HbaseStore) initialize(zkquorum, table string) (err error) {
headers := map[string][]string{store.cfMetaDir: nil}
get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers))
if err != nil {
- return fmt.Errorf("NewGet returned an error: %v", err)
+ return fmt.Errorf("NewGet returned an error: %w", err)
}
_, err = store.Client.Get(get)
if err != gohbase.TableNotFound {
diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go
index 7f6507f27..c961b5e91 100644
--- a/weed/filer/leveldb/leveldb_store_kv.go
+++ b/weed/filer/leveldb/leveldb_store_kv.go
@@ -12,7 +12,7 @@ func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte)
err = store.db.Put(key, value, nil)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -27,7 +27,7 @@ func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte,
}
if err != nil {
- return nil, fmt.Errorf("kv get: %v", err)
+ return nil, fmt.Errorf("kv get: %w", err)
}
return
@@ -38,7 +38,7 @@ func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error)
err = store.db.Delete(key, nil)
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go
index a40909f90..984a25b51 100644
--- a/weed/filer/leveldb3/leveldb3_store_kv.go
+++ b/weed/filer/leveldb3/leveldb3_store_kv.go
@@ -13,7 +13,7 @@ func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte)
err = store.dbs[DEFAULT].Put(key, value, nil)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -28,7 +28,7 @@ func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte
}
if err != nil {
- return nil, fmt.Errorf("kv get: %v", err)
+ return nil, fmt.Errorf("kv get: %w", err)
}
return
@@ -39,7 +39,7 @@ func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error
err = store.dbs[DEFAULT].Delete(key, nil)
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 976822ad1..2ff62bf13 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -189,7 +189,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
})
if err != nil {
glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err)
- return fmt.Errorf("subscribe: %v", err)
+ return fmt.Errorf("subscribe: %w", err)
}
for {
@@ -204,7 +204,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
if err := processEventFn(resp); err != nil {
glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
- return fmt.Errorf("process %v: %v", resp, err)
+ return fmt.Errorf("process %v: %w", resp, err)
}
f.onMetadataChangeEvent(resp)
diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go
index 49bc2d515..13d2dd08c 100644
--- a/weed/filer/mongodb/mongodb_store_kv.go
+++ b/weed/filer/mongodb/mongodb_store_kv.go
@@ -24,7 +24,7 @@ func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte)
_, err = c.UpdateOne(ctx, filter, update, opts)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -56,7 +56,7 @@ func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error)
where := bson.M{"directory": dir, "name": name}
_, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go
index 314bb8b2f..9dbe09069 100644
--- a/weed/filer/mysql/mysql_store.go
+++ b/weed/filer/mysql/mysql_store.go
@@ -106,7 +106,7 @@ func (store *MysqlStore) initialize(dsn string, upsertQuery string, enableUpsert
}
cfg, err := mysql.ParseDSN(dsn)
if err != nil {
- return fmt.Errorf("can not parse DSN error:%v", err)
+ return fmt.Errorf("can not parse DSN error:%w", err)
}
var dbErr error
diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go
index 230cda759..1098b7482 100644
--- a/weed/filer/redis/universal_redis_store_kv.go
+++ b/weed/filer/redis/universal_redis_store_kv.go
@@ -13,7 +13,7 @@ func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value [
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -35,7 +35,7 @@ func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go
index 5b515f605..ab85ccb61 100644
--- a/weed/filer/redis2/universal_redis_store_kv.go
+++ b/weed/filer/redis2/universal_redis_store_kv.go
@@ -13,7 +13,7 @@ func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -35,7 +35,7 @@ func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (er
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/redis3/universal_redis_store_kv.go b/weed/filer/redis3/universal_redis_store_kv.go
index fd665d5d4..ba44a9c07 100644
--- a/weed/filer/redis3/universal_redis_store_kv.go
+++ b/weed/filer/redis3/universal_redis_store_kv.go
@@ -13,7 +13,7 @@ func (store *UniversalRedis3Store) KvPut(ctx context.Context, key []byte, value
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -35,7 +35,7 @@ func (store *UniversalRedis3Store) KvDelete(ctx context.Context, key []byte) (er
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/redis_lua/universal_redis_store_kv.go b/weed/filer/redis_lua/universal_redis_store_kv.go
index 01a7ba560..79b6495ce 100644
--- a/weed/filer/redis_lua/universal_redis_store_kv.go
+++ b/weed/filer/redis_lua/universal_redis_store_kv.go
@@ -13,7 +13,7 @@ func (store *UniversalRedisLuaStore) KvPut(ctx context.Context, key []byte, valu
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -35,7 +35,7 @@ func (store *UniversalRedisLuaStore) KvDelete(ctx context.Context, key []byte) (
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/remote_mapping.go b/weed/filer/remote_mapping.go
index c8adbeb4d..cc48d859e 100644
--- a/weed/filer/remote_mapping.go
+++ b/weed/filer/remote_mapping.go
@@ -16,13 +16,13 @@ func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress pb.ServerAdd
return readErr
}); readErr != nil {
if readErr != filer_pb.ErrNotFound {
- return nil, fmt.Errorf("read existing mapping: %v", readErr)
+ return nil, fmt.Errorf("read existing mapping: %w", readErr)
}
oldContent = nil
}
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
if readErr != nil {
- return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
+ return nil, fmt.Errorf("unmarshal mappings: %w", readErr)
}
return
@@ -38,7 +38,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor
})
if err != nil {
if err != filer_pb.ErrNotFound {
- return fmt.Errorf("read existing mapping: %v", err)
+ return fmt.Errorf("read existing mapping: %w", err)
}
}
@@ -53,7 +53,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
- return fmt.Errorf("save mapping: %v", err)
+ return fmt.Errorf("save mapping: %w", err)
}
return nil
@@ -69,7 +69,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error
})
if err != nil {
if err != filer_pb.ErrNotFound {
- return fmt.Errorf("read existing mapping: %v", err)
+ return fmt.Errorf("read existing mapping: %w", err)
}
}
@@ -84,7 +84,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
- return fmt.Errorf("save mapping: %v", err)
+ return fmt.Errorf("save mapping: %w", err)
}
return nil
@@ -100,7 +100,7 @@ func addRemoteStorageMapping(oldContent []byte, dir string, storageLocation *rem
mappings.Mappings[dir] = storageLocation
if newContent, err = proto.Marshal(mappings); err != nil {
- return oldContent, fmt.Errorf("marshal mappings: %v", err)
+ return oldContent, fmt.Errorf("marshal mappings: %w", err)
}
return
@@ -116,7 +116,7 @@ func removeRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
delete(mappings.Mappings, dir)
if newContent, err = proto.Marshal(mappings); err != nil {
- return oldContent, fmt.Errorf("marshal mappings: %v", err)
+ return oldContent, fmt.Errorf("marshal mappings: %w", err)
}
return
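
Several of the call sites above compare errors directly against sentinels (for example readErr != filer_pb.ErrNotFound). Once errors are wrapped with %w, a direct ==/!= comparison only matches an unwrapped sentinel; errors.Is is the check that still matches through the wrap chain. A hedged sketch with a hypothetical sentinel (not the real filer_pb.ErrNotFound):

package main

import (
	"errors"
	"fmt"
)

// ErrNotFound is a hypothetical sentinel standing in for filer_pb.ErrNotFound.
var ErrNotFound = errors.New("not found")

func readMapping() error {
	return fmt.Errorf("read existing mapping: %w", ErrNotFound)
}

func main() {
	err := readMapping()
	fmt.Println(err == ErrNotFound)          // false: the sentinel is now wrapped
	fmt.Println(errors.Is(err, ErrNotFound)) // true: errors.Is walks the %w chain
}
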
diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go
index ed2af0765..044dc1342 100644
--- a/weed/filer/rocksdb/rocksdb_store.go
+++ b/weed/filer/rocksdb/rocksdb_store.go
@@ -230,7 +230,7 @@ func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey
}
if err := iter.Err(); err != nil {
- return fmt.Errorf("prefix scan iterator: %v", err)
+ return fmt.Errorf("prefix scan iterator: %w", err)
}
return nil
}
diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go
index b94aab1ec..8432303b9 100644
--- a/weed/filer/rocksdb/rocksdb_store_kv.go
+++ b/weed/filer/rocksdb/rocksdb_store_kv.go
@@ -15,7 +15,7 @@ func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte)
err = store.db.Put(store.wo, key, value)
if err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -30,7 +30,7 @@ func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte,
}
if err != nil {
- return nil, fmt.Errorf("kv get: %v", err)
+ return nil, fmt.Errorf("kv get: %w", err)
}
return
@@ -41,7 +41,7 @@ func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error)
err = store.db.Delete(store.wo, key)
if err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go
index ea5b1a040..3d0a09214 100644
--- a/weed/filer/s3iam_conf.go
+++ b/weed/filer/s3iam_conf.go
@@ -29,12 +29,12 @@ func ProtoToText(writer io.Writer, config proto.Message) error {
text, marshalErr := m.Marshal(config)
if marshalErr != nil {
- return fmt.Errorf("marshal proto message: %v", marshalErr)
+ return fmt.Errorf("marshal proto message: %w", marshalErr)
}
_, writeErr := writer.Write(text)
if writeErr != nil {
- return fmt.Errorf("fail to write proto message: %v", writeErr)
+ return fmt.Errorf("fail to write proto message: %w", writeErr)
}
return writeErr
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index e7be28a0b..579b5ed50 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -134,7 +134,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds())
if err != nil {
stats.FilerHandlerCounter.WithLabelValues("chunkDownloadError").Inc()
- return fmt.Errorf("read chunk: %v", err)
+ return fmt.Errorf("read chunk: %w", err)
}
stats.FilerHandlerCounter.WithLabelValues("chunkDownload").Inc()
downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
diff --git a/weed/filer/tarantool/tarantool_store.go b/weed/filer/tarantool/tarantool_store.go
index 44727140a..4c9f8a600 100644
--- a/weed/filer/tarantool/tarantool_store.go
+++ b/weed/filer/tarantool/tarantool_store.go
@@ -51,7 +51,7 @@ func (store *TarantoolStore) Initialize(configuration weed_util.Configuration, p
timeoutStr := configuration.GetString(prefix + "timeout")
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
- return fmt.Errorf("parse tarantool store timeout: %v", err)
+ return fmt.Errorf("parse tarantool store timeout: %w", err)
}
maxReconnects := configuration.GetInt(prefix + "maxReconnects")
@@ -80,7 +80,7 @@ func (store *TarantoolStore) initialize(addresses []string, user string, passwor
ctx := context.Background()
p, err := pool.ConnectWithOpts(ctx, poolInstances, poolOpts)
if err != nil {
- return fmt.Errorf("Can't create connection pool: %v", err)
+ return fmt.Errorf("Can't create connection pool: %w", err)
}
_, err = p.Do(tarantool.NewPingRequest(), pool.ANY).Get()
diff --git a/weed/filer/tarantool/tarantool_store_kv.go b/weed/filer/tarantool/tarantool_store_kv.go
index e9f0f4dd0..e45ff778c 100644
--- a/weed/filer/tarantool/tarantool_store_kv.go
+++ b/weed/filer/tarantool/tarantool_store_kv.go
@@ -33,7 +33,7 @@ func (store *TarantoolStore) KvPut(ctx context.Context, key []byte, value []byte
ret := crud.Result{}
if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil {
- return fmt.Errorf("kv put: %v", err)
+ return fmt.Errorf("kv put: %w", err)
}
return nil
@@ -88,7 +88,7 @@ func (store *TarantoolStore) KvDelete(ctx context.Context, key []byte) (err erro
Opts(delOpts)
if _, err := store.pool.Do(req, pool.RW).Get(); err != nil {
- return fmt.Errorf("kv delete: %v", err)
+ return fmt.Errorf("kv delete: %w", err)
}
return nil
diff --git a/weed/filer/ydb/ydb_store.go b/weed/filer/ydb/ydb_store.go
index e3f074ed9..90b13aa04 100644
--- a/weed/filer/ydb/ydb_store.go
+++ b/weed/filer/ydb/ydb_store.go
@@ -140,13 +140,13 @@ func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.Qu
if tx, ok := ctx.Value("tx").(query.Transaction); ok {
res, err = tx.Query(ctx, *q, query.WithParameters(params))
if err != nil {
- return fmt.Errorf("execute transaction: %v", err)
+ return fmt.Errorf("execute transaction: %w", err)
}
} else {
err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
res, err = s.Query(ctx, *q, query.WithParameters(params), ts)
if err != nil {
- return fmt.Errorf("execute statement: %v", err)
+ return fmt.Errorf("execute statement: %w", err)
}
return nil
}, query.WithIdempotent())
@@ -158,7 +158,7 @@ func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.Qu
defer func() { _ = res.Close(ctx) }()
if processResultFunc != nil {
if err = processResultFunc(res); err != nil {
- return fmt.Errorf("process result: %v", err)
+ return fmt.Errorf("process result: %w", err)
}
}
}
diff --git a/weed/glog/glog_file.go b/weed/glog/glog_file.go
index 782a6e048..631a4cc99 100644
--- a/weed/glog/glog_file.go
+++ b/weed/glog/glog_file.go
@@ -153,5 +153,5 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
}
lastErr = err
}
- return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+ return nil, "", fmt.Errorf("log: cannot create log: %w", lastErr)
}
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
index e77d23e53..cf507ee82 100644
--- a/weed/iamapi/iamapi_server.go
+++ b/weed/iamapi/iamapi_server.go
@@ -104,7 +104,7 @@ func (iama *IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfigur
func (iama *IamS3ApiConfigure) GetS3ApiConfigurationFromCredentialManager(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
config, err := iama.credentialManager.LoadConfiguration(context.Background())
if err != nil {
- return fmt.Errorf("failed to load configuration from credential manager: %v", err)
+ return fmt.Errorf("failed to load configuration from credential manager: %w", err)
}
*s3cfg = *config
return nil
diff --git a/weed/mount/weedfs_write.go b/weed/mount/weedfs_write.go
index 77ad01b89..de8a756ce 100644
--- a/weed/mount/weedfs_write.go
+++ b/weed/mount/weedfs_write.go
@@ -49,7 +49,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
if err != nil {
glog.V(0).Infof("upload data %v: %v", filename, err)
- return nil, fmt.Errorf("upload data: %v", err)
+ return nil, fmt.Errorf("upload data: %w", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v: %v", filename, err)
diff --git a/weed/mq/broker/broker_connect.go b/weed/mq/broker/broker_connect.go
index 386d86570..c92fc299c 100644
--- a/weed/mq/broker/broker_connect.go
+++ b/weed/mq/broker/broker_connect.go
@@ -25,7 +25,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop
return pb.WithBrokerGrpcClient(true, brokerBalancer, b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
stream, err := client.PublisherToPubBalancer(context.Background())
if err != nil {
- return fmt.Errorf("connect to balancer %v: %v", brokerBalancer, err)
+ return fmt.Errorf("connect to balancer %v: %w", brokerBalancer, err)
}
defer stream.CloseSend()
err = stream.Send(&mq_pb.PublisherToPubBalancerRequest{
@@ -36,7 +36,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop
},
})
if err != nil {
- return fmt.Errorf("send init message: %v", err)
+ return fmt.Errorf("send init message: %w", err)
}
for {
diff --git a/weed/mq/broker/broker_grpc_configure.go b/weed/mq/broker/broker_grpc_configure.go
index 892a43007..fb916d880 100644
--- a/weed/mq/broker/broker_grpc_configure.go
+++ b/weed/mq/broker/broker_grpc_configure.go
@@ -66,7 +66,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
// save the topic configuration on filer
if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil {
- return nil, fmt.Errorf("configure topic: %v", err)
+ return nil, fmt.Errorf("configure topic: %w", err)
}
b.PubBalancer.OnPartitionChange(request.Topic, resp.BrokerPartitionAssignments)
diff --git a/weed/mq/broker/broker_grpc_lookup.go b/weed/mq/broker/broker_grpc_lookup.go
index e167e4e25..d2dfcaa41 100644
--- a/weed/mq/broker/broker_grpc_lookup.go
+++ b/weed/mq/broker/broker_grpc_lookup.go
@@ -164,14 +164,14 @@ func (b *MessageQueueBroker) GetTopicConfiguration(ctx context.Context, request
if conf, createdAtNs, modifiedAtNs, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration %s: %v", request.Topic, err)
- return nil, fmt.Errorf("failed to read topic configuration: %v", err)
+ return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Ensure topic assignments are active
err = b.ensureTopicActiveAssignments(t, conf)
if err != nil {
glog.V(0).Infof("ensure topic active assignments %s: %v", request.Topic, err)
- return nil, fmt.Errorf("failed to ensure topic assignments: %v", err)
+ return nil, fmt.Errorf("failed to ensure topic assignments: %w", err)
}
// Build the response with complete configuration including metadata
@@ -208,7 +208,7 @@ func (b *MessageQueueBroker) GetTopicPublishers(ctx context.Context, request *mq
var conf *mq_pb.ConfigureTopicResponse
if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration for publishers %s: %v", request.Topic, err)
- return nil, fmt.Errorf("failed to read topic configuration: %v", err)
+ return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Collect publishers from each partition that is hosted on this broker
@@ -262,7 +262,7 @@ func (b *MessageQueueBroker) GetTopicSubscribers(ctx context.Context, request *m
var conf *mq_pb.ConfigureTopicResponse
if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration for subscribers %s: %v", request.Topic, err)
- return nil, fmt.Errorf("failed to read topic configuration: %v", err)
+ return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Collect subscribers from each partition that is hosted on this broker
diff --git a/weed/mq/broker/broker_grpc_pub.go b/weed/mq/broker/broker_grpc_pub.go
index d2224ad99..c7cb81fcc 100644
--- a/weed/mq/broker/broker_grpc_pub.go
+++ b/weed/mq/broker/broker_grpc_pub.go
@@ -145,7 +145,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
// send to the local partition
if err = localTopicPartition.Publish(dataMessage); err != nil {
- return fmt.Errorf("topic %v partition %v publish error: %v", initMessage.Topic, initMessage.Partition, err)
+ return fmt.Errorf("topic %v partition %v publish error: %w", initMessage.Topic, initMessage.Partition, err)
}
// Update published offset and last seen time for this publisher
diff --git a/weed/mq/broker/broker_grpc_pub_balancer.go b/weed/mq/broker/broker_grpc_pub_balancer.go
index 5978d2173..8327ead7d 100644
--- a/weed/mq/broker/broker_grpc_pub_balancer.go
+++ b/weed/mq/broker/broker_grpc_pub_balancer.go
@@ -15,7 +15,7 @@ func (b *MessageQueueBroker) PublisherToPubBalancer(stream mq_pb.SeaweedMessagin
}
req, err := stream.Recv()
if err != nil {
- return fmt.Errorf("receive init message: %v", err)
+ return fmt.Errorf("receive init message: %w", err)
}
// process init message
diff --git a/weed/mq/broker/broker_topic_conf_read_write.go b/weed/mq/broker/broker_topic_conf_read_write.go
index 222ff16ba..647f78099 100644
--- a/weed/mq/broker/broker_topic_conf_read_write.go
+++ b/weed/mq/broker/broker_topic_conf_read_write.go
@@ -14,12 +14,12 @@ func (b *MessageQueueBroker) GetOrGenerateLocalPartition(t topic.Topic, partitio
conf, readConfErr := b.fca.ReadTopicConfFromFiler(t)
if readConfErr != nil {
glog.Errorf("topic %v not found: %v", t, readConfErr)
- return nil, fmt.Errorf("topic %v not found: %v", t, readConfErr)
+ return nil, fmt.Errorf("topic %v not found: %w", t, readConfErr)
}
localTopicPartition, _, getOrGenError = b.doGetOrGenLocalPartition(t, partition, conf)
if getOrGenError != nil {
glog.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError)
- return nil, fmt.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError)
+ return nil, fmt.Errorf("topic %v partition %v not setup: %w", t, partition, getOrGenError)
}
return localTopicPartition, nil
}
diff --git a/weed/mq/client/agent_client/publish_session.go b/weed/mq/client/agent_client/publish_session.go
index c12d345a1..384f12f48 100644
--- a/weed/mq/client/agent_client/publish_session.go
+++ b/weed/mq/client/agent_client/publish_session.go
@@ -44,13 +44,13 @@ func NewPublishSession(agentAddress string, topicSchema *schema.Schema, partitio
stream, err := agentClient.PublishRecord(context.Background())
if err != nil {
- return nil, fmt.Errorf("publish record: %v", err)
+ return nil, fmt.Errorf("publish record: %w", err)
}
if err = stream.Send(&mq_agent_pb.PublishRecordRequest{
SessionId: resp.SessionId,
}); err != nil {
- return nil, fmt.Errorf("send session id: %v", err)
+ return nil, fmt.Errorf("send session id: %w", err)
}
return &PublishSession{
@@ -67,7 +67,7 @@ func (a *PublishSession) CloseSession() error {
}
err := a.stream.CloseSend()
if err != nil {
- return fmt.Errorf("close send: %v", err)
+ return fmt.Errorf("close send: %w", err)
}
a.schema = nil
return err
diff --git a/weed/mq/client/agent_client/subscribe_session.go b/weed/mq/client/agent_client/subscribe_session.go
index 397450a29..f9803b66b 100644
--- a/weed/mq/client/agent_client/subscribe_session.go
+++ b/weed/mq/client/agent_client/subscribe_session.go
@@ -50,13 +50,13 @@ func NewSubscribeSession(agentAddress string, option *SubscribeOption) (*Subscri
stream, err := agentClient.SubscribeRecord(context.Background())
if err != nil {
- return nil, fmt.Errorf("subscribe record: %v", err)
+ return nil, fmt.Errorf("subscribe record: %w", err)
}
if err = stream.Send(&mq_agent_pb.SubscribeRecordRequest{
Init: initRequest,
}); err != nil {
- return nil, fmt.Errorf("send session id: %v", err)
+ return nil, fmt.Errorf("send session id: %w", err)
}
return &SubscribeSession{
diff --git a/weed/mq/client/pub_client/publish.go b/weed/mq/client/pub_client/publish.go
index 2a31a2185..1988e9279 100644
--- a/weed/mq/client/pub_client/publish.go
+++ b/weed/mq/client/pub_client/publish.go
@@ -38,7 +38,7 @@ func (p *TopicPublisher) PublishRecord(key []byte, recordValue *schema_pb.Record
// serialize record value
value, err := proto.Marshal(recordValue)
if err != nil {
- return fmt.Errorf("failed to marshal record value: %v", err)
+ return fmt.Errorf("failed to marshal record value: %w", err)
}
return p.doPublish(key, value)
diff --git a/weed/mq/client/pub_client/scheduler.go b/weed/mq/client/pub_client/scheduler.go
index a768fa7f8..40e8014c6 100644
--- a/weed/mq/client/pub_client/scheduler.go
+++ b/weed/mq/client/pub_client/scheduler.go
@@ -137,7 +137,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
brokerClient := mq_pb.NewSeaweedMessagingClient(grpcConnection)
stream, err := brokerClient.PublishMessage(context.Background())
if err != nil {
- return fmt.Errorf("create publish client: %v", err)
+ return fmt.Errorf("create publish client: %w", err)
}
publishClient := &PublishClient{
SeaweedMessaging_PublishMessageClient: stream,
@@ -154,12 +154,12 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
},
},
}); err != nil {
- return fmt.Errorf("send init message: %v", err)
+ return fmt.Errorf("send init message: %w", err)
}
// process the hello message
resp, err := stream.Recv()
if err != nil {
- return fmt.Errorf("recv init response: %v", err)
+ return fmt.Errorf("recv init response: %w", err)
}
if resp.Error != "" {
return fmt.Errorf("init response error: %v", resp.Error)
@@ -208,7 +208,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
Data: data,
},
}); err != nil {
- return fmt.Errorf("send publish data: %v", err)
+ return fmt.Errorf("send publish data: %w", err)
}
publishCounter++
atomic.StoreInt64(&publishedTsNs, data.TsNs)
@@ -218,7 +218,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
} else {
// CloseSend would cancel the context on the server side
if err := publishClient.CloseSend(); err != nil {
- return fmt.Errorf("close send: %v", err)
+ return fmt.Errorf("close send: %w", err)
}
}
diff --git a/weed/mq/client/sub_client/on_each_partition.go b/weed/mq/client/sub_client/on_each_partition.go
index 14a38cfa8..b6d6e90b5 100644
--- a/weed/mq/client/sub_client/on_each_partition.go
+++ b/weed/mq/client/sub_client/on_each_partition.go
@@ -22,7 +22,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
subscribeClient, err := client.SubscribeMessage(context.Background())
if err != nil {
- return fmt.Errorf("create subscribe client: %v", err)
+ return fmt.Errorf("create subscribe client: %w", err)
}
slidingWindowSize := sub.SubscriberConfig.SlidingWindowSize
@@ -94,7 +94,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
if errors.Is(err, io.EOF) {
return nil
}
- return fmt.Errorf("subscribe recv: %v", err)
+ return fmt.Errorf("subscribe recv: %w", err)
}
if resp.Message == nil {
glog.V(0).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup)
diff --git a/weed/mq/logstore/log_to_parquet.go b/weed/mq/logstore/log_to_parquet.go
index 2a646e4ee..d2762ff24 100644
--- a/weed/mq/logstore/log_to_parquet.go
+++ b/weed/mq/logstore/log_to_parquet.go
@@ -31,7 +31,7 @@ func CompactTopicPartitions(filerClient filer_pb.FilerClient, t topic.Topic, tim
// list the topic partition versions
topicVersions, err := collectTopicVersions(filerClient, t, timeAgo)
if err != nil {
- return fmt.Errorf("list topic files: %v", err)
+ return fmt.Errorf("list topic files: %w", err)
}
// compact the partitions
@@ -120,7 +120,7 @@ func compactTopicPartitionDir(filerClient filer_pb.FilerClient, topicName, parti
// create a parquet schema
parquetSchema, err := schema.ToParquetSchema(topicName, recordType)
if err != nil {
- return fmt.Errorf("ToParquetSchema failed: %v", err)
+ return fmt.Errorf("ToParquetSchema failed: %w", err)
}
// TODO parallelize the writing
@@ -210,7 +210,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
tempFile, err := os.CreateTemp(".", "t*.parquet")
if err != nil {
- return fmt.Errorf("create temp file: %v", err)
+ return fmt.Errorf("create temp file: %w", err)
}
defer func() {
tempFile.Close()
@@ -241,7 +241,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
record := &schema_pb.RecordValue{}
if err := proto.Unmarshal(entry.Data, record); err != nil {
- return fmt.Errorf("unmarshal record value: %v", err)
+ return fmt.Errorf("unmarshal record value: %w", err)
}
record.Fields[SW_COLUMN_NAME_TS] = &schema_pb.Value{
@@ -256,7 +256,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
}
if err := schema.AddRecordValue(rowBuilder, recordType, parquetLevels, record); err != nil {
- return fmt.Errorf("add record value: %v", err)
+ return fmt.Errorf("add record value: %w", err)
}
rows = append(rows, rowBuilder.Row())
@@ -264,18 +264,18 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
return nil
}); err != nil {
- return fmt.Errorf("iterate log entry %v/%v: %v", partitionDir, logFile.Name, err)
+ return fmt.Errorf("iterate log entry %v/%v: %w", partitionDir, logFile.Name, err)
}
fmt.Printf("processed %d rows\n", len(rows))
if _, err := writer.WriteRows(rows); err != nil {
- return fmt.Errorf("write rows: %v", err)
+ return fmt.Errorf("write rows: %w", err)
}
}
if err := writer.Close(); err != nil {
- return fmt.Errorf("close writer: %v", err)
+ return fmt.Errorf("close writer: %w", err)
}
// write to parquet file to partitionDir
@@ -291,13 +291,13 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile *os.File, partitionDir, parquetFileName string, preference *operation.StoragePreference, startTsNs, stopTsNs int64) error {
uploader, err := operation.NewUploader()
if err != nil {
- return fmt.Errorf("new uploader: %v", err)
+ return fmt.Errorf("new uploader: %w", err)
}
// get file size
fileInfo, err := sourceFile.Stat()
if err != nil {
- return fmt.Errorf("stat source file: %v", err)
+ return fmt.Errorf("stat source file: %w", err)
}
// upload file in chunks
@@ -360,7 +360,7 @@ func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile
Entry: entry,
})
}); err != nil {
- return fmt.Errorf("create entry: %v", err)
+ return fmt.Errorf("create entry: %w", err)
}
fmt.Printf("saved to %s/%s\n", partitionDir, parquetFileName)
@@ -436,12 +436,12 @@ func eachChunk(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType) (proc
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
- err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
+ err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err)
return
}
if _, err = eachLogEntryFn(logEntry); err != nil {
- err = fmt.Errorf("process log entry %v: %v", logEntry, err)
+ err = fmt.Errorf("process log entry %v: %w", logEntry, err)
return
}
diff --git a/weed/mq/logstore/read_log_from_disk.go b/weed/mq/logstore/read_log_from_disk.go
index 12fca1706..19b96a88d 100644
--- a/weed/mq/logstore/read_log_from_disk.go
+++ b/weed/mq/logstore/read_log_from_disk.go
@@ -34,7 +34,7 @@ func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p top
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
- err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
+ err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err)
return
}
if logEntry.TsNs <= starTsNs {
@@ -48,7 +48,7 @@ func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p top
// fmt.Printf(" read logEntry: %v, ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
- err = fmt.Errorf("process log entry %v: %v", logEntry, err)
+ err = fmt.Errorf("process log entry %v: %w", logEntry, err)
return
}
diff --git a/weed/mq/logstore/read_parquet_to_log.go b/weed/mq/logstore/read_parquet_to_log.go
index 1c53129f4..a64779520 100644
--- a/weed/mq/logstore/read_parquet_to_log.go
+++ b/weed/mq/logstore/read_parquet_to_log.go
@@ -69,7 +69,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
// convert parquet row to schema_pb.RecordValue
recordValue, err := schema.ToRecordValue(recordType, parquetLevels, row)
if err != nil {
- return processedTsNs, fmt.Errorf("ToRecordValue failed: %v", err)
+ return processedTsNs, fmt.Errorf("ToRecordValue failed: %w", err)
}
processedTsNs = recordValue.Fields[SW_COLUMN_NAME_TS].GetInt64Value()
if processedTsNs <= starTsNs {
@@ -81,7 +81,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
data, marshalErr := proto.Marshal(recordValue)
if marshalErr != nil {
- return processedTsNs, fmt.Errorf("marshal record value: %v", marshalErr)
+ return processedTsNs, fmt.Errorf("marshal record value: %w", marshalErr)
}
logEntry := &filer_pb.LogEntry{
@@ -93,7 +93,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
// fmt.Printf(" parquet entry %s ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
- return processedTsNs, fmt.Errorf("process log entry %v: %v", logEntry, err)
+ return processedTsNs, fmt.Errorf("process log entry %v: %w", logEntry, err)
}
}
diff --git a/weed/mq/schema/to_parquet_schema.go b/weed/mq/schema/to_parquet_schema.go
index 196546a32..036acc153 100644
--- a/weed/mq/schema/to_parquet_schema.go
+++ b/weed/mq/schema/to_parquet_schema.go
@@ -9,7 +9,7 @@ import (
func ToParquetSchema(topicName string, recordType *schema_pb.RecordType) (*parquet.Schema, error) {
rootNode, err := toParquetFieldTypeRecord(recordType)
if err != nil {
- return nil, fmt.Errorf("failed to convert record type to parquet schema: %v", err)
+ return nil, fmt.Errorf("failed to convert record type to parquet schema: %w", err)
}
// Fields are sorted by name, so the value should be sorted also
diff --git a/weed/mq/topic/local_partition.go b/weed/mq/topic/local_partition.go
index d1433775a..00ea04eee 100644
--- a/weed/mq/topic/local_partition.go
+++ b/weed/mq/topic/local_partition.go
@@ -155,7 +155,7 @@ func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessa
followerClient := mq_pb.NewSeaweedMessagingClient(p.followerGrpcConnection)
p.publishFolloweMeStream, err = followerClient.PublishFollowMe(ctx)
if err != nil {
- return fmt.Errorf("fail to create publish client: %v", err)
+ return fmt.Errorf("fail to create publish client: %w", err)
}
if err = p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{
Message: &mq_pb.PublishFollowMeRequest_Init{
diff --git a/weed/mq/topic/topic.go b/weed/mq/topic/topic.go
index 1e42c3f4d..56b9cda5f 100644
--- a/weed/mq/topic/topic.go
+++ b/weed/mq/topic/topic.go
@@ -52,12 +52,12 @@ func (t Topic) ReadConfFile(client filer_pb.SeaweedFilerClient) (*mq_pb.Configur
return nil, err
}
if err != nil {
- return nil, fmt.Errorf("read topic.conf of %v: %v", t, err)
+ return nil, fmt.Errorf("read topic.conf of %v: %w", t, err)
}
// parse into filer conf object
conf := &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(data, conf); err != nil {
- return nil, fmt.Errorf("unmarshal topic %v conf: %v", t, err)
+ return nil, fmt.Errorf("unmarshal topic %v conf: %w", t, err)
}
return conf, nil
}
@@ -75,7 +75,7 @@ func (t Topic) ReadConfFileWithMetadata(client filer_pb.SeaweedFilerClient) (*mq
if errors.Is(err, filer_pb.ErrNotFound) {
return nil, 0, 0, err
}
- return nil, 0, 0, fmt.Errorf("lookup topic.conf of %v: %v", t, err)
+ return nil, 0, 0, fmt.Errorf("lookup topic.conf of %v: %w", t, err)
}
// Get file metadata
@@ -88,7 +88,7 @@ func (t Topic) ReadConfFileWithMetadata(client filer_pb.SeaweedFilerClient) (*mq
// Parse the configuration
conf := &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(resp.Entry.Content, conf); err != nil {
- return nil, 0, 0, fmt.Errorf("unmarshal topic %v conf: %v", t, err)
+ return nil, 0, 0, fmt.Errorf("unmarshal topic %v conf: %w", t, err)
}
return conf, createdAtNs, modifiedAtNs, nil
@@ -98,7 +98,7 @@ func (t Topic) WriteConfFile(client filer_pb.SeaweedFilerClient, conf *mq_pb.Con
var buf bytes.Buffer
filer.ProtoToText(&buf, conf)
if err := filer.SaveInsideFiler(client, t.Dir(), filer.TopicConfFile, buf.Bytes()); err != nil {
- return fmt.Errorf("save topic %v conf: %v", t, err)
+ return fmt.Errorf("save topic %v conf: %w", t, err)
}
return nil
}
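
The reason for swapping %v for %w throughout these call sites is that fmt.Errorf with %w records the wrapped error in the returned value's chain instead of flattening it to text, so errors.Is and errors.As still see the underlying failure after context is added. A minimal, self-contained sketch of the difference (not part of this commit; the path and function name are made up for illustration):

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    // readConf mirrors the wrapping pattern used above: caller context is added
    // with %w, so the underlying error remains inspectable.
    func readConf(path string) error {
    	if _, err := os.ReadFile(path); err != nil {
    		return fmt.Errorf("read topic.conf of %s: %w", path, err)
    	}
    	return nil
    }

    func main() {
    	err := readConf("/no/such/dir/topic.conf")
    	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true with %w; false if %v had been used
    	var pathErr *fs.PathError
    	fmt.Println(errors.As(err, &pathErr)) // the concrete *fs.PathError is still reachable
    }
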
diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go
index c9e674257..fedcf3566 100644
--- a/weed/notification/aws_sqs/aws_sqs_pub.go
+++ b/weed/notification/aws_sqs/aws_sqs_pub.go
@@ -49,7 +49,7 @@ func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queue
sess, err := session.NewSession(config)
if err != nil {
- return fmt.Errorf("create aws session: %v", err)
+ return fmt.Errorf("create aws session: %w", err)
}
k.svc = sqs.New(sess)
diff --git a/weed/notification/webhook/http.go b/weed/notification/webhook/http.go
index bb6a11a09..6b1a0e26d 100644
--- a/weed/notification/webhook/http.go
+++ b/weed/notification/webhook/http.go
@@ -32,7 +32,7 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
// Serialize the protobuf message to JSON for HTTP payload
notificationData, err := json.Marshal(message.Notification)
if err != nil {
- return fmt.Errorf("failed to marshal notification: %v", err)
+ return fmt.Errorf("failed to marshal notification: %w", err)
}
payload := map[string]interface{}{
@@ -43,12 +43,12 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
jsonData, err := json.Marshal(payload)
if err != nil {
- return fmt.Errorf("failed to marshal message: %v", err)
+ return fmt.Errorf("failed to marshal message: %w", err)
}
req, err := http.NewRequest(http.MethodPost, h.endpoint, bytes.NewBuffer(jsonData))
if err != nil {
- return fmt.Errorf("failed to create request: %v", err)
+ return fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -68,7 +68,7 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
glog.Errorf("failed to drain response: %v", err)
}
- return fmt.Errorf("failed to send request: %v", err)
+ return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
diff --git a/weed/notification/webhook/webhook_queue.go b/weed/notification/webhook/webhook_queue.go
index f5853dc2d..b1b21bc9a 100644
--- a/weed/notification/webhook/webhook_queue.go
+++ b/weed/notification/webhook/webhook_queue.go
@@ -120,7 +120,7 @@ func (w *Queue) setupWatermillQueue(cfg *config) error {
logger,
)
if err != nil {
- return fmt.Errorf("failed to create router: %v", err)
+ return fmt.Errorf("failed to create router: %w", err)
}
w.router = router
@@ -135,7 +135,7 @@ func (w *Queue) setupWatermillQueue(cfg *config) error {
poisonQueue, err := middleware.PoisonQueue(w.queueChannel, deadLetterTopic)
if err != nil {
- return fmt.Errorf("failed to create poison queue: %v", err)
+ return fmt.Errorf("failed to create poison queue: %w", err)
}
router.AddPlugin(plugin.SignalsHandler)
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index eb54c674b..61fd2de48 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -80,7 +80,7 @@ func (ap *singleThreadAssignProxy) doAssign(grpcConnection *grpc.ClientConn, pri
ap.assignClient, err = client.StreamAssign(context.Background())
if err != nil {
ap.assignClient = nil
- return nil, fmt.Errorf("fail to create stream assign client: %v", err)
+ return nil, fmt.Errorf("fail to create stream assign client: %w", err)
}
}
@@ -105,7 +105,7 @@ func (ap *singleThreadAssignProxy) doAssign(grpcConnection *grpc.ClientConn, pri
WritableVolumeCount: request.WritableVolumeCount,
}
if err = ap.assignClient.Send(req); err != nil {
- return nil, fmt.Errorf("StreamAssignSend: %v", err)
+ return nil, fmt.Errorf("StreamAssignSend: %w", err)
}
resp, grpcErr := ap.assignClient.Recv()
if grpcErr != nil {
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index be3e5c98e..b0c6c651f 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -83,7 +83,7 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g
results, err := DeleteFileIds(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
- return fmt.Errorf("chunk delete: %v", err)
+ return fmt.Errorf("chunk delete: %w", err)
}
for _, result := range results {
if result.Error != "" {
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index 187fe6c3f..a48cf5ea2 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -129,7 +129,7 @@ func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assi
return nil
}); grpcAssignErr != nil {
- return fmt.Errorf("filerGrpcAddress assign volume: %v", grpcAssignErr)
+ return fmt.Errorf("filerGrpcAddress assign volume: %w", grpcAssignErr)
}
uploadOption.UploadUrl = genFileUrlFn(host, fileId)
@@ -171,7 +171,7 @@ func (uploader *Uploader) doUpload(ctx context.Context, reader io.Reader, option
} else {
data, err = io.ReadAll(reader)
if err != nil {
- err = fmt.Errorf("read input: %v", err)
+ err = fmt.Errorf("read input: %w", err)
return
}
}
@@ -245,7 +245,7 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option
cipherKey := util.GenCipherKey()
encryptedData, encryptionErr := util.Encrypt(data, cipherKey)
if encryptionErr != nil {
- err = fmt.Errorf("encrypt input: %v", encryptionErr)
+ err = fmt.Errorf("encrypt input: %w", encryptionErr)
return
}
@@ -389,13 +389,13 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
resp_body, ra_err := io.ReadAll(resp.Body)
if ra_err != nil {
- return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err)
+ return nil, fmt.Errorf("read response body %v: %w", option.UploadUrl, ra_err)
}
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.ErrorfCtx(ctx, "unmarshal %s: %v", option.UploadUrl, string(resp_body))
- return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err)
+ return nil, fmt.Errorf("unmarshal %v: %w", option.UploadUrl, unmarshal_err)
}
if ret.Error != "" {
return nil, fmt.Errorf("unmarshalled error %v: %v", option.UploadUrl, ret.Error)
diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go
index 55d492aca..eb2e9ccee 100644
--- a/weed/pb/filer_pb/filer_client_bfs.go
+++ b/weed/pb/filer_pb/filer_client_bfs.go
@@ -77,7 +77,7 @@ func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64
Directory: string(dir),
})
if err != nil {
- return fmt.Errorf("traverse bfs metadata: %v", err)
+ return fmt.Errorf("traverse bfs metadata: %w", err)
}
for {
resp, err := stream.Recv()
@@ -85,7 +85,7 @@ func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64
if err == io.EOF {
break
}
- return fmt.Errorf("traverse bfs metadata: %v", err)
+ return fmt.Errorf("traverse bfs metadata: %w", err)
}
if err := fn(util.FullPath(resp.Directory), resp.Entry); err != nil {
return err
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index c212100d9..b5fd4e1e0 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -112,7 +112,7 @@ func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *Create
resp, err := client.CreateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
- return fmt.Errorf("CreateEntry: %v", err)
+ return fmt.Errorf("CreateEntry: %w", err)
}
if resp.Error != "" {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
@@ -125,7 +125,7 @@ func UpdateEntry(ctx context.Context, client SeaweedFilerClient, request *Update
_, err := client.UpdateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
- return fmt.Errorf("UpdateEntry: %v", err)
+ return fmt.Errorf("UpdateEntry: %w", err)
}
return nil
}
@@ -137,7 +137,7 @@ func LookupEntry(ctx context.Context, client SeaweedFilerClient, request *Lookup
return nil, ErrNotFound
}
glog.V(3).InfofCtx(ctx, "read %s/%v: %v", request.Directory, request.Name, err)
- return nil, fmt.Errorf("LookupEntry1: %v", err)
+ return nil, fmt.Errorf("LookupEntry1: %w", err)
}
if resp.Entry == nil {
return nil, ErrNotFound
diff --git a/weed/pb/filer_pb_tail.go b/weed/pb/filer_pb_tail.go
index b7cca7585..f5ffac129 100644
--- a/weed/pb/filer_pb_tail.go
+++ b/weed/pb/filer_pb_tail.go
@@ -41,7 +41,7 @@ func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption,
err := WithFilerClient(true, option.SelfSignature, filerAddress, grpcDialOption, makeSubscribeMetadataFunc(option, processEventFn))
if err != nil {
- return fmt.Errorf("subscribing filer meta change: %v", err)
+ return fmt.Errorf("subscribing filer meta change: %w", err)
}
return err
}
@@ -50,7 +50,7 @@ func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient, option *Met
err := filerClient.WithFilerClient(true, makeSubscribeMetadataFunc(option, processEventFn))
if err != nil {
- return fmt.Errorf("subscribing filer meta change: %v", err)
+ return fmt.Errorf("subscribing filer meta change: %w", err)
}
return nil
@@ -72,7 +72,7 @@ func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn Proc
UntilNs: option.StopTsNs,
})
if err != nil {
- return fmt.Errorf("subscribe: %v", err)
+ return fmt.Errorf("subscribe: %w", err)
}
for {
diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go
index 3bca1d07e..26cdb4f37 100644
--- a/weed/pb/grpc_client_server.go
+++ b/weed/pb/grpc_client_server.go
@@ -200,7 +200,7 @@ func ParseServerAddress(server string, deltaPort int) (newServerAddress string,
host, port, parseErr := hostAndPort(server)
if parseErr != nil {
- return "", fmt.Errorf("server port parse error: %v", parseErr)
+ return "", fmt.Errorf("server port parse error: %w", parseErr)
}
newPort := int(port) + deltaPort
@@ -215,7 +215,7 @@ func hostAndPort(address string) (host string, port uint64, err error) {
}
port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64)
if err != nil {
- return "", 0, fmt.Errorf("server port parse error: %v", err)
+ return "", 0, fmt.Errorf("server port parse error: %w", err)
}
return address[:colonIndex], port, err
diff --git a/weed/remote_storage/gcs/gcs_storage_client.go b/weed/remote_storage/gcs/gcs_storage_client.go
index b048effd9..8e8a97a1c 100644
--- a/weed/remote_storage/gcs/gcs_storage_client.go
+++ b/weed/remote_storage/gcs/gcs_storage_client.go
@@ -56,7 +56,7 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.
c, err := storage.NewClient(context.Background(), option.WithCredentialsFile(googleApplicationCredentials))
if err != nil {
- return nil, fmt.Errorf("failed to create client: %v", err)
+ return nil, fmt.Errorf("failed to create client: %w", err)
}
client.client = c
diff --git a/weed/remote_storage/s3/aliyun.go b/weed/remote_storage/s3/aliyun.go
index 08b3071e6..19a0ef4ae 100644
--- a/weed/remote_storage/s3/aliyun.go
+++ b/weed/remote_storage/s3/aliyun.go
@@ -42,7 +42,7 @@ func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create aliyun session: %v", err)
+ return nil, fmt.Errorf("create aliyun session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/backblaze.go b/weed/remote_storage/s3/backblaze.go
index 49f9f3ed9..0f42730c9 100644
--- a/weed/remote_storage/s3/backblaze.go
+++ b/weed/remote_storage/s3/backblaze.go
@@ -37,7 +37,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_st
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create backblaze session: %v", err)
+ return nil, fmt.Errorf("create backblaze session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/baidu.go b/weed/remote_storage/s3/baidu.go
index e562410fd..32976c4a0 100644
--- a/weed/remote_storage/s3/baidu.go
+++ b/weed/remote_storage/s3/baidu.go
@@ -42,7 +42,7 @@ func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create baidu session: %v", err)
+ return nil, fmt.Errorf("create baidu session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/contabo.go b/weed/remote_storage/s3/contabo.go
index 69d161c07..8147350e9 100644
--- a/weed/remote_storage/s3/contabo.go
+++ b/weed/remote_storage/s3/contabo.go
@@ -43,7 +43,7 @@ func (s ContaboRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create contabo session: %v", err)
+ return nil, fmt.Errorf("create contabo session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/filebase.go b/weed/remote_storage/s3/filebase.go
index 903b25858..ed8cfb409 100644
--- a/weed/remote_storage/s3/filebase.go
+++ b/weed/remote_storage/s3/filebase.go
@@ -43,7 +43,7 @@ func (s FilebaseRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_sto
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create filebase session: %v", err)
+ return nil, fmt.Errorf("create filebase session: %w", err)
}
sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
diff --git a/weed/remote_storage/s3/s3_storage_client.go b/weed/remote_storage/s3/s3_storage_client.go
index 48c8acdf9..280a856b0 100644
--- a/weed/remote_storage/s3/s3_storage_client.go
+++ b/weed/remote_storage/s3/s3_storage_client.go
@@ -48,7 +48,7 @@ func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.R
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create aws session: %v", err)
+ return nil, fmt.Errorf("create aws session: %w", err)
}
if conf.S3V4Signature {
sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
@@ -108,10 +108,10 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
return true
})
if listErr != nil {
- err = fmt.Errorf("list %v: %v", remote, listErr)
+ err = fmt.Errorf("list %v: %w", remote, listErr)
}
if localErr != nil {
- err = fmt.Errorf("process %v: %v", remote, localErr)
+ err = fmt.Errorf("process %v: %w", remote, localErr)
}
}
return
@@ -252,7 +252,7 @@ func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation)
func (s *s3RemoteStorageClient) ListBuckets() (buckets []*remote_storage.Bucket, err error) {
resp, err := s.conn.ListBuckets(&s3.ListBucketsInput{})
if err != nil {
- return nil, fmt.Errorf("list buckets: %v", err)
+ return nil, fmt.Errorf("list buckets: %w", err)
}
for _, b := range resp.Buckets {
buckets = append(buckets, &remote_storage.Bucket{
diff --git a/weed/remote_storage/s3/storj.go b/weed/remote_storage/s3/storj.go
index e2523b404..dd2fead56 100644
--- a/weed/remote_storage/s3/storj.go
+++ b/weed/remote_storage/s3/storj.go
@@ -42,7 +42,7 @@ func (s StorjRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create storj session: %v", err)
+ return nil, fmt.Errorf("create storj session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/tencent.go b/weed/remote_storage/s3/tencent.go
index 36952545a..d010b03b9 100644
--- a/weed/remote_storage/s3/tencent.go
+++ b/weed/remote_storage/s3/tencent.go
@@ -42,7 +42,7 @@ func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create tencent session: %v", err)
+ return nil, fmt.Errorf("create tencent session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/remote_storage/s3/wasabi.go b/weed/remote_storage/s3/wasabi.go
index 0cabd3387..8d330a29f 100644
--- a/weed/remote_storage/s3/wasabi.go
+++ b/weed/remote_storage/s3/wasabi.go
@@ -42,7 +42,7 @@ func (s WasabiRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora
sess, err := session.NewSession(config)
if err != nil {
- return nil, fmt.Errorf("create wasabi session: %v", err)
+ return nil, fmt.Errorf("create wasabi session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index 57aa63e5f..654725725 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -84,7 +84,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures)
if err != nil {
- return fmt.Errorf("delete old entry %v: %v", key, err)
+ return fmt.Errorf("delete old entry %v: %w", key, err)
}
glog.V(4).Infof("creating missing %v", key)
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 4bcbc7898..1f257941f 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -94,7 +94,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
uploader, err := operation.NewUploader()
if err != nil {
glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
- return "", fmt.Errorf("upload data: %v", err)
+ return "", fmt.Errorf("upload data: %w", err)
}
fileId, uploadResult, err, _ := uploader.UploadWithRetry(
@@ -128,7 +128,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
if err != nil {
glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
- return "", fmt.Errorf("upload data: %v", err)
+ return "", fmt.Errorf("upload data: %w", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v: %v", filename, err)
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 279108e16..28428545b 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -118,7 +118,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey string) erro
sess, err := session.NewSession(config)
if err != nil {
- return fmt.Errorf("create aws session: %v", err)
+ return fmt.Errorf("create aws session: %w", err)
}
s3sink.conn = s3.New(sess)
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index 7fc5c3f46..5eb42c2aa 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -50,7 +50,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que
sess, err := session.NewSession(config)
if err != nil {
- return fmt.Errorf("create aws session: %v", err)
+ return fmt.Errorf("create aws session: %w", err)
}
k.svc = sqs.New(sess)
diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go
index 92f7ce609..4f5304cf6 100644
--- a/weed/replication/sub/notification_kafka.go
+++ b/weed/replication/sub/notification_kafka.go
@@ -135,7 +135,7 @@ func loadProgress(offsetFile string) *KafkaProgress {
func (progress *KafkaProgress) saveProgress() error {
data, err := json.Marshal(progress)
if err != nil {
- return fmt.Errorf("failed to marshal progress: %v", err)
+ return fmt.Errorf("failed to marshal progress: %w", err)
}
err = util.WriteFile(progress.offsetFile, data, 0640)
if err != nil {
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index 7c731539f..78217df9a 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -178,7 +178,7 @@ func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []b
s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil {
glog.Warningf("unmarshal error: %v", err)
- return fmt.Errorf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal error: %w", err)
}
if err := filer.CheckDuplicateAccessKey(s3ApiConfiguration); err != nil {
@@ -534,7 +534,7 @@ func (iam *IdentityAccessManagement) GetCredentialManager() *credential.Credenti
func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromCredentialManager() error {
s3ApiConfiguration, err := iam.credentialManager.LoadConfiguration(context.Background())
if err != nil {
- return fmt.Errorf("failed to load configuration from credential manager: %v", err)
+ return fmt.Errorf("failed to load configuration from credential manager: %w", err)
}
if len(s3ApiConfiguration.Identities) == 0 {
diff --git a/weed/s3api/cors/cors.go b/weed/s3api/cors/cors.go
index 1eef71b72..c3fd5dd4b 100644
--- a/weed/s3api/cors/cors.go
+++ b/weed/s3api/cors/cors.go
@@ -540,7 +540,7 @@ func (s *Storage) Store(bucket string, config *CORSConfiguration) error {
metadataBytes, err := json.Marshal(metadata)
if err != nil {
- return fmt.Errorf("failed to marshal bucket metadata: %v", err)
+ return fmt.Errorf("failed to marshal bucket metadata: %w", err)
}
// Store metadata
@@ -579,7 +579,7 @@ func (s *Storage) Load(bucket string) (*CORSConfiguration, error) {
var metadata map[string]interface{}
if err := json.Unmarshal(entry.Content, &metadata); err != nil {
- return nil, fmt.Errorf("failed to unmarshal metadata: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
}
corsData, exists := metadata["cors"]
@@ -590,12 +590,12 @@ func (s *Storage) Load(bucket string) (*CORSConfiguration, error) {
// Convert back to CORSConfiguration
corsBytes, err := json.Marshal(corsData)
if err != nil {
- return nil, fmt.Errorf("failed to marshal CORS data: %v", err)
+ return nil, fmt.Errorf("failed to marshal CORS data: %w", err)
}
var config CORSConfiguration
if err := json.Unmarshal(corsBytes, &config); err != nil {
- return nil, fmt.Errorf("failed to unmarshal CORS configuration: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal CORS configuration: %w", err)
}
return &config, nil
@@ -613,7 +613,7 @@ func (s *Storage) Delete(bucket string) error {
var metadata map[string]interface{}
if len(entry.Content) > 0 {
if err := json.Unmarshal(entry.Content, &metadata); err != nil {
- return fmt.Errorf("failed to unmarshal metadata: %v", err)
+ return fmt.Errorf("failed to unmarshal metadata: %w", err)
}
} else {
return nil // No metadata to delete
@@ -624,7 +624,7 @@ func (s *Storage) Delete(bucket string) error {
metadataBytes, err := json.Marshal(metadata)
if err != nil {
- return fmt.Errorf("failed to marshal metadata: %v", err)
+ return fmt.Errorf("failed to marshal metadata: %w", err)
}
// Update metadata
diff --git a/weed/s3api/policy_engine/engine.go b/weed/s3api/policy_engine/engine.go
index 1e0126eb6..709fafda4 100644
--- a/weed/s3api/policy_engine/engine.go
+++ b/weed/s3api/policy_engine/engine.go
@@ -46,12 +46,12 @@ func NewPolicyEngine() *PolicyEngine {
func (engine *PolicyEngine) SetBucketPolicy(bucketName string, policyJSON string) error {
policy, err := ParsePolicy(policyJSON)
if err != nil {
- return fmt.Errorf("invalid policy: %v", err)
+ return fmt.Errorf("invalid policy: %w", err)
}
compiled, err := CompilePolicy(policy)
if err != nil {
- return fmt.Errorf("failed to compile policy: %v", err)
+ return fmt.Errorf("failed to compile policy: %w", err)
}
engine.mutex.Lock()
diff --git a/weed/s3api/policy_engine/types.go b/weed/s3api/policy_engine/types.go
index 134305183..953e89650 100644
--- a/weed/s3api/policy_engine/types.go
+++ b/weed/s3api/policy_engine/types.go
@@ -179,11 +179,11 @@ func validateStatement(stmt *PolicyStatement) error {
func ParsePolicy(policyJSON string) (*PolicyDocument, error) {
var policy PolicyDocument
if err := json.Unmarshal([]byte(policyJSON), &policy); err != nil {
- return nil, fmt.Errorf("failed to parse policy JSON: %v", err)
+ return nil, fmt.Errorf("failed to parse policy JSON: %w", err)
}
if err := ValidatePolicy(&policy); err != nil {
- return nil, fmt.Errorf("invalid policy: %v", err)
+ return nil, fmt.Errorf("invalid policy: %w", err)
}
return &policy, nil
diff --git a/weed/s3api/s3api_bucket_config.go b/weed/s3api/s3api_bucket_config.go
index f6de0529e..43c056973 100644
--- a/weed/s3api/s3api_bucket_config.go
+++ b/weed/s3api/s3api_bucket_config.go
@@ -274,7 +274,7 @@ func (s3a *S3ApiServer) loadCORSFromMetadata(bucket string) (*cors.CORSConfigura
var metadata map[string]json.RawMessage
if err := json.Unmarshal(entry.Content, &metadata); err != nil {
glog.Errorf("loadCORSFromMetadata: failed to unmarshal metadata for bucket %s: %v", bucket, err)
- return nil, fmt.Errorf("failed to unmarshal metadata: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
}
corsData, exists := metadata["cors"]
@@ -287,7 +287,7 @@ func (s3a *S3ApiServer) loadCORSFromMetadata(bucket string) (*cors.CORSConfigura
var config cors.CORSConfiguration
if err := json.Unmarshal(corsData, &config); err != nil {
glog.Errorf("loadCORSFromMetadata: failed to unmarshal CORS configuration for bucket %s: %v", bucket, err)
- return nil, fmt.Errorf("failed to unmarshal CORS configuration: %v", err)
+ return nil, fmt.Errorf("failed to unmarshal CORS configuration: %w", err)
}
return &config, nil
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index e9dc10cff..0bc4a7b10 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -99,7 +99,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
IncludeNormalVolumes: true,
}); err != nil {
glog.Errorf("list collection: %v", err)
- return fmt.Errorf("list collections: %v", err)
+ return fmt.Errorf("list collections: %w", err)
} else {
for _, c := range resp.Collections {
if s3a.getCollectionName(bucket) == c.Name {
@@ -161,7 +161,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
// Store the configuration as XML in extended attributes
configXML, err := xml.Marshal(objectLockConfig)
if err != nil {
- return fmt.Errorf("failed to marshal Object Lock configuration to XML: %v", err)
+ return fmt.Errorf("failed to marshal Object Lock configuration to XML: %w", err)
}
bucketConfig.Entry.Extended[s3_constants.ExtObjectLockConfigKey] = configXML
diff --git a/weed/s3api/s3api_circuit_breaker.go b/weed/s3api/s3api_circuit_breaker.go
index 6e14da0af..f1d9d7f7c 100644
--- a/weed/s3api/s3api_circuit_breaker.go
+++ b/weed/s3api/s3api_circuit_breaker.go
@@ -36,7 +36,7 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
return nil
}
if err != nil {
- return fmt.Errorf("read S3 circuit breaker config: %v", err)
+ return fmt.Errorf("read S3 circuit breaker config: %w", err)
}
return cb.LoadS3ApiConfigurationFromBytes(content)
})
@@ -52,7 +52,7 @@ func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error
cbCfg := &s3_pb.S3CircuitBreakerConfig{}
if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil {
glog.Warningf("unmarshal error: %v", err)
- return fmt.Errorf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal error: %w", err)
}
if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {
return err
diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go
index 7ea73de5a..9ce8a6377 100644
--- a/weed/s3api/s3api_object_handlers_copy.go
+++ b/weed/s3api/s3api_object_handlers_copy.go
@@ -497,11 +497,11 @@ func (s3a *S3ApiServer) copySingleChunk(chunk *filer_pb.FileChunk, dstPath strin
// Download and upload the chunk
chunkData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size))
if err != nil {
- return nil, fmt.Errorf("download chunk data: %v", err)
+ return nil, fmt.Errorf("download chunk data: %w", err)
}
if err := s3a.uploadChunkData(chunkData, assignResult); err != nil {
- return nil, fmt.Errorf("upload chunk data: %v", err)
+ return nil, fmt.Errorf("upload chunk data: %w", err)
}
return dstChunk, nil
@@ -531,11 +531,11 @@ func (s3a *S3ApiServer) copySingleChunkForRange(originalChunk, rangeChunk *filer
// Download and upload the chunk portion
chunkData, err := s3a.downloadChunkData(srcUrl, offsetInChunk, int64(rangeChunk.Size))
if err != nil {
- return nil, fmt.Errorf("download chunk range data: %v", err)
+ return nil, fmt.Errorf("download chunk range data: %w", err)
}
if err := s3a.uploadChunkData(chunkData, assignResult); err != nil {
- return nil, fmt.Errorf("upload chunk range data: %v", err)
+ return nil, fmt.Errorf("upload chunk range data: %w", err)
}
return dstChunk, nil
@@ -554,7 +554,7 @@ func (s3a *S3ApiServer) assignNewVolume(dstPath string) (*filer_pb.AssignVolumeR
Path: dstPath,
})
if err != nil {
- return fmt.Errorf("assign volume: %v", err)
+ return fmt.Errorf("assign volume: %w", err)
}
if resp.Error != "" {
return fmt.Errorf("assign volume: %v", resp.Error)
@@ -595,12 +595,12 @@ func parseRangeHeader(rangeHeader string) (startOffset, endOffset int64, err err
startOffset, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
- return 0, 0, fmt.Errorf("invalid start offset: %v", err)
+ return 0, 0, fmt.Errorf("invalid start offset: %w", err)
}
endOffset, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
- return 0, 0, fmt.Errorf("invalid end offset: %v", err)
+ return 0, 0, fmt.Errorf("invalid end offset: %w", err)
}
return startOffset, endOffset, nil
@@ -768,14 +768,14 @@ func (s3a *S3ApiServer) lookupVolumeUrl(fileId string) (string, error) {
err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
vid, _, err := operation.ParseFileId(fileId)
if err != nil {
- return fmt.Errorf("parse file ID: %v", err)
+ return fmt.Errorf("parse file ID: %w", err)
}
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
- return fmt.Errorf("lookup volume: %v", err)
+ return fmt.Errorf("lookup volume: %w", err)
}
if locations, found := resp.LocationsMap[vid]; found && len(locations.Locations) > 0 {
@@ -787,7 +787,7 @@ func (s3a *S3ApiServer) lookupVolumeUrl(fileId string) (string, error) {
return nil
})
if err != nil {
- return "", fmt.Errorf("lookup volume URL: %v", err)
+ return "", fmt.Errorf("lookup volume URL: %w", err)
}
return srcUrl, nil
}
@@ -797,7 +797,7 @@ func (s3a *S3ApiServer) setChunkFileId(chunk *filer_pb.FileChunk, assignResult *
chunk.FileId = assignResult.FileId
fid, err := filer_pb.ToFileIdObject(assignResult.FileId)
if err != nil {
- return fmt.Errorf("parse file ID: %v", err)
+ return fmt.Errorf("parse file ID: %w", err)
}
chunk.Fid = fid
return nil
@@ -808,13 +808,13 @@ func (s3a *S3ApiServer) prepareChunkCopy(sourceFileId, dstPath string) (*filer_p
// Assign new volume
assignResult, err := s3a.assignNewVolume(dstPath)
if err != nil {
- return nil, "", fmt.Errorf("assign volume: %v", err)
+ return nil, "", fmt.Errorf("assign volume: %w", err)
}
// Look up source URL
srcUrl, err := s3a.lookupVolumeUrl(sourceFileId)
if err != nil {
- return nil, "", fmt.Errorf("lookup source URL: %v", err)
+ return nil, "", fmt.Errorf("lookup source URL: %w", err)
}
return assignResult, srcUrl, nil
@@ -834,11 +834,11 @@ func (s3a *S3ApiServer) uploadChunkData(chunkData []byte, assignResult *filer_pb
}
uploader, err := operation.NewUploader()
if err != nil {
- return fmt.Errorf("create uploader: %v", err)
+ return fmt.Errorf("create uploader: %w", err)
}
_, err = uploader.UploadData(context.Background(), chunkData, uploadOption)
if err != nil {
- return fmt.Errorf("upload chunk: %v", err)
+ return fmt.Errorf("upload chunk: %w", err)
}
return nil
@@ -851,7 +851,7 @@ func (s3a *S3ApiServer) downloadChunkData(srcUrl string, offset, size int64) ([]
chunkData = append(chunkData, data...)
})
if err != nil {
- return nil, fmt.Errorf("download chunk: %v", err)
+ return nil, fmt.Errorf("download chunk: %w", err)
}
if shouldRetry {
return nil, fmt.Errorf("download chunk: retry needed")
diff --git a/weed/s3api/s3api_object_handlers_list.go b/weed/s3api/s3api_object_handlers_list.go
index 5233b7c30..bbb67d391 100644
--- a/weed/s3api/s3api_object_handlers_list.go
+++ b/weed/s3api/s3api_object_handlers_list.go
@@ -397,7 +397,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
}
subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", cursor, "", delimiter, false, eachEntryFn)
if subErr != nil {
- err = fmt.Errorf("doListFilerEntries2: %v", subErr)
+ err = fmt.Errorf("doListFilerEntries2: %w", subErr)
return
}
// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "subNextMarker", subNextMarker)
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index ebdfc8567..1d9fe9f92 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -349,7 +349,7 @@ func (s3a *S3ApiServer) updateLatestVersionInDirectory(bucket, object, versionId
versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath)
if err != nil {
glog.Errorf("updateLatestVersionInDirectory: failed to get .versions entry: %v", err)
- return fmt.Errorf("failed to get .versions entry: %v", err)
+ return fmt.Errorf("failed to get .versions entry: %w", err)
}
// Add or update the latest version metadata
@@ -367,7 +367,7 @@ func (s3a *S3ApiServer) updateLatestVersionInDirectory(bucket, object, versionId
})
if err != nil {
glog.Errorf("updateLatestVersionInDirectory: failed to update .versions directory metadata: %v", err)
- return fmt.Errorf("failed to update .versions directory metadata: %v", err)
+ return fmt.Errorf("failed to update .versions directory metadata: %w", err)
}
return nil
diff --git a/weed/s3api/s3api_object_retention.go b/weed/s3api/s3api_object_retention.go
index 8ef80a885..88a5d1261 100644
--- a/weed/s3api/s3api_object_retention.go
+++ b/weed/s3api/s3api_object_retention.go
@@ -118,7 +118,7 @@ func parseXML[T any](request *http.Request, result *T) error {
decoder := xml.NewDecoder(request.Body)
if err := decoder.Decode(result); err != nil {
- return fmt.Errorf("error parsing XML: %v", err)
+ return fmt.Errorf("error parsing XML: %w", err)
}
return nil
@@ -249,7 +249,7 @@ func (s3a *S3ApiServer) getObjectEntry(bucket, object, versionId string) (*filer
// Check if versioning is enabled
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
if vErr != nil {
- return nil, fmt.Errorf("error checking versioning: %v", vErr)
+ return nil, fmt.Errorf("error checking versioning: %w", vErr)
}
if versioningEnabled {
@@ -316,7 +316,7 @@ func (s3a *S3ApiServer) setObjectRetention(bucket, object, versionId string, ret
// Check if versioning is enabled
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
if vErr != nil {
- return fmt.Errorf("error checking versioning: %v", vErr)
+ return fmt.Errorf("error checking versioning: %w", vErr)
}
if versioningEnabled {
@@ -427,7 +427,7 @@ func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, leg
// Check if versioning is enabled
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
if vErr != nil {
- return fmt.Errorf("error checking versioning: %v", vErr)
+ return fmt.Errorf("error checking versioning: %w", vErr)
}
if versioningEnabled {
@@ -601,7 +601,7 @@ func (s3a *S3ApiServer) isObjectLockAvailable(bucket string) error {
if errors.Is(err, filer_pb.ErrNotFound) {
return ErrBucketNotFound
}
- return fmt.Errorf("error checking versioning status: %v", err)
+ return fmt.Errorf("error checking versioning status: %w", err)
}
if !versioningEnabled {
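
Several hunks in this change sit next to sentinel checks such as errors.Is(err, filer_pb.ErrNotFound); wrapping intermediate errors with %w is what keeps those checks matching once extra context is layered on top. A short sketch with a stand-in sentinel (names are illustrative, not from the repo):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // ErrNotFound stands in for a sentinel like filer_pb.ErrNotFound.
    var ErrNotFound = errors.New("not found")

    func checkVersioning(bucket string) error {
    	// Pretend the underlying lookup failed with the sentinel.
    	return fmt.Errorf("error checking versioning status: %w", ErrNotFound)
    }

    func main() {
    	err := checkVersioning("my-bucket")
    	fmt.Println(errors.Is(err, ErrNotFound)) // true: the sentinel survives the added context
    }
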
diff --git a/weed/s3api/s3api_object_versioning.go b/weed/s3api/s3api_object_versioning.go
index 505605aa4..cfb3d597c 100644
--- a/weed/s3api/s3api_object_versioning.go
+++ b/weed/s3api/s3api_object_versioning.go
@@ -104,14 +104,14 @@ func (s3a *S3ApiServer) createDeleteMarker(bucket, object string) (string, error
entry.Extended[s3_constants.ExtDeleteMarkerKey] = []byte("true")
})
if err != nil {
- return "", fmt.Errorf("failed to create delete marker in .versions directory: %v", err)
+ return "", fmt.Errorf("failed to create delete marker in .versions directory: %w", err)
}
// Update the .versions directory metadata to indicate this delete marker is the latest version
err = s3a.updateLatestVersionInDirectory(bucket, cleanObject, versionId, versionFileName)
if err != nil {
glog.Errorf("createDeleteMarker: failed to update latest version in directory: %v", err)
- return "", fmt.Errorf("failed to update latest version in directory: %v", err)
+ return "", fmt.Errorf("failed to update latest version in directory: %w", err)
}
glog.V(2).Infof("createDeleteMarker: successfully created delete marker %s for %s/%s", versionId, bucket, object)
@@ -455,7 +455,7 @@ func (s3a *S3ApiServer) getLatestObjectVersion(bucket, object string) (*filer_pb
// Get the .versions directory entry to read latest version metadata
versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath)
if err != nil {
- return nil, fmt.Errorf("failed to get .versions directory: %v", err)
+ return nil, fmt.Errorf("failed to get .versions directory: %w", err)
}
// Check if directory has latest version metadata
diff --git a/weed/server/common.go b/weed/server/common.go
index a5ebd45c8..cf65bd29d 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -310,13 +310,13 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
glog.Errorf("ProcessRangeRequest: %v", err)
w.Header().Del("Content-Length")
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("ProcessRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %w", err)
}
if err = writeFn(bufferedWriter); err != nil {
glog.Errorf("ProcessRangeRequest: %v", err)
w.Header().Del("Content-Length")
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("ProcessRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %w", err)
}
return nil
}
@@ -327,7 +327,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
if err != nil {
glog.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return fmt.Errorf("ProcessRangeRequest header: %v", err)
+ return fmt.Errorf("ProcessRangeRequest header: %w", err)
}
if sumRangesSize(ranges) > totalSize {
// The total number of bytes in all the ranges
@@ -360,7 +360,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
w.Header().Del("Content-Length")
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("ProcessRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %w", err)
}
w.WriteHeader(http.StatusPartialContent)
err = writeFn(bufferedWriter)
@@ -368,7 +368,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
w.Header().Del("Content-Length")
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("ProcessRangeRequest range[0]: %v", err)
+ return fmt.Errorf("ProcessRangeRequest range[0]: %w", err)
}
return nil
}
@@ -379,7 +379,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
for i, ra := range ranges {
if ra.start > totalSize {
http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return fmt.Errorf("out of range: %v", err)
+ return fmt.Errorf("out of range: %w", err)
}
writeFn, err := prepareWriteFn(ra.start, ra.length)
if err != nil {
@@ -422,7 +422,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil {
glog.Errorf("ProcessRangeRequest err: %v", err)
http.Error(w, "Internal Error", http.StatusInternalServerError)
- return fmt.Errorf("ProcessRangeRequest err: %v", err)
+ return fmt.Errorf("ProcessRangeRequest err: %w", err)
}
return nil
}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index f186ed676..a18c55bb1 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -207,7 +207,7 @@ func (fs *FilerServer) cleanupChunks(ctx context.Context, fullpath string, exist
if existingEntry != nil {
garbage, err = filer.MinusChunks(ctx, fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
if err != nil {
- return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %v", err)
+ return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %w", err)
}
}
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index dfe594b46..00c2e0ff3 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -59,7 +59,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
if readPersistedLogErr != nil {
- return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
+ return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr)
}
if isDone {
return nil
@@ -73,7 +73,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
position := log_buffer.NewMessagePosition(nextDayTs, -2)
found, err := fs.filer.HasPersistedLogFiles(position)
if err != nil {
- return fmt.Errorf("checking persisted log files: %v", err)
+ return fmt.Errorf("checking persisted log files: %w", err)
}
if found {
lastReadTime = position
@@ -157,7 +157,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
if readPersistedLogErr != nil {
glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr)
- return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
+ return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr)
}
if isDone {
return nil
@@ -219,7 +219,7 @@ func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotificati
event := &filer_pb.SubscribeMetadataResponse{}
if err := proto.Unmarshal(logEntry.Data, event); err != nil {
glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
- return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %w", err)
}
if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
diff --git a/weed/server/filer_grpc_server_traverse_meta.go b/weed/server/filer_grpc_server_traverse_meta.go
index 4a924f065..841e7b88b 100644
--- a/weed/server/filer_grpc_server_traverse_meta.go
+++ b/weed/server/filer_grpc_server_traverse_meta.go
@@ -40,7 +40,7 @@ func (fs *FilerServer) TraverseBfsMetadata(req *filer_pb.TraverseBfsMetadataRequ
Directory: parent,
Entry: item.ToProtoEntry(),
}); err != nil {
- return fmt.Errorf("send traverse bfs metadata response: %v", err)
+ return fmt.Errorf("send traverse bfs metadata response: %w", err)
}
if !item.IsDirectory() {
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
index cad0d4fb7..2a3fb6b68 100644
--- a/weed/server/filer_server_handlers_write_cipher.go
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -56,12 +56,12 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
- return nil, fmt.Errorf("uploader initialization error: %v", uploaderErr)
+ return nil, fmt.Errorf("uploader initialization error: %w", uploaderErr)
}
uploadResult, uploadError := uploader.UploadData(ctx, uncompressedData, uploadOption)
if uploadError != nil {
- return nil, fmt.Errorf("upload to volume server: %v", uploadError)
+ return nil, fmt.Errorf("upload to volume server: %w", uploadError)
}
// Save to chunk manifest structure
diff --git a/weed/server/raft_hashicorp.go b/weed/server/raft_hashicorp.go
index 0c6a72d6f..348b243c2 100644
--- a/weed/server/raft_hashicorp.go
+++ b/weed/server/raft_hashicorp.go
@@ -139,7 +139,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
}
if err := raft.ValidateConfig(c); err != nil {
- return nil, fmt.Errorf(`raft.ValidateConfig: %v`, err)
+ return nil, fmt.Errorf("raft.ValidateConfig: %w", err)
}
if option.RaftBootstrap {
@@ -154,17 +154,17 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, ldbFile))
if err != nil {
- return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "logs.dat"), err)
+ return nil, fmt.Errorf("boltdb.NewBoltStore(%q): %v", filepath.Join(baseDir, "logs.dat"), err)
}
sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, sdbFile))
if err != nil {
- return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "stable.dat"), err)
+ return nil, fmt.Errorf("boltdb.NewBoltStore(%q): %v", filepath.Join(baseDir, "stable.dat"), err)
}
fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
if err != nil {
- return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
+ return nil, fmt.Errorf("raft.NewFileSnapshotStore(%q, ...): %v", baseDir, err)
}
s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})
@@ -172,7 +172,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
stateMachine := StateMachine{topo: option.Topo}
s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
if err != nil {
- return nil, fmt.Errorf("raft.NewRaft: %v", err)
+ return nil, fmt.Errorf("raft.NewRaft: %w", err)
}
updatePeers := false
@@ -185,7 +185,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
time.Sleep(timeSleep)
f := s.RaftHashicorp.BootstrapCluster(cfg)
if err := f.Error(); err != nil {
- return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
+ return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %w", err)
}
} else {
updatePeers = true
@@ -214,12 +214,12 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
if sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
Registerer: stats.Gather,
}); err != nil {
- return nil, fmt.Errorf("NewPrometheusSink: %v", err)
+ return nil, fmt.Errorf("NewPrometheusSink: %w", err)
} else {
metricsConf := metrics.DefaultConfig(stats.Namespace)
metricsConf.EnableRuntimeMetrics = false
if _, err = metrics.NewGlobal(metricsConf, sink); err != nil {
- return nil, fmt.Errorf("metrics.NewGlobal: %v", err)
+ return nil, fmt.Errorf("metrics.NewGlobal: %w", err)
}
}
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 4fd951927..0e733fc0a 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -56,7 +56,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
VolumeId: req.VolumeId,
})
if nil != err {
- return fmt.Errorf("read volume file status failed, %v", err)
+ return fmt.Errorf("read volume file status failed, %w", err)
}
diskType := volFileInfoResp.DiskType
@@ -247,7 +247,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse
stat, err = os.Stat(datFileName)
if err != nil {
- return fmt.Errorf("get dat file info failed, %v", err)
+ return fmt.Errorf("get dat file info failed, %w", err)
}
if originFileInf.DatFileSize != uint64(stat.Size()) {
return fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]",
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 642e8cce3..23cc29e0d 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -408,7 +408,7 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv
_, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version))
if err != nil {
- return nil, fmt.Errorf("locate in local ec volume: %v", err)
+ return nil, fmt.Errorf("locate in local ec volume: %w", err)
}
if size.IsDeleted() {
return resp, nil
diff --git a/weed/server/volume_grpc_remote.go b/weed/server/volume_grpc_remote.go
index 0b5fa1cfc..669858b6a 100644
--- a/weed/server/volume_grpc_remote.go
+++ b/weed/server/volume_grpc_remote.go
@@ -24,7 +24,7 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
if getClientErr != nil {
- return nil, fmt.Errorf("get remote client: %v", getClientErr)
+ return nil, fmt.Errorf("get remote client: %w", getClientErr)
}
remoteStorageLocation := req.RemoteLocation
diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go
index 935635a83..cf3998e26 100644
--- a/weed/server/volume_grpc_tail.go
+++ b/weed/server/volume_grpc_tail.go
@@ -31,7 +31,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR
lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs)
if err != nil {
glog.Infof("sendNeedlesSince: %v", err)
- return fmt.Errorf("streamFollow: %v", err)
+ return fmt.Errorf("streamFollow: %w", err)
}
time.Sleep(2 * time.Second)
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 6a54fcfad..dc7f64f6c 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -144,7 +144,7 @@ func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Re
m["size"] = count
writeJsonQuiet(w, r, http.StatusAccepted, m)
} else {
- writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %v", err))
+ writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %w", err))
}
}
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 5ab264950..aa501b408 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -407,7 +407,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
glog.V(0).Infof("upload data %v: %v", f.name, uploaderErr)
- return nil, fmt.Errorf("upload data: %v", uploaderErr)
+ return nil, fmt.Errorf("upload data: %w", uploaderErr)
}
fileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
@@ -434,7 +434,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
if flushErr != nil {
glog.V(0).Infof("upload data %v: %v", f.name, flushErr)
- return nil, fmt.Errorf("upload data: %v", flushErr)
+ return nil, fmt.Errorf("upload data: %w", flushErr)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v: %v", f.name, flushErr)
diff --git a/weed/sftpd/sftp_file_reader.go b/weed/sftpd/sftp_file_reader.go
index 5588d26d0..791f2d581 100644
--- a/weed/sftpd/sftp_file_reader.go
+++ b/weed/sftpd/sftp_file_reader.go
@@ -63,13 +63,13 @@ func (ra *SeaweedFileReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
}
if _, err := ra.reader.Seek(bufferKey, io.SeekStart); err != nil {
- return 0, fmt.Errorf("seek error: %v", err)
+ return 0, fmt.Errorf("seek error: %w", err)
}
buffer = make([]byte, readSize)
readBytes, err := io.ReadFull(ra.reader, buffer)
if err != nil && err != io.ErrUnexpectedEOF {
- return 0, fmt.Errorf("read error: %v", err)
+ return 0, fmt.Errorf("read error: %w", err)
}
buffer = buffer[:readBytes]
ra.cache.Put(bufferKey, buffer)
diff --git a/weed/sftpd/sftp_filer.go b/weed/sftpd/sftp_filer.go
index a5f8b8153..9baaf41d7 100644
--- a/weed/sftpd/sftp_filer.go
+++ b/weed/sftpd/sftp_filer.go
@@ -139,7 +139,7 @@ func (fs *SftpServer) newFileWriter(r *sftp.Request) (io.WriterAt, error) {
// Create a temporary file to buffer writes
tmpFile, err := os.CreateTemp("", "sftp-upload-*")
if err != nil {
- return nil, fmt.Errorf("failed to create temp file: %v", err)
+ return nil, fmt.Errorf("failed to create temp file: %w", err)
}
return &SeaweedSftpFileWriter{
@@ -303,19 +303,19 @@ func (fs *SftpServer) putFile(filepath string, reader io.Reader, user *user.User
// We can skip ContentLength if unknown (chunked transfer encoding)
req, err := http.NewRequest(http.MethodPut, uploadUrl, body)
if err != nil {
- return fmt.Errorf("create request: %v", err)
+ return fmt.Errorf("create request: %w", err)
}
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := http.DefaultClient.Do(req)
if err != nil {
- return fmt.Errorf("upload to filer: %v", err)
+ return fmt.Errorf("upload to filer: %w", err)
}
defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
- return fmt.Errorf("read response: %v", err)
+ return fmt.Errorf("read response: %w", err)
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
@@ -324,7 +324,7 @@ func (fs *SftpServer) putFile(filepath string, reader io.Reader, user *user.User
var result weed_server.FilerPostResult
if err := json.Unmarshal(respBody, &result); err != nil {
- return fmt.Errorf("parse response: %v", err)
+ return fmt.Errorf("parse response: %w", err)
}
if result.Error != "" {
return fmt.Errorf("filer error: %s", result.Error)
@@ -338,7 +338,7 @@ func (fs *SftpServer) putFile(filepath string, reader io.Reader, user *user.User
Name: filename,
})
if err != nil {
- return fmt.Errorf("lookup file for attribute update: %v", err)
+ return fmt.Errorf("lookup file for attribute update: %w", err)
}
if lookupResp.Entry == nil {
diff --git a/weed/sftpd/sftp_server.go b/weed/sftpd/sftp_server.go
index 1399b64c7..f158aeb64 100644
--- a/weed/sftpd/sftp_server.go
+++ b/weed/sftpd/sftp_server.go
@@ -98,7 +98,7 @@ func (fs *SftpServer) EnsureHomeDirectory() error {
})
if err != nil {
- return fmt.Errorf("failed to create home directory: %v", err)
+ return fmt.Errorf("failed to create home directory: %w", err)
}
glog.V(0).Infof("Successfully created home directory for user %s: %s", fs.user.Username, fs.user.HomeDir)
diff --git a/weed/sftpd/sftp_service.go b/weed/sftpd/sftp_service.go
index cd19b3f14..e50bd87ba 100644
--- a/weed/sftpd/sftp_service.go
+++ b/weed/sftpd/sftp_service.go
@@ -71,7 +71,7 @@ func (s *SFTPService) Serve(listener net.Listener) error {
// Build SSH server config
sshConfig, err := s.buildSSHConfig()
if err != nil {
- return fmt.Errorf("failed to create SSH config: %v", err)
+ return fmt.Errorf("failed to create SSH config: %w", err)
}
glog.V(0).Infof("Starting Seaweed SFTP service on %s", listener.Addr().String())
@@ -79,7 +79,7 @@ func (s *SFTPService) Serve(listener net.Listener) error {
for {
conn, err := listener.Accept()
if err != nil {
- return fmt.Errorf("failed to accept incoming connection: %v", err)
+ return fmt.Errorf("failed to accept incoming connection: %w", err)
}
go s.handleSSHConnection(conn, sshConfig)
}
@@ -110,7 +110,7 @@ func (s *SFTPService) buildSSHConfig() (*ssh.ServerConfig, error) {
if s.options.HostKeysFolder != "" {
files, err := os.ReadDir(s.options.HostKeysFolder)
if err != nil {
- return nil, fmt.Errorf("failed to read host keys folder: %v", err)
+ return nil, fmt.Errorf("failed to read host keys folder: %w", err)
}
for _, file := range files {
if file.IsDir() {
diff --git a/weed/sftpd/user/filestore.go b/weed/sftpd/user/filestore.go
index 615b2e286..c522a388a 100644
--- a/weed/sftpd/user/filestore.go
+++ b/weed/sftpd/user/filestore.go
@@ -61,7 +61,7 @@ func NewFileStore(filePath string) (*FileStore, error) {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
// Create an empty users array
if err := os.WriteFile(filePath, []byte("[]"), 0600); err != nil {
- return nil, fmt.Errorf("failed to create user store file: %v", err)
+ return nil, fmt.Errorf("failed to create user store file: %w", err)
}
}
@@ -79,12 +79,12 @@ func (s *FileStore) loadUsers() error {
data, err := os.ReadFile(s.filePath)
if err != nil {
- return fmt.Errorf("failed to read user store file: %v", err)
+ return fmt.Errorf("failed to read user store file: %w", err)
}
var users []*User
if err := json.Unmarshal(data, &users); err != nil {
- return fmt.Errorf("failed to parse user store file: %v", err)
+ return fmt.Errorf("failed to parse user store file: %w", err)
}
// Clear existing users and add the loaded ones
@@ -119,11 +119,11 @@ func (s *FileStore) saveUsers() error {
data, err := json.MarshalIndent(users, "", " ")
if err != nil {
- return fmt.Errorf("failed to serialize users: %v", err)
+ return fmt.Errorf("failed to serialize users: %w", err)
}
if err := os.WriteFile(s.filePath, data, 0600); err != nil {
- return fmt.Errorf("failed to write user store file: %v", err)
+ return fmt.Errorf("failed to write user store file: %w", err)
}
return nil
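
For reference, the practical effect of the %v → %w conversion shown throughout this change is that the wrapped error stays inspectable by callers through errors.Is / errors.As instead of being flattened into a string. The following is a minimal standalone sketch of that behavior, not code from this repository; the file path and function name are illustrative only:

    // sketch: %w preserves the error chain, %v would break it
    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    func loadUsers(path string) error {
        if _, err := os.ReadFile(path); err != nil {
            // wrapping with %w keeps the underlying *fs.PathError reachable
            return fmt.Errorf("failed to read user store file: %w", err)
        }
        return nil
    }

    func main() {
        err := loadUsers("/nonexistent/users.json")
        // prints true because %w preserved the chain; with %v this would print false
        fmt.Println(errors.Is(err, fs.ErrNotExist))
    }
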
diff --git a/weed/shell/command_cluster_raft_add.go b/weed/shell/command_cluster_raft_add.go
index 6089631b1..eb75f8b15 100644
--- a/weed/shell/command_cluster_raft_add.go
+++ b/weed/shell/command_cluster_raft_add.go
@@ -52,7 +52,7 @@ func (c *commandRaftServerAdd) Do(args []string, commandEnv *CommandEnv, writer
Voter: *serverVoter,
})
if err != nil {
- return fmt.Errorf("raft add server: %v", err)
+ return fmt.Errorf("raft add server: %w", err)
}
println("added server", *serverId)
return nil
diff --git a/weed/shell/command_cluster_raft_ps.go b/weed/shell/command_cluster_raft_ps.go
index c8324f635..73bfde8da 100644
--- a/weed/shell/command_cluster_raft_ps.go
+++ b/weed/shell/command_cluster_raft_ps.go
@@ -40,7 +40,7 @@ func (c *commandRaftClusterPs) Do(args []string, commandEnv *CommandEnv, writer
err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
resp, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{})
if err != nil {
- return fmt.Errorf("raft list cluster: %v", err)
+ return fmt.Errorf("raft list cluster: %w", err)
}
fmt.Fprintf(writer, "the raft cluster has %d servers\n", len(resp.ClusterServers))
for _, server := range resp.ClusterServers {
diff --git a/weed/shell/command_cluster_raft_remove.go b/weed/shell/command_cluster_raft_remove.go
index 109125890..c9ada8bcd 100644
--- a/weed/shell/command_cluster_raft_remove.go
+++ b/weed/shell/command_cluster_raft_remove.go
@@ -49,7 +49,7 @@ func (c *commandRaftServerRemove) Do(args []string, commandEnv *CommandEnv, writ
Force: true,
})
if err != nil {
- return fmt.Errorf("raft remove server: %v", err)
+ return fmt.Errorf("raft remove server: %w", err)
}
println("removed server", *serverId)
return nil
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index a6f27232e..0f8430cab 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -1049,7 +1049,7 @@ func EcBalance(commandEnv *CommandEnv, collections []string, dc string, ecReplic
}
if err := ecb.balanceEcRacks(); err != nil {
- return fmt.Errorf("balance ec racks: %v", err)
+ return fmt.Errorf("balance ec racks: %w", err)
}
return nil
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 499196e8a..da96492ae 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -121,21 +121,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
fmt.Printf("Collecting volume locations for %d volumes before EC encoding...\n", len(volumeIds))
volumeLocationsMap, err := volumeLocations(commandEnv, volumeIds)
if err != nil {
- return fmt.Errorf("failed to collect volume locations before EC encoding: %v", err)
+ return fmt.Errorf("failed to collect volume locations before EC encoding: %w", err)
}
// encode all requested volumes...
if err = doEcEncode(commandEnv, *collection, volumeIds, *maxParallelization); err != nil {
- return fmt.Errorf("ec encode for volumes %v: %v", volumeIds, err)
+ return fmt.Errorf("ec encode for volumes %v: %w", volumeIds, err)
}
// ...re-balance ec shards...
if err := EcBalance(commandEnv, balanceCollections, "", rp, *maxParallelization, *applyBalancing); err != nil {
- return fmt.Errorf("re-balance ec shards for collection(s) %v: %v", balanceCollections, err)
+ return fmt.Errorf("re-balance ec shards for collection(s) %v: %w", balanceCollections, err)
}
// ...then delete original volumes using pre-collected locations.
fmt.Printf("Deleting original volumes after EC encoding...\n")
if err := doDeleteVolumesWithLocations(commandEnv, volumeIds, volumeLocationsMap, *maxParallelization); err != nil {
- return fmt.Errorf("delete original volumes after EC encoding: %v", err)
+ return fmt.Errorf("delete original volumes after EC encoding: %w", err)
}
fmt.Printf("Successfully completed EC encoding for %d volumes\n", len(volumeIds))
@@ -161,7 +161,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, volumeIds []needle.Vo
}
locations, err := volumeLocations(commandEnv, volumeIds)
if err != nil {
- return fmt.Errorf("failed to get volume locations for EC encoding: %v", err)
+ return fmt.Errorf("failed to get volume locations for EC encoding: %w", err)
}
// mark volumes as readonly
diff --git a/weed/shell/command_mq_topic_list.go b/weed/shell/command_mq_topic_list.go
index 8da86f4a6..d8d7a08cb 100644
--- a/weed/shell/command_mq_topic_list.go
+++ b/weed/shell/command_mq_topic_list.go
@@ -59,7 +59,7 @@ func findBrokerBalancer(commandEnv *CommandEnv) (brokerBalancer string, err erro
Name: pub_balancer.LockBrokerBalancer,
})
if err != nil {
- return fmt.Errorf("FindLockOwner: %v", err)
+ return fmt.Errorf("FindLockOwner: %w", err)
}
brokerBalancer = resp.Owner
return nil
diff --git a/weed/shell/command_remote_cache.go b/weed/shell/command_remote_cache.go
index 23a1eccad..8b165b392 100644
--- a/weed/shell/command_remote_cache.go
+++ b/weed/shell/command_remote_cache.go
@@ -104,7 +104,7 @@ func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer
return nil
})
if err != nil {
- return fmt.Errorf("failed to traverse remote storage: %v", err)
+ return fmt.Errorf("failed to traverse remote storage: %w", err)
}
fmt.Fprintf(writer, "Found %d files/directories in remote storage\n", len(remoteFiles))
@@ -120,7 +120,7 @@ func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer
return true
})
if err != nil {
- return fmt.Errorf("failed to traverse local directory: %v", err)
+ return fmt.Errorf("failed to traverse local directory: %w", err)
}
fmt.Fprintf(writer, "Found %d files/directories in local storage\n", len(localFiles))
} else {
diff --git a/weed/shell/command_remote_meta_sync.go b/weed/shell/command_remote_meta_sync.go
index 4d430bc76..d42aaef95 100644
--- a/weed/shell/command_remote_meta_sync.go
+++ b/weed/shell/command_remote_meta_sync.go
@@ -66,7 +66,7 @@ func (c *commandRemoteMetaSync) Do(args []string, commandEnv *CommandEnv, writer
// pull metadata from remote
if err = pullMetadata(commandEnv, writer, util.FullPath(localMountedDir), remoteStorageMountedLocation, util.FullPath(*dir), remoteStorageConf); err != nil {
- return fmt.Errorf("cache meta data: %v", err)
+ return fmt.Errorf("cache meta data: %w", err)
}
return nil
diff --git a/weed/shell/command_remote_mount.go b/weed/shell/command_remote_mount.go
index 2892852e9..59ab75b4d 100644
--- a/weed/shell/command_remote_mount.go
+++ b/weed/shell/command_remote_mount.go
@@ -78,12 +78,12 @@ func (c *commandRemoteMount) Do(args []string, commandEnv *CommandEnv, writer io
// sync metadata from remote
if err = syncMetadata(commandEnv, writer, *dir, *nonEmpty, remoteConf, remoteStorageLocation); err != nil {
- return fmt.Errorf("pull metadata: %v", err)
+ return fmt.Errorf("pull metadata: %w", err)
}
// store a mount configuration in filer
if err = filer.InsertMountMapping(commandEnv, *dir, remoteStorageLocation); err != nil {
- return fmt.Errorf("save mount mapping: %v", err)
+ return fmt.Errorf("save mount mapping: %w", err)
}
return nil
@@ -161,7 +161,7 @@ func syncMetadata(commandEnv *CommandEnv, writer io.Writer, dir string, nonEmpty
// pull metadata from remote
if err = pullMetadata(commandEnv, writer, util.FullPath(dir), remote, util.FullPath(dir), remoteConf); err != nil {
- return fmt.Errorf("cache metadata: %v", err)
+ return fmt.Errorf("cache metadata: %w", err)
}
return nil
diff --git a/weed/shell/command_remote_mount_buckets.go b/weed/shell/command_remote_mount_buckets.go
index d8df09e60..6176aa3d7 100644
--- a/weed/shell/command_remote_mount_buckets.go
+++ b/weed/shell/command_remote_mount_buckets.go
@@ -79,7 +79,7 @@ func (c *commandRemoteMountBuckets) Do(args []string, commandEnv *CommandEnv, wr
fillerBucketsPath, err := readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read filer buckets path: %v", err)
+ return fmt.Errorf("read filer buckets path: %w", err)
}
hasSuffixPattern, _ := regexp.Compile(".+-[0-9][0-9][0-9][0-9]")
diff --git a/weed/shell/command_remote_uncache.go b/weed/shell/command_remote_uncache.go
index 86d992ef1..f8763485c 100644
--- a/weed/shell/command_remote_uncache.go
+++ b/weed/shell/command_remote_uncache.go
@@ -75,7 +75,7 @@ func (c *commandRemoteUncache) Do(args []string, commandEnv *CommandEnv, writer
// pull content from remote
if err = c.uncacheContentData(commandEnv, writer, util.FullPath(*dir), fileFiler); err != nil {
- return fmt.Errorf("uncache content data: %v", err)
+ return fmt.Errorf("uncache content data: %w", err)
}
return nil
}
diff --git a/weed/shell/command_remote_unmount.go b/weed/shell/command_remote_unmount.go
index 87487481a..2e1ba393c 100644
--- a/weed/shell/command_remote_unmount.go
+++ b/weed/shell/command_remote_unmount.go
@@ -67,13 +67,13 @@ func (c *commandRemoteUnmount) Do(args []string, commandEnv *CommandEnv, writer
// store a mount configuration in filer
fmt.Fprintf(writer, "deleting mount for %s ...\n", *dir)
if err = filer.DeleteMountMapping(commandEnv, *dir); err != nil {
- return fmt.Errorf("delete mount mapping: %v", err)
+ return fmt.Errorf("delete mount mapping: %w", err)
}
// purge mounted data
fmt.Fprintf(writer, "purge %s ...\n", *dir)
if err = c.purgeMountedData(commandEnv, *dir); err != nil {
- return fmt.Errorf("purge mounted data: %v", err)
+ return fmt.Errorf("purge mounted data: %w", err)
}
// reset remote sync offset in case the folder is mounted again
diff --git a/weed/shell/command_s3_bucket_create.go b/weed/shell/command_s3_bucket_create.go
index be17591e4..becbd96e7 100644
--- a/weed/shell/command_s3_bucket_create.go
+++ b/weed/shell/command_s3_bucket_create.go
@@ -55,7 +55,7 @@ func (c *commandS3BucketCreate) Do(args []string, commandEnv *CommandEnv, writer
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get filer configuration: %v", err)
+ return fmt.Errorf("get filer configuration: %w", err)
}
filerBucketsPath := resp.DirBuckets
diff --git a/weed/shell/command_s3_bucket_delete.go b/weed/shell/command_s3_bucket_delete.go
index ecd83d1c2..0227151fe 100644
--- a/weed/shell/command_s3_bucket_delete.go
+++ b/weed/shell/command_s3_bucket_delete.go
@@ -52,7 +52,7 @@ func (c *commandS3BucketDelete) Do(args []string, commandEnv *CommandEnv, writer
var filerBucketsPath string
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read buckets: %v", err)
+ return fmt.Errorf("read buckets: %w", err)
}
// delete the collection directly first
diff --git a/weed/shell/command_s3_bucket_list.go b/weed/shell/command_s3_bucket_list.go
index e6fa44303..031b22d2d 100644
--- a/weed/shell/command_s3_bucket_list.go
+++ b/weed/shell/command_s3_bucket_list.go
@@ -54,7 +54,7 @@ func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer i
var filerBucketsPath string
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read buckets: %v", err)
+ return fmt.Errorf("read buckets: %w", err)
}
err = filer_pb.List(context.Background(), commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
@@ -75,7 +75,7 @@ func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer i
return nil
}, "", false, math.MaxUint32)
if err != nil {
- return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ return fmt.Errorf("list buckets under %v: %w", filerBucketsPath, err)
}
return err
@@ -87,7 +87,7 @@ func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath st
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get filer configuration: %v", err)
+ return fmt.Errorf("get filer configuration: %w", err)
}
filerBucketsPath = resp.DirBuckets
diff --git a/weed/shell/command_s3_bucket_quota.go b/weed/shell/command_s3_bucket_quota.go
index f6562453f..05910c099 100644
--- a/weed/shell/command_s3_bucket_quota.go
+++ b/weed/shell/command_s3_bucket_quota.go
@@ -52,7 +52,7 @@ func (c *commandS3BucketQuota) Do(args []string, commandEnv *CommandEnv, writer
resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get filer configuration: %v", err)
+ return fmt.Errorf("get filer configuration: %w", err)
}
filerBucketsPath := resp.DirBuckets
diff --git a/weed/shell/command_s3_bucket_quota_check.go b/weed/shell/command_s3_bucket_quota_check.go
index 27b903405..bb54b73a4 100644
--- a/weed/shell/command_s3_bucket_quota_check.go
+++ b/weed/shell/command_s3_bucket_quota_check.go
@@ -55,7 +55,7 @@ func (c *commandS3BucketQuotaEnforce) Do(args []string, commandEnv *CommandEnv,
var filerBucketsPath string
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read buckets: %v", err)
+ return fmt.Errorf("read buckets: %w", err)
}
// read existing filer configuration
@@ -81,7 +81,7 @@ func (c *commandS3BucketQuotaEnforce) Do(args []string, commandEnv *CommandEnv,
return nil
}, "", false, math.MaxUint32)
if err != nil {
- return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ return fmt.Errorf("list buckets under %v: %w", filerBucketsPath, err)
}
// apply the configuration changes
diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go
index c77f3cd74..b9d9bea5e 100644
--- a/weed/shell/command_s3_clean_uploads.go
+++ b/weed/shell/command_s3_clean_uploads.go
@@ -51,7 +51,7 @@ func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer
var filerBucketsPath string
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read buckets: %v", err)
+ return fmt.Errorf("read buckets: %w", err)
}
var buckets []string
@@ -60,7 +60,7 @@ func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer
return nil
}, "", false, math.MaxUint32)
if err != nil {
- return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ return fmt.Errorf("list buckets under %v: %w", filerBucketsPath, err)
}
for _, bucket := range buckets {
@@ -84,7 +84,7 @@ func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io
return nil
}, "", false, math.MaxUint32)
if err != nil {
- return fmt.Errorf("list uploads under %v: %v", uploadsDir, err)
+ return fmt.Errorf("list uploads under %v: %w", uploadsDir, err)
}
var encodedJwt security.EncodedJwt
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
index 06154b92b..2f3ccfdc6 100644
--- a/weed/shell/command_volume_check_disk.go
+++ b/weed/shell/command_volume_check_disk.go
@@ -394,7 +394,7 @@ func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *byt
break
}
if receiveErr != nil {
- return fmt.Errorf("receiving: %v", receiveErr)
+ return fmt.Errorf("receiving: %w", receiveErr)
}
buf.Write(resp.FileContent)
}
diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go
index bc1ca5304..c54efd05c 100644
--- a/weed/shell/command_volume_configure_replication.go
+++ b/weed/shell/command_volume_configure_replication.go
@@ -60,7 +60,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString)
if err != nil {
- return fmt.Errorf("replication format: %v", err)
+ return fmt.Errorf("replication format: %w", err)
}
// collect topology information
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 8a76722d4..e8140d3aa 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -120,13 +120,13 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
c.bucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
- return fmt.Errorf("read filer buckets path: %v", err)
+ return fmt.Errorf("read filer buckets path: %w", err)
}
// create a temp folder
c.tempFolder, err = os.MkdirTemp(*tempPath, "sw_fsck")
if err != nil {
- return fmt.Errorf("failed to create temp folder: %v", err)
+ return fmt.Errorf("failed to create temp folder: %w", err)
}
if *c.verbose {
fmt.Fprintf(c.writer, "working directory: %s\n", c.tempFolder)
@@ -136,11 +136,11 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
// collect all volume id locations
dataNodeVolumeIdToVInfo, err := c.collectVolumeIds()
if err != nil {
- return fmt.Errorf("failed to collect all volume locations: %v", err)
+ return fmt.Errorf("failed to collect all volume locations: %w", err)
}
if err != nil {
- return fmt.Errorf("read filer buckets path: %v", err)
+ return fmt.Errorf("read filer buckets path: %w", err)
}
var collectCutoffFromAtNs int64 = 0
@@ -189,22 +189,22 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
// collect all filer file ids and paths
if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, *purgeAbsent, collectModifyFromAtNs, collectCutoffFromAtNs); err != nil {
- return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
+ return fmt.Errorf("collectFilerFileIdAndPaths: %w", err)
}
for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
// for each volume, check filer file ids
if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, dataNodeId, *applyPurging); err != nil {
- return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+ return fmt.Errorf("findFilerChunksMissingInVolumeServers: %w", err)
}
}
} else {
// collect all filer file ids
if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, false, 0, 0); err != nil {
- return fmt.Errorf("failed to collect file ids from filer: %v", err)
+ return fmt.Errorf("failed to collect file ids from filer: %w", err)
}
// volume file ids subtract filer file ids
if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, *applyPurging, uint64(collectModifyFromAtNs), uint64(collectCutoffFromAtNs)); err != nil {
- return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
+ return fmt.Errorf("findExtraChunksInVolumeServers: %w", err)
}
}
diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go
index 072b1a0b5..e46480060 100644
--- a/weed/storage/disk_location_ec.go
+++ b/weed/storage/disk_location_ec.go
@@ -121,12 +121,12 @@ func (l *DiskLocation) loadEcShards(shards []string, collection string, vid need
for _, shard := range shards {
shardId, err := strconv.ParseInt(path.Ext(shard)[3:], 10, 64)
if err != nil {
- return fmt.Errorf("failed to parse ec shard name %v: %v", shard, err)
+ return fmt.Errorf("failed to parse ec shard name %v: %w", shard, err)
}
_, err = l.LoadEcShard(collection, vid, erasure_coding.ShardId(shardId))
if err != nil {
- return fmt.Errorf("failed to load ec shard %v: %v", shard, err)
+ return fmt.Errorf("failed to load ec shard %v: %w", shard, err)
}
}
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
index 6578f823d..5db65a2c8 100644
--- a/weed/storage/erasure_coding/ec_encoder.go
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -31,12 +31,12 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
defer nm.Close()
}
if err != nil {
- return fmt.Errorf("readNeedleMap: %v", err)
+ return fmt.Errorf("readNeedleMap: %w", err)
}
ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
- return fmt.Errorf("failed to open ecx file: %v", err)
+ return fmt.Errorf("failed to open ecx file: %w", err)
}
defer ecxFile.Close()
@@ -47,7 +47,7 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
})
if err != nil {
- return fmt.Errorf("failed to visit idx file: %v", err)
+ return fmt.Errorf("failed to visit idx file: %w", err)
}
return nil
@@ -69,19 +69,19 @@ func ToExt(ecIndex int) string {
func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) error {
file, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0)
if err != nil {
- return fmt.Errorf("failed to open dat file: %v", err)
+ return fmt.Errorf("failed to open dat file: %w", err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
- return fmt.Errorf("failed to stat dat file: %v", err)
+ return fmt.Errorf("failed to stat dat file: %w", err)
}
glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
if err != nil {
- return fmt.Errorf("encodeDatFile: %v", err)
+ return fmt.Errorf("encodeDatFile: %w", err)
}
return nil
}
@@ -112,7 +112,7 @@ func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize
err = rebuildEcFiles(shardHasData, inputFiles, outputFiles)
if err != nil {
- return nil, fmt.Errorf("rebuildEcFiles: %v", err)
+ return nil, fmt.Errorf("rebuildEcFiles: %w", err)
}
return
}
@@ -201,7 +201,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
if err != nil {
- return fmt.Errorf("failed to create encoder: %v", err)
+ return fmt.Errorf("failed to create encoder: %w", err)
}
buffers := make([][]byte, TotalShardsCount)
@@ -218,7 +218,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
for remainingSize > largeBlockSize*DataShardsCount {
err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs)
if err != nil {
- return fmt.Errorf("failed to encode large chunk data: %v", err)
+ return fmt.Errorf("failed to encode large chunk data: %w", err)
}
remainingSize -= largeBlockSize * DataShardsCount
processedSize += largeBlockSize * DataShardsCount
@@ -226,7 +226,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
for remainingSize > 0 {
err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs)
if err != nil {
- return fmt.Errorf("failed to encode small chunk data: %v", err)
+ return fmt.Errorf("failed to encode small chunk data: %w", err)
}
remainingSize -= smallBlockSize * DataShardsCount
processedSize += smallBlockSize * DataShardsCount
@@ -238,7 +238,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
if err != nil {
- return fmt.Errorf("failed to create encoder: %v", err)
+ return fmt.Errorf("failed to create encoder: %w", err)
}
buffers := make([][]byte, TotalShardsCount)
@@ -273,7 +273,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
// encode the data
err = enc.Reconstruct(buffers)
if err != nil {
- return fmt.Errorf("reconstruct: %v", err)
+ return fmt.Errorf("reconstruct: %w", err)
}
// write the data to output files
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
index b3744807a..228faf640 100644
--- a/weed/storage/erasure_coding/ec_volume.go
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -220,7 +220,7 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.
// find the needle from ecx file
offset, size, err = ev.FindNeedleFromEcx(needleId)
if err != nil {
- return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %v", err)
+ return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %w", err)
}
intervals = ev.LocateEcShardNeedleInterval(version, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))
diff --git a/weed/storage/erasure_coding/ec_volume_delete.go b/weed/storage/erasure_coding/ec_volume_delete.go
index f1a58c009..076176bea 100644
--- a/weed/storage/erasure_coding/ec_volume_delete.go
+++ b/weed/storage/erasure_coding/ec_volume_delete.go
@@ -15,7 +15,7 @@ var (
types.SizeToBytes(b, types.TombstoneFileSize)
n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize)
if err != nil {
- return fmt.Errorf("sorted needle write error: %v", err)
+ return fmt.Errorf("sorted needle write error: %w", err)
}
if n != types.SizeSize {
return fmt.Errorf("sorted needle written %d bytes, expecting %d", n, types.SizeSize)
@@ -56,7 +56,7 @@ func RebuildEcxFile(baseFileName string) error {
ecxFile, err := os.OpenFile(baseFileName+".ecx", os.O_RDWR, 0644)
if err != nil {
- return fmt.Errorf("rebuild: failed to open ecx file: %v", err)
+ return fmt.Errorf("rebuild: failed to open ecx file: %w", err)
}
defer ecxFile.Close()
@@ -69,7 +69,7 @@ func RebuildEcxFile(baseFileName string) error {
ecjFile, err := os.OpenFile(baseFileName+".ecj", os.O_RDWR, 0644)
if err != nil {
- return fmt.Errorf("rebuild: failed to open ecj file: %v", err)
+ return fmt.Errorf("rebuild: failed to open ecj file: %w", err)
}
buf := make([]byte, types.NeedleIdSize)
diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go
index 9aa684b3c..7c0ce1c20 100644
--- a/weed/storage/needle/needle.go
+++ b/weed/storage/needle/needle.go
@@ -160,11 +160,11 @@ func ParseNeedleIdCookie(key_hash_string string) (NeedleId, Cookie, error) {
split := len(key_hash_string) - CookieSize*2
needleId, err := ParseNeedleId(key_hash_string[:split])
if err != nil {
- return NeedleIdEmpty, 0, fmt.Errorf("Parse needleId error: %v", err)
+ return NeedleIdEmpty, 0, fmt.Errorf("Parse needleId error: %w", err)
}
cookie, err := ParseCookie(key_hash_string[split:])
if err != nil {
- return NeedleIdEmpty, 0, fmt.Errorf("Parse cookie error: %v", err)
+ return NeedleIdEmpty, 0, fmt.Errorf("Parse cookie error: %w", err)
}
return needleId, cookie, nil
}
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
index d3d47b605..e348a42ce 100644
--- a/weed/storage/needle_map/memdb.go
+++ b/weed/storage/needle_map/memdb.go
@@ -38,7 +38,7 @@ func (cm *MemDb) Set(key NeedleId, offset Offset, size Size) error {
bytes := ToBytes(key, offset, size)
if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
- return fmt.Errorf("failed to write temp leveldb: %v", err)
+ return fmt.Errorf("failed to write temp leveldb: %w", err)
}
return nil
}
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index a5a543ba2..3439e0361 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -194,7 +194,7 @@ func setWatermark(db *leveldb.DB, watermark uint64) error {
var wmBytes = make([]byte, 8)
util.Uint64toBytes(wmBytes, watermark)
if err := db.Put(watermarkKey, wmBytes, nil); err != nil {
- return fmt.Errorf("failed to setWatermark: %v", err)
+ return fmt.Errorf("failed to setWatermark: %w", err)
}
return nil
}
@@ -204,7 +204,7 @@ func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, update
bytes := needle_map.ToBytes(key, offset, size)
if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
- return fmt.Errorf("failed to write leveldb: %v", err)
+ return fmt.Errorf("failed to write leveldb: %w", err)
}
// set watermark
if updateWatermark {
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 38cf41550..a915e1dbd 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -140,7 +140,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, localEcVolume.Version)
if err != nil {
- return 0, fmt.Errorf("locate in local ec volume: %v", err)
+ return 0, fmt.Errorf("locate in local ec volume: %w", err)
}
if size.IsDeleted() {
return 0, ErrorDeleted
@@ -157,7 +157,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
}
bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
if err != nil {
- return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
+ return 0, fmt.Errorf("ReadEcShardIntervals: %w", err)
}
if isDeleted {
return 0, ErrorDeleted
@@ -165,7 +165,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version)
if err != nil {
- return 0, fmt.Errorf("readbytes: %v", err)
+ return 0, fmt.Errorf("readbytes: %w", err)
}
return len(bytes), nil
@@ -345,7 +345,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
if err != nil {
- return 0, false, fmt.Errorf("failed to create encoder: %v", err)
+ return 0, false, fmt.Errorf("failed to create encoder: %w", err)
}
bufs := make([][]byte, erasure_coding.TotalShardsCount)
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 6d2335f70..6f21c7dcd 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -142,7 +142,7 @@ func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.V
var fileSize int64
fileSize, _, err = datFile.GetStat()
if err != nil {
- return 0, fmt.Errorf("GetStat: %v", err)
+ return 0, fmt.Errorf("GetStat: %w", err)
}
if err = n.ReadData(datFile, fileSize-size, Size(0), v); err != nil {
return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", fileSize-size, size, err)
diff --git a/weed/storage/volume_info/volume_info.go b/weed/storage/volume_info/volume_info.go
index 24e2b17bc..2ac4cc493 100644
--- a/weed/storage/volume_info/volume_info.go
+++ b/weed/storage/volume_info/volume_info.go
@@ -46,7 +46,7 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
if err = jsonpb.Unmarshal(fileData, volumeInfo); err != nil {
if oldVersionErr := tryOldVersionVolumeInfo(fileData, volumeInfo); oldVersionErr != nil {
glog.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
- err = fmt.Errorf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
+ err = fmt.Errorf("unmarshal error: %w oldFormat: %v", err, oldVersionErr)
return
} else {
err = nil
@@ -89,7 +89,7 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er
func tryOldVersionVolumeInfo(data []byte, volumeInfo *volume_server_pb.VolumeInfo) error {
oldVersionVolumeInfo := &volume_server_pb.OldVersionVolumeInfo{}
if err := jsonpb.Unmarshal(data, oldVersionVolumeInfo); err != nil {
- return fmt.Errorf("failed to unmarshal old version volume info: %v", err)
+ return fmt.Errorf("failed to unmarshal old version volume info: %w", err)
}
volumeInfo.Files = oldVersionVolumeInfo.Files
volumeInfo.Version = oldVersionVolumeInfo.Version
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index ca690618b..471401c6f 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -53,7 +53,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
v.noWriteOrDelete = false
glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
if err := v.LoadRemoteFile(); err != nil {
- return fmt.Errorf("load remote file %v: %v", v.volumeInfo, err)
+ return fmt.Errorf("load remote file %v: %w", v.volumeInfo, err)
}
alreadyHasSuperBlock = true
} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists {
diff --git a/weed/storage/volume_read.go b/weed/storage/volume_read.go
index 26aa8ac8a..626704fe1 100644
--- a/weed/storage/volume_read.go
+++ b/weed/storage/volume_read.go
@@ -176,7 +176,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
}
if _, err = writer.Write(buf[0:toWrite]); err != nil {
- return fmt.Errorf("ReadNeedleData write: %v", err)
+ return fmt.Errorf("ReadNeedleData write: %w", err)
}
}
if err != nil {
@@ -184,7 +184,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
err = nil
break
}
- return fmt.Errorf("ReadNeedleData: %v", err)
+ return fmt.Errorf("ReadNeedleData: %w", err)
}
if count <= 0 {
break
@@ -265,7 +265,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
}
if err != nil {
glog.V(0).Infof("visit needle error: %v", err)
- return fmt.Errorf("visit needle error: %v", err)
+ return fmt.Errorf("visit needle error: %w", err)
}
offset += NeedleHeaderSize + rest
glog.V(4).Infof("==> new entry offset %d", offset)
diff --git a/weed/storage/volume_write.go b/weed/storage/volume_write.go
index 34c86010e..2dc94851c 100644
--- a/weed/storage/volume_write.go
+++ b/weed/storage/volume_write.go
@@ -149,7 +149,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint
if ok {
existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
if existingNeedleReadErr != nil {
- err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
+ err = fmt.Errorf("reading existing needle: %w", existingNeedleReadErr)
return
}
if n.Cookie == 0 && !checkCookie {
diff --git a/weed/telemetry/client.go b/weed/telemetry/client.go
index 528984d4d..684ae4bae 100644
--- a/weed/telemetry/client.go
+++ b/weed/telemetry/client.go
@@ -69,12 +69,12 @@ func (c *Client) sendProtobuf(data *proto.TelemetryData) error {
body, err := protobuf.Marshal(req)
if err != nil {
- return fmt.Errorf("failed to marshal protobuf: %v", err)
+ return fmt.Errorf("failed to marshal protobuf: %w", err)
}
httpReq, err := http.NewRequest("POST", c.url, bytes.NewBuffer(body))
if err != nil {
- return fmt.Errorf("failed to create request: %v", err)
+ return fmt.Errorf("failed to create request: %w", err)
}
httpReq.Header.Set("Content-Type", "application/x-protobuf")
@@ -82,7 +82,7 @@ func (c *Client) sendProtobuf(data *proto.TelemetryData) error {
resp, err := c.httpClient.Do(httpReq)
if err != nil {
- return fmt.Errorf("failed to send request: %v", err)
+ return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
diff --git a/weed/topology/cluster_commands.go b/weed/topology/cluster_commands.go
index 6432828e8..2546702e5 100644
--- a/weed/topology/cluster_commands.go
+++ b/weed/topology/cluster_commands.go
@@ -37,12 +37,12 @@ func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
func (s *MaxVolumeIdCommand) Persist(sink hashicorpRaft.SnapshotSink) error {
b, err := json.Marshal(s)
if err != nil {
- return fmt.Errorf("marshal: %v", err)
+ return fmt.Errorf("marshal: %w", err)
}
_, err = sink.Write(b)
if err != nil {
sink.Cancel()
- return fmt.Errorf("sink.Write(): %v", err)
+ return fmt.Errorf("sink.Write(): %w", err)
}
return sink.Close()
}
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index 10fe35f0a..c6386eeb7 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -57,7 +57,7 @@ func ReplicatedWrite(ctx context.Context, masterFn operation.GetMasterFn, grpcDi
stats.VolumeServerRequestHistogram.WithLabelValues(stats.WriteToLocalDisk).Observe(time.Since(start).Seconds())
if err != nil {
stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorWriteToLocalDisk).Inc()
- err = fmt.Errorf("failed to write to local disk: %v", err)
+ err = fmt.Errorf("failed to write to local disk: %w", err)
glog.V(0).Infoln(err)
return
}
diff --git a/weed/wdclient/net2/managed_connection.go b/weed/wdclient/net2/managed_connection.go
index ab92c3fd1..a0cc4f358 100644
--- a/weed/wdclient/net2/managed_connection.go
+++ b/weed/wdclient/net2/managed_connection.go
@@ -144,7 +144,7 @@ func (c *managedConnImpl) Write(b []byte) (n int, err error) {
}
n, err = conn.Write(b)
if err != nil {
- err = fmt.Errorf("Write error: %v", err)
+ err = fmt.Errorf("Write error: %w", err)
}
return
}
diff --git a/weed/wdclient/resource_pool/simple_resource_pool.go b/weed/wdclient/resource_pool/simple_resource_pool.go
index b0c539100..99f555a02 100644
--- a/weed/wdclient/resource_pool/simple_resource_pool.go
+++ b/weed/wdclient/resource_pool/simple_resource_pool.go
@@ -238,7 +238,7 @@ func (p *simpleResourcePool) Discard(handle ManagedHandle) error {
if h != nil {
atomic.AddInt32(p.numActive, -1)
if err := p.options.Close(h); err != nil {
- return fmt.Errorf("failed to close resource handle: %v", err)
+ return fmt.Errorf("failed to close resource handle: %w", err)
}
}
return nil
diff --git a/weed/worker/client.go b/weed/worker/client.go
index f9b42087c..60b33fb31 100644
--- a/weed/worker/client.go
+++ b/weed/worker/client.go
@@ -83,7 +83,7 @@ func (c *GrpcAdminClient) Connect() error {
// Detect TLS support and create appropriate connection
conn, err := c.createConnection()
if err != nil {
- return fmt.Errorf("failed to connect to admin server: %v", err)
+ return fmt.Errorf("failed to connect to admin server: %w", err)
}
c.conn = conn
@@ -94,7 +94,7 @@ func (c *GrpcAdminClient) Connect() error {
stream, err := c.client.WorkerStream(c.streamCtx)
if err != nil {
c.conn.Close()
- return fmt.Errorf("failed to create worker stream: %v", err)
+ return fmt.Errorf("failed to create worker stream: %w", err)
}
c.stream = stream
@@ -116,7 +116,7 @@ func (c *GrpcAdminClient) createConnection() (*grpc.ClientConn, error) {
conn, err := pb.GrpcDial(ctx, c.adminAddress, false, c.dialOption)
if err != nil {
- return nil, fmt.Errorf("failed to connect to admin server: %v", err)
+ return nil, fmt.Errorf("failed to connect to admin server: %w", err)
}
glog.Infof("Connected to admin server at %s", c.adminAddress)
@@ -273,7 +273,7 @@ func (c *GrpcAdminClient) reconnect() error {
// Create new connection
conn, err := c.createConnection()
if err != nil {
- return fmt.Errorf("failed to create connection: %v", err)
+ return fmt.Errorf("failed to create connection: %w", err)
}
client := worker_pb.NewWorkerServiceClient(conn)
@@ -284,7 +284,7 @@ func (c *GrpcAdminClient) reconnect() error {
if err != nil {
conn.Close()
streamCancel()
- return fmt.Errorf("failed to create stream: %v", err)
+ return fmt.Errorf("failed to create stream: %w", err)
}
// Update client state
@@ -440,7 +440,7 @@ func (c *GrpcAdminClient) SendHeartbeat(workerID string, status *types.WorkerSta
if !c.connected {
// Wait for reconnection for a short time
if err := c.waitForConnection(10 * time.Second); err != nil {
- return fmt.Errorf("not connected to admin server: %v", err)
+ return fmt.Errorf("not connected to admin server: %w", err)
}
}
@@ -479,7 +479,7 @@ func (c *GrpcAdminClient) RequestTask(workerID string, capabilities []types.Task
if !c.connected {
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
- return nil, fmt.Errorf("not connected to admin server: %v", err)
+ return nil, fmt.Errorf("not connected to admin server: %w", err)
}
}
@@ -545,7 +545,7 @@ func (c *GrpcAdminClient) CompleteTask(taskID string, success bool, errorMsg str
if !c.connected {
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
- return fmt.Errorf("not connected to admin server: %v", err)
+ return fmt.Errorf("not connected to admin server: %w", err)
}
}
@@ -576,7 +576,7 @@ func (c *GrpcAdminClient) UpdateTaskProgress(taskID string, progress float64) er
if !c.connected {
// Wait for reconnection for a short time
if err := c.waitForConnection(5 * time.Second); err != nil {
- return fmt.Errorf("not connected to admin server: %v", err)
+ return fmt.Errorf("not connected to admin server: %w", err)
}
}
diff --git a/weed/worker/tasks/balance/ui.go b/weed/worker/tasks/balance/ui.go
index 88f7bb4a9..2cea20a76 100644
--- a/weed/worker/tasks/balance/ui.go
+++ b/weed/worker/tasks/balance/ui.go
@@ -203,7 +203,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["imbalance_threshold"]; ok && len(values) > 0 {
threshold, err := strconv.ParseFloat(values[0], 64)
if err != nil {
- return nil, fmt.Errorf("invalid imbalance threshold: %v", err)
+ return nil, fmt.Errorf("invalid imbalance threshold: %w", err)
}
if threshold < 0 || threshold > 1 {
return nil, fmt.Errorf("imbalance threshold must be between 0.0 and 1.0")
@@ -215,7 +215,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["scan_interval"]; ok && len(values) > 0 {
duration, err := time.ParseDuration(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid scan interval: %v", err)
+ return nil, fmt.Errorf("invalid scan interval: %w", err)
}
config.ScanIntervalSeconds = int(duration.Seconds())
}
@@ -224,7 +224,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["max_concurrent"]; ok && len(values) > 0 {
maxConcurrent, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %v", err)
+ return nil, fmt.Errorf("invalid max concurrent: %w", err)
}
if maxConcurrent < 1 {
return nil, fmt.Errorf("max concurrent must be at least 1")
@@ -236,7 +236,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["min_server_count"]; ok && len(values) > 0 {
minServerCount, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid min server count: %v", err)
+ return nil, fmt.Errorf("invalid min server count: %w", err)
}
if minServerCount < 2 {
return nil, fmt.Errorf("min server count must be at least 2")
@@ -259,7 +259,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["min_interval"]; ok && len(values) > 0 {
duration, err := time.ParseDuration(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid min interval: %v", err)
+ return nil, fmt.Errorf("invalid min interval: %w", err)
}
config.MinIntervalSeconds = int(duration.Seconds())
}
diff --git a/weed/worker/tasks/erasure_coding/ui.go b/weed/worker/tasks/erasure_coding/ui.go
index 8a4640cf8..e17cba89a 100644
--- a/weed/worker/tasks/erasure_coding/ui.go
+++ b/weed/worker/tasks/erasure_coding/ui.go
@@ -189,7 +189,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["volume_age_hours_seconds"]; ok && len(values) > 0 {
hours, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid volume age hours: %v", err)
+ return nil, fmt.Errorf("invalid volume age hours: %w", err)
}
config.VolumeAgeHoursSeconds = hours
}
@@ -198,7 +198,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["scan_interval_seconds"]; ok && len(values) > 0 {
interval, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid scan interval: %v", err)
+ return nil, fmt.Errorf("invalid scan interval: %w", err)
}
config.ScanIntervalSeconds = interval
}
@@ -207,7 +207,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["max_concurrent"]; ok && len(values) > 0 {
maxConcurrent, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %v", err)
+ return nil, fmt.Errorf("invalid max concurrent: %w", err)
}
if maxConcurrent < 1 {
return nil, fmt.Errorf("max concurrent must be at least 1")
@@ -219,7 +219,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["shard_count"]; ok && len(values) > 0 {
shardCount, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid shard count: %v", err)
+ return nil, fmt.Errorf("invalid shard count: %w", err)
}
if shardCount < 1 {
return nil, fmt.Errorf("shard count must be at least 1")
@@ -231,7 +231,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
if values, ok := formData["parity_count"]; ok && len(values) > 0 {
parityCount, err := strconv.Atoi(values[0])
if err != nil {
- return nil, fmt.Errorf("invalid parity count: %v", err)
+ return nil, fmt.Errorf("invalid parity count: %w", err)
}
if parityCount < 1 {
return nil, fmt.Errorf("parity count must be at least 1")
diff --git a/weed/worker/tasks/vacuum/ui.go b/weed/worker/tasks/vacuum/ui.go
index a315dde88..6f67a801a 100644
--- a/weed/worker/tasks/vacuum/ui.go
+++ b/weed/worker/tasks/vacuum/ui.go
@@ -180,7 +180,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
// Parse garbage threshold
if thresholdStr := formData["garbage_threshold"]; len(thresholdStr) > 0 {
if threshold, err := strconv.ParseFloat(thresholdStr[0], 64); err != nil {
- return nil, fmt.Errorf("invalid garbage threshold: %v", err)
+ return nil, fmt.Errorf("invalid garbage threshold: %w", err)
} else if threshold < 0 || threshold > 1 {
return nil, fmt.Errorf("garbage threshold must be between 0.0 and 1.0")
} else {
@@ -191,7 +191,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
// Parse scan interval
if intervalStr := formData["scan_interval"]; len(intervalStr) > 0 {
if interval, err := time.ParseDuration(intervalStr[0]); err != nil {
- return nil, fmt.Errorf("invalid scan interval: %v", err)
+ return nil, fmt.Errorf("invalid scan interval: %w", err)
} else {
config.ScanIntervalSeconds = durationToSeconds(interval)
}
@@ -200,7 +200,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
// Parse min volume age
if ageStr := formData["min_volume_age"]; len(ageStr) > 0 {
if age, err := time.ParseDuration(ageStr[0]); err != nil {
- return nil, fmt.Errorf("invalid min volume age: %v", err)
+ return nil, fmt.Errorf("invalid min volume age: %w", err)
} else {
config.MinVolumeAgeSeconds = durationToSeconds(age)
}
@@ -209,7 +209,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
// Parse max concurrent
if concurrentStr := formData["max_concurrent"]; len(concurrentStr) > 0 {
if concurrent, err := strconv.Atoi(concurrentStr[0]); err != nil {
- return nil, fmt.Errorf("invalid max concurrent: %v", err)
+ return nil, fmt.Errorf("invalid max concurrent: %w", err)
} else if concurrent < 1 {
return nil, fmt.Errorf("max concurrent must be at least 1")
} else {
@@ -220,7 +220,7 @@ func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}
// Parse min interval
if intervalStr := formData["min_interval"]; len(intervalStr) > 0 {
if interval, err := time.ParseDuration(intervalStr[0]); err != nil {
- return nil, fmt.Errorf("invalid min interval: %v", err)
+ return nil, fmt.Errorf("invalid min interval: %w", err)
} else {
config.MinIntervalSeconds = durationToSeconds(interval)
}
diff --git a/weed/worker/worker.go b/weed/worker/worker.go
index 7050d21c9..3b7899f07 100644
--- a/weed/worker/worker.go
+++ b/weed/worker/worker.go
@@ -92,7 +92,7 @@ func (w *Worker) Start() error {
// Connect to admin server
if err := w.adminClient.Connect(); err != nil {
- return fmt.Errorf("failed to connect to admin server: %v", err)
+ return fmt.Errorf("failed to connect to admin server: %w", err)
}
w.running = true
@@ -111,7 +111,7 @@ func (w *Worker) Start() error {
if err := w.adminClient.RegisterWorker(workerInfo); err != nil {
w.running = false
w.adminClient.Disconnect()
- return fmt.Errorf("failed to register worker: %v", err)
+ return fmt.Errorf("failed to register worker: %w", err)
}
// Start worker loops