author    Chris Lu <chris.lu@gmail.com>  2020-11-16 22:26:58 -0800
committer Chris Lu <chris.lu@gmail.com>  2020-11-16 22:26:58 -0800
commit    6c9156b25f8b1c28fb0cc909310a20aeeec0e087 (patch)
tree      343e30d98e46a081aa57adfc334b807d0b3255dc
parent    9add554feb53706d1d878cc9636d234e622b8a80 (diff)
switch to logrus (origin/logrus)

losing filename and line number, which are critical for debugging
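For context, the two new dependencies in go.mod below work together: github.com/sirupsen/logrus supplies the leveled logger, and github.com/banzaicloud/logrus-runtime-formatter re-attaches the caller's file and line to each entry — the information plain logrus drops relative to glog. A minimal sketch of how the two can be wired up, assuming the formatter's File/Line options as documented in that library's README (an illustration, not the commit's actual weed/util/log/logger.go):

    package main

    import (
    	runtime "github.com/banzaicloud/logrus-runtime-formatter"
    	"github.com/sirupsen/logrus"
    )

    func main() {
    	// Wrap the plain text formatter so every entry also carries the
    	// caller's file and line, which glog printed natively.
    	logrus.SetFormatter(&runtime.Formatter{
    		ChildFormatter: &logrus.TextFormatter{},
    		File:           true, // assumed option: add a file name field
    		Line:           true, // assumed option: add a line number field
    	})
    	logrus.SetLevel(logrus.DebugLevel)

    	logrus.Debugf("debug output, now with file and line fields")
    }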
-rw-r--r--  go.mod  2
-rw-r--r--  go.sum  2
-rw-r--r--  unmaintained/change_superblock/change_superblock.go  12
-rw-r--r--  unmaintained/diff_volume_servers/diff_volume_servers.go  8
-rw-r--r--  unmaintained/fix_dat/fix_dat.go  10
-rw-r--r--  unmaintained/remove_duplicate_fids/remove_duplicate_fids.go  10
-rw-r--r--  unmaintained/see_dat/see_dat.go  6
-rw-r--r--  unmaintained/see_idx/see_idx.go  4
-rw-r--r--  weed/command/benchmark.go  8
-rw-r--r--  weed/command/compact.go  8
-rw-r--r--  weed/command/export.go  16
-rw-r--r--  weed/command/filer.go  18
-rw-r--r--  weed/command/filer_replication.go  26
-rw-r--r--  weed/command/filer_sync.go  10
-rw-r--r--  weed/command/fix.go  12
-rw-r--r--  weed/command/master.go  20
-rw-r--r--  weed/command/mount_std.go  14
-rw-r--r--  weed/command/msg_broker.go  10
-rw-r--r--  weed/command/s3.go  22
-rw-r--r--  weed/command/server.go  8
-rw-r--r--  weed/command/volume.go  44
-rw-r--r--  weed/command/volume_test.go  4
-rw-r--r--  weed/command/webdav.go  22
-rw-r--r--  weed/filer/abstract_sql/abstract_sql_store.go  8
-rw-r--r--  weed/filer/abstract_sql/abstract_sql_store_kv.go  4
-rw-r--r--  weed/filer/cassandra/cassandra_store.go  8
-rw-r--r--  weed/filer/configuration.go  8
-rw-r--r--  weed/filer/elastic/v7/elastic_store.go  20
-rw-r--r--  weed/filer/elastic/v7/elastic_store_kv.go  8
-rw-r--r--  weed/filer/etcd/etcd_store.go  6
-rw-r--r--  weed/filer/filechunk_manifest.go  8
-rw-r--r--  weed/filer/filechunks.go  14
-rw-r--r--  weed/filer/filechunks2_test.go  4
-rw-r--r--  weed/filer/filer.go  40
-rw-r--r--  weed/filer/filer_buckets.go  6
-rw-r--r--  weed/filer/filer_conf.go  10
-rw-r--r--  weed/filer/filer_delete_entry.go  14
-rw-r--r--  weed/filer/filer_deletion.go  8
-rw-r--r--  weed/filer/filer_notify.go  8
-rw-r--r--  weed/filer/filer_on_meta_event.go  8
-rw-r--r--  weed/filer/filerstore_hardlink.go  6
-rw-r--r--  weed/filer/leveldb/leveldb_store.go  8
-rw-r--r--  weed/filer/leveldb2/leveldb2_store.go  8
-rw-r--r--  weed/filer/meta_aggregator.go  24
-rw-r--r--  weed/filer/meta_replay.go  6
-rw-r--r--  weed/filer/mongodb/mongodb_store.go  8
-rw-r--r--  weed/filer/mongodb/mongodb_store_kv.go  4
-rw-r--r--  weed/filer/reader_at.go  24
-rw-r--r--  weed/filer/redis/universal_redis_store.go  4
-rw-r--r--  weed/filer/redis2/universal_redis_store.go  4
-rw-r--r--  weed/filer/stream.go  16
-rw-r--r--  weed/filesys/dir.go  70
-rw-r--r--  weed/filesys/dir_link.go  16
-rw-r--r--  weed/filesys/dir_rename.go  14
-rw-r--r--  weed/filesys/dirty_page.go  8
-rw-r--r--  weed/filesys/dirty_page_interval.go  8
-rw-r--r--  weed/filesys/file.go  40
-rw-r--r--  weed/filesys/filehandle.go  34
-rw-r--r--  weed/filesys/meta_cache/meta_cache.go  8
-rw-r--r--  weed/filesys/meta_cache/meta_cache_init.go  6
-rw-r--r--  weed/filesys/meta_cache/meta_cache_subscribe.go  10
-rw-r--r--  weed/filesys/wfs.go  16
-rw-r--r--  weed/filesys/wfs_deletion.go  6
-rw-r--r--  weed/filesys/wfs_write.go  8
-rw-r--r--  weed/filesys/xattr.go  2
-rw-r--r--  weed/glog/README  2
-rw-r--r--  weed/glog/glog.go  2
-rw-r--r--  weed/images/resizing.go  4
-rw-r--r--  weed/messaging/broker/broker_append.go  8
-rw-r--r--  weed/messaging/broker/broker_grpc_server_discovery.go  10
-rw-r--r--  weed/messaging/broker/broker_grpc_server_publish.go  10
-rw-r--r--  weed/messaging/broker/broker_grpc_server_subscribe.go  10
-rw-r--r--  weed/messaging/broker/broker_server.go  12
-rw-r--r--  weed/messaging/broker/topic_manager.go  4
-rw-r--r--  weed/notification/aws_sqs/aws_sqs_pub.go  6
-rw-r--r--  weed/notification/configuration.go  8
-rw-r--r--  weed/notification/gocdk_pub_sub/gocdk_pub_sub.go  6
-rw-r--r--  weed/notification/google_pub_sub/google_pub_sub.go  14
-rw-r--r--  weed/notification/kafka/kafka_queue.go  10
-rw-r--r--  weed/notification/log/log_queue.go  4
-rw-r--r--  weed/operation/chunked_file.go  8
-rw-r--r--  weed/operation/grpc_client.go  4
-rw-r--r--  weed/operation/lookup_vid_cache.go  6
-rw-r--r--  weed/operation/submit.go  10
-rw-r--r--  weed/operation/upload_content.go  16
-rw-r--r--  weed/pb/filer_pb/filer_client.go  22
-rw-r--r--  weed/pb/filer_pb/filer_pb_helper.go  10
-rw-r--r--  weed/pb/volume_info.go  14
-rw-r--r--  weed/replication/repl_util/replication_utli.go  6
-rw-r--r--  weed/replication/replicator.go  16
-rw-r--r--  weed/replication/sink/azuresink/azure_sink.go  4
-rw-r--r--  weed/replication/sink/filersink/fetch_write.go  10
-rw-r--r--  weed/replication/sink/filersink/filer_sink.go  28
-rw-r--r--  weed/replication/sink/gcssink/gcs_sink.go  6
-rw-r--r--  weed/replication/sink/s3sink/s3_sink.go  10
-rw-r--r--  weed/replication/sink/s3sink/s3_write.go  34
-rw-r--r--  weed/replication/source/filer_source.go  10
-rw-r--r--  weed/replication/sub/notification_aws_sqs.go  8
-rw-r--r--  weed/replication/sub/notification_gocdk_pub_sub.go  4
-rw-r--r--  weed/replication/sub/notification_google_pub_sub.go  18
-rw-r--r--  weed/replication/sub/notification_kafka.go  14
-rw-r--r--  weed/s3api/auth_credentials.go  24
-rw-r--r--  weed/s3api/filer_multipart.go  30
-rw-r--r--  weed/s3api/filer_util.go  6
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go  8
-rw-r--r--  weed/s3api/s3api_handlers.go  8
-rw-r--r--  weed/s3api/s3api_object_copy_handlers.go  6
-rw-r--r--  weed/s3api/s3api_object_handlers.go  18
-rw-r--r--  weed/s3api/s3api_object_multipart_handlers.go  12
-rw-r--r--  weed/s3api/s3api_object_tagging_handlers.go  24
-rw-r--r--  weed/security/guard.go  4
-rw-r--r--  weed/security/jwt.go  4
-rw-r--r--  weed/security/tls.go  10
-rw-r--r--  weed/sequence/etcd_sequencer.go  18
-rw-r--r--  weed/server/common.go  10
-rw-r--r--  weed/server/filer_grpc_server.go  46
-rw-r--r--  weed/server/filer_grpc_server_rename.go  10
-rw-r--r--  weed/server/filer_grpc_server_sub_meta.go  20
-rw-r--r--  weed/server/filer_server.go  8
-rw-r--r--  weed/server/filer_server_handlers_read.go  10
-rw-r--r--  weed/server/filer_server_handlers_read_dir.go  6
-rw-r--r--  weed/server/filer_server_handlers_tagging.go  6
-rw-r--r--  weed/server/filer_server_handlers_write.go  10
-rw-r--r--  weed/server/filer_server_handlers_write_autochunk.go  20
-rw-r--r--  weed/server/filer_server_handlers_write_cipher.go  4
-rw-r--r--  weed/server/master_grpc_server.go  40
-rw-r--r--  weed/server/master_server.go  30
-rw-r--r--  weed/server/master_server_handlers_admin.go  4
-rw-r--r--  weed/server/raft_server.go  22
-rw-r--r--  weed/server/volume_grpc_admin.go  36
-rw-r--r--  weed/server/volume_grpc_client_to_master.go  50
-rw-r--r--  weed/server/volume_grpc_copy.go  8
-rw-r--r--  weed/server/volume_grpc_erasure_coding.go  26
-rw-r--r--  weed/server/volume_grpc_query.go  8
-rw-r--r--  weed/server/volume_grpc_tail.go  12
-rw-r--r--  weed/server/volume_grpc_vacuum.go  16
-rw-r--r--  weed/server/volume_server.go  6
-rw-r--r--  weed/server/volume_server_handlers.go  10
-rw-r--r--  weed/server/volume_server_handlers_read.go  34
-rw-r--r--  weed/server/volume_server_handlers_write.go  10
-rw-r--r--  weed/server/webdav_server.go  40
-rw-r--r--  weed/shell/command_ec_common.go  4
-rw-r--r--  weed/stats/disk.go  4
-rw-r--r--  weed/stats/metrics.go  7
-rw-r--r--  weed/storage/backend/backend.go  10
-rw-r--r--  weed/storage/backend/s3_backend/s3_backend.go  16
-rw-r--r--  weed/storage/backend/s3_backend/s3_download.go  4
-rw-r--r--  weed/storage/backend/s3_backend/s3_upload.go  4
-rw-r--r--  weed/storage/backend/volume_create.go  4
-rw-r--r--  weed/storage/backend/volume_create_linux.go  4
-rw-r--r--  weed/storage/backend/volume_create_windows.go  4
-rw-r--r--  weed/storage/disk_location.go  20
-rw-r--r--  weed/storage/erasure_coding/ec_encoder.go  6
-rw-r--r--  weed/storage/idx/walk.go  6
-rw-r--r--  weed/storage/needle/needle_parse_upload.go  10
-rw-r--r--  weed/storage/needle/needle_read_write.go  6
-rw-r--r--  weed/storage/needle_map/compact_map_test.go  2
-rw-r--r--  weed/storage/needle_map/memdb.go  4
-rw-r--r--  weed/storage/needle_map_leveldb.go  18
-rw-r--r--  weed/storage/needle_map_memory.go  6
-rw-r--r--  weed/storage/needle_map_metric.go  2
-rw-r--r--  weed/storage/needle_map_metric_test.go  12
-rw-r--r--  weed/storage/needle_map_sorted_file.go  12
-rw-r--r--  weed/storage/store.go  24
-rw-r--r--  weed/storage/store_ec.go  32
-rw-r--r--  weed/storage/store_ec_delete.go  6
-rw-r--r--  weed/storage/store_vacuum.go  4
-rw-r--r--  weed/storage/super_block/super_block.go  6
-rw-r--r--  weed/storage/volume.go  10
-rw-r--r--  weed/storage/volume_checking.go  10
-rw-r--r--  weed/storage/volume_loading.go  30
-rw-r--r--  weed/storage/volume_read_write.go  34
-rw-r--r--  weed/storage/volume_super_block.go  4
-rw-r--r--  weed/storage/volume_tier.go  4
-rw-r--r--  weed/storage/volume_vacuum.go  48
-rw-r--r--  weed/topology/cluster_commands.go  4
-rw-r--r--  weed/topology/data_node.go  4
-rw-r--r--  weed/topology/node.go  8
-rw-r--r--  weed/topology/store_replicate.go  14
-rw-r--r--  weed/topology/topology.go  10
-rw-r--r--  weed/topology/topology_ec.go  4
-rw-r--r--  weed/topology/topology_event_handling.go  4
-rw-r--r--  weed/topology/topology_vacuum.go  26
-rw-r--r--  weed/topology/volume_growth.go  8
-rw-r--r--  weed/topology/volume_layout.go  18
-rw-r--r--  weed/util/bounded_tree/bounded_tree.go  4
-rw-r--r--  weed/util/chunk_cache/chunk_cache.go  8
-rw-r--r--  weed/util/chunk_cache/chunk_cache_on_disk.go  4
-rw-r--r--  weed/util/chunk_cache/on_disk_cache_layer.go  10
-rw-r--r--  weed/util/cipher.go  4
-rw-r--r--  weed/util/compression.go  10
-rw-r--r--  weed/util/config.go  12
-rw-r--r--  weed/util/file_util.go  6
-rw-r--r--  weed/util/grace/pprof.go  6
-rw-r--r--  weed/util/http_util.go  6
-rw-r--r--  weed/util/log/logger.go  91
-rw-r--r--  weed/util/log_buffer/log_buffer.go  6
-rw-r--r--  weed/util/log_buffer/log_read.go  6
-rw-r--r--  weed/util/network.go  4
-rw-r--r--  weed/util/retry.go  6
-rw-r--r--  weed/util/throttler.go  2
-rw-r--r--  weed/wdclient/exclusive_locks/exclusive_locker.go  4
-rw-r--r--  weed/wdclient/masterclient.go  28
-rw-r--r--  weed/wdclient/vid_map.go  6
-rw-r--r--  weed/weed.go  8
205 files changed, 1340 insertions(+), 1246 deletions(-)
diff --git a/go.mod b/go.mod
index 51c24bcf0..19adf4103 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ require (
github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.33.5
+ github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/cespare/xxhash v1.1.0
github.com/chrislusf/raft v1.0.3
@@ -60,6 +61,7 @@ require (
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/seaweedfs/fuse v1.0.7
github.com/seaweedfs/goexif v1.0.2
+ github.com/sirupsen/logrus v1.4.2
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index 51d96d997..c0883ddde 100644
--- a/go.sum
+++ b/go.sum
@@ -62,6 +62,8 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e h1:ZOnKnYG1LLgq4W7wZUYj9ntn3RxQ65EZyYqdtFpP2Dw=
+github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e/go.mod h1:hEvEpPmuwKO+0TbrDQKIkmX0gW2s2waZHF8pIhEEmpM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index afe651c4e..d07ab4552 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -7,7 +7,7 @@ import (
"path"
"strconv"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -46,7 +46,7 @@ func main() {
}
datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
if err != nil {
- glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
+ log.Fatalf("Open Volume Data File [ERROR]: %v", err)
}
datBackend := backend.NewDiskFile(datFile)
defer datBackend.Close()
@@ -54,7 +54,7 @@ func main() {
superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
- glog.Fatalf("cannot parse existing super block: %v", err)
+ log.Fatalf("cannot parse existing super block: %v", err)
}
fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
@@ -66,7 +66,7 @@ func main() {
replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)
if err != nil {
- glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+ log.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
}
fmt.Printf("Changing replication to: %s\n", replica)
@@ -79,7 +79,7 @@ func main() {
ttl, err := needle.ReadTTL(*targetTTL)
if err != nil {
- glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
+ log.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
}
fmt.Printf("Changing ttl to: %s\n", ttl)
@@ -93,7 +93,7 @@ func main() {
header := superBlock.Bytes()
if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
- glog.Fatalf("cannot write super block: %v", e)
+ log.Fatalf("cannot write super block: %v", e)
}
fmt.Println("Change Applied.")
diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go
index 6107f3d48..1fe4973ae 100644
--- a/unmaintained/diff_volume_servers/diff_volume_servers.go
+++ b/unmaintained/diff_volume_servers/diff_volume_servers.go
@@ -11,7 +11,7 @@ import (
"os"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -47,7 +47,7 @@ func main() {
vid := uint32(*volumeId)
servers := strings.Split(*serversStr, ",")
if len(servers) < 2 {
- glog.Fatalf("You must specify more than 1 server\n")
+ log.Fatalf("You must specify more than 1 server\n")
}
var referenceServer string
var maxOffset int64
@@ -55,7 +55,7 @@ func main() {
for _, addr := range servers {
files, offset, err := getVolumeFiles(vid, addr)
if err != nil {
- glog.Fatalf("Failed to copy idx from volume server %s\n", err)
+ log.Fatalf("Failed to copy idx from volume server %s\n", err)
}
allFiles[addr] = files
if offset > maxOffset {
@@ -101,7 +101,7 @@ func main() {
id, err = getNeedleFileId(vid, nid, addr)
}
if err != nil {
- glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
+ log.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
}
fmt.Println(id, addr, diffMsg)
}
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index 70bce3bf9..f08dc04de 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -8,7 +8,7 @@ import (
"path"
"strconv"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -42,26 +42,26 @@ func main() {
}
indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil {
- glog.Fatalf("Read Volume Index %v", err)
+ log.Fatalf("Read Volume Index %v", err)
}
defer indexFile.Close()
datFileName := path.Join(*fixVolumePath, fileName+".dat")
datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
if err != nil {
- glog.Fatalf("Read Volume Data %v", err)
+ log.Fatalf("Read Volume Data %v", err)
}
datBackend := backend.NewDiskFile(datFile)
defer datBackend.Close()
newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
if err != nil {
- glog.Fatalf("Write New Volume Data %v", err)
+ log.Fatalf("Write New Volume Data %v", err)
}
defer newDatFile.Close()
superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
- glog.Fatalf("Read Volume Data superblock %v", err)
+ log.Fatalf("Read Volume Data superblock %v", err)
}
newDatFile.Write(superBlock.Bytes())
diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
index 84173a663..dae189610 100644
--- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
+++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
@@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -49,7 +49,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
newFileName := filepath.Join(*volumePath, "dat_fixed")
newDatFile, err := os.Create(newFileName)
if err != nil {
- glog.Fatalf("Write New Volume Data %v", err)
+ log.Fatalf("Write New Volume Data %v", err)
}
scanner.datBackend = backend.NewDiskFile(newDatFile)
scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
@@ -58,7 +58,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
checksum := Checksum(n)
if scanner.hashes[checksum] {
- glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+ log.Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
return nil
}
scanner.hashes[checksum] = true
@@ -83,13 +83,13 @@ func main() {
if _, err := os.Stat(scanner.dir); err != nil {
if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
- glog.Fatalf("could not create output dir : %s", err)
+ log.Fatalf("could not create output dir : %s", err)
}
}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
- glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+ log.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
}
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index 17c494841..a666c461c 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -5,7 +5,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -32,7 +32,7 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
- glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
+ log.Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}
@@ -45,6 +45,6 @@ func main() {
scanner := &VolumeFileScanner4SeeDat{}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
- glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+ log.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
}
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index 22c659351..afc5667eb 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -8,7 +8,7 @@ import (
"path"
"strconv"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -32,7 +32,7 @@ func main() {
}
indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil {
- glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
+ log.Fatalf("Create Volume Index [ERROR] %s\n", err)
}
defer indexFile.Close()
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 080312aa8..904ed6494 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -16,7 +16,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -119,7 +119,7 @@ func runBenchmark(cmd *Command, args []string) bool {
if *b.cpuprofile != "" {
f, err := os.Create(*b.cpuprofile)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
@@ -310,7 +310,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
- glog.Fatalf("File to create file %s: %s\n", fileName, err)
+ log.Fatalf("File to create file %s: %s\n", fileName, err)
}
defer file.Close()
@@ -329,7 +329,7 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
func readFileIds(fileName string, fileIdLineChan chan string) {
file, err := os.Open(fileName) // For read access.
if err != nil {
- glog.Fatalf("File to read file %s: %s\n", fileName, err)
+ log.Fatalf("File to read file %s: %s\n", fileName, err)
}
defer file.Close()
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 6117cf9f3..625a44b5b 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -1,7 +1,7 @@
package command
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -44,15 +44,15 @@ func runCompact(cmd *Command, args []string) bool {
v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid,
storage.NeedleMapInMemory, nil, nil, preallocate, 0)
if err != nil {
- glog.Fatalf("Load Volume [ERROR] %s\n", err)
+ log.Fatalf("Load Volume [ERROR] %s\n", err)
}
if *compactMethod == 0 {
if err = v.Compact(preallocate, 0); err != nil {
- glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+ log.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
if err = v.Compact2(preallocate, 0); err != nil {
- glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+ log.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}
diff --git a/weed/command/export.go b/weed/command/export.go
index 78d75ef52..cf1e953e9 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -13,7 +13,7 @@ import (
"text/template"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@@ -111,11 +111,11 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
vid := scanner.vid
nv, ok := needleMap.Get(n.Id)
- glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+ log.Tracef("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
- glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
+ log.Tracef("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
return nil
}
@@ -139,9 +139,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
}
}
- glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
+ log.Debugf("This seems deleted %d size %d", n.Id, n.Size)
} else {
- glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
+ log.Debugf("Skipping later-updated Id %d size %d", n.Id, n.Size)
}
return nil
}
@@ -178,7 +178,7 @@ func runExport(cmd *Command, args []string) bool {
outputFile = os.Stdout
} else {
if outputFile, err = os.Create(*output); err != nil {
- glog.Fatalf("cannot open output tar %s: %s", *output, err)
+ log.Fatalf("cannot open output tar %s: %s", *output, err)
}
}
defer outputFile.Close()
@@ -201,7 +201,7 @@ func runExport(cmd *Command, args []string) bool {
defer needleMap.Close()
if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
- glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
+ log.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}
volumeFileScanner := &VolumeFileScanner4Export{
@@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {
err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
if err != nil && err != io.EOF {
- glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+ log.Fatalf("Export Volume File [ERROR] %s\n", err)
}
return true
}
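The export.go hunks above show the mechanical level mapping applied throughout this commit: glog.V(3).Infof becomes log.Tracef, glog.V(1)/V(2).Infof become log.Debugf, glog.V(0).Infof/Infoln become log.Infof/Infoln, and glog.Warningf/Errorf/Fatalf become log.Warnf/Errorf/Fatalf. The new weed/util/log/logger.go (91 lines, not reproduced in this excerpt) plausibly reduces to a thin wrapper over a logrus instance; a hypothetical skeleton under that assumption, not the commit's actual file:

    package log

    import "github.com/sirupsen/logrus"

    var logger = logrus.New()

    // Named levels replace glog verbosity: V(3)->Trace, V(1)/V(2)->Debug, V(0)->Info.
    func Tracef(format string, args ...interface{}) { logger.Tracef(format, args...) }
    func Debugf(format string, args ...interface{}) { logger.Debugf(format, args...) }
    func Infof(format string, args ...interface{})  { logger.Infof(format, args...) }
    func Infoln(args ...interface{})                { logger.Infoln(args...) }
    func Warnf(format string, args ...interface{})  { logger.Warnf(format, args...) }
    func Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) }
    func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) }
    func Fatal(args ...interface{})                 { logger.Fatal(args...) }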
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 017427335..08d010e12 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -9,7 +9,7 @@ import (
"google.golang.org/grpc/reflection"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -152,37 +152,37 @@ func (fo *FilerOptions) startFiler() {
Filers: peers,
})
if nfs_err != nil {
- glog.Fatalf("Filer startup error: %v", nfs_err)
+ log.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
- glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
+ log.Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
- glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
+ log.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
}
go func() {
if e := http.Serve(publicListener, publicVolumeMux); e != nil {
- glog.Fatalf("Volume server fail to serve public: %v", e)
+ log.Fatalf("Volume server fail to serve public: %v", e)
}
}()
}
- glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
+ log.Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
*fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
- glog.Fatalf("Filer listener error: %v", e)
+ log.Fatalf("Filer listener error: %v", e)
}
// starting grpc server
grpcPort := *fo.port + 10000
grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
- glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
+ log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
@@ -191,7 +191,7 @@ func (fo *FilerOptions) startFiler() {
httpS := &http.Server{Handler: defaultMux}
if err := httpS.Serve(filerListener); err != nil {
- glog.Fatalf("Filer Fail to serve: %v", e)
+ log.Fatalf("Filer Fail to serve: %v", e)
}
}
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index 40f2b570b..1b154270e 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -4,7 +4,7 @@ import (
"context"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/replication"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
@@ -48,10 +48,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
- glog.Fatalf("Failed to initialize notification input for %s: %+v",
+ log.Fatalf("Failed to initialize notification input for %s: %+v",
input.GetName(), err)
}
- glog.V(0).Infof("Configure notification input to %s", input.GetName())
+ log.Infof("Configure notification input to %s", input.GetName())
notificationInput = input
break
}
@@ -69,7 +69,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
fromDir := config.GetString("source.filer.directory")
toDir := config.GetString("sink.filer.directory")
if strings.HasPrefix(toDir, fromDir) {
- glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
+ log.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
}
}
}
@@ -78,10 +78,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, sk := range sink.Sinks {
if config.GetBool("sink." + sk.GetName() + ".enabled") {
if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
- glog.Fatalf("Failed to initialize sink for %s: %+v",
+ log.Fatalf("Failed to initialize sink for %s: %+v",
sk.GetName(), err)
}
- glog.V(0).Infof("Configure sink to %s", sk.GetName())
+ log.Infof("Configure sink to %s", sk.GetName())
dataSink = sk
break
}
@@ -100,7 +100,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for {
key, m, err := notificationInput.ReceiveMessage()
if err != nil {
- glog.Errorf("receive %s: %+v", key, err)
+ log.Errorf("receive %s: %+v", key, err)
continue
}
if key == "" {
@@ -108,16 +108,16 @@ func runFilerReplicate(cmd *Command, args []string) bool {
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
- glog.V(1).Infof("delete: %s", key)
+ log.Debugf("delete: %s", key)
} else if m.OldEntry == nil && m.NewEntry != nil {
- glog.V(1).Infof(" add: %s", key)
+ log.Debugf(" add: %s", key)
} else {
- glog.V(1).Infof("modify: %s", key)
+ log.Debugf("modify: %s", key)
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
- glog.Errorf("replicate %s: %+v", key, err)
+ log.Errorf("replicate %s: %+v", key, err)
} else {
- glog.V(1).Infof("replicated %s", key)
+ log.Debugf("replicated %s", key)
}
}
@@ -130,7 +130,7 @@ func validateOneEnabledInput(config *viper.Viper) {
if enabledInput == "" {
enabledInput = input.GetName()
} else {
- glog.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
+ log.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
}
}
}
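The replicate loop above derives the event type from which side of the notification is populated. As a standalone illustration (the helper name is mine, not part of the commit), assuming the filer_pb.EventNotification message with its OldEntry/NewEntry fields:

    // eventKind mirrors the classification in runFilerReplicate:
    // only OldEntry set -> delete, only NewEntry set -> add, both set -> modify.
    func eventKind(m *filer_pb.EventNotification) string {
    	switch {
    	case m.OldEntry != nil && m.NewEntry == nil:
    		return "delete"
    	case m.OldEntry == nil && m.NewEntry != nil:
    		return "add"
    	default:
    		return "modify"
    	}
    }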
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
index af0a624b1..de369a72c 100644
--- a/weed/command/filer_sync.go
+++ b/weed/command/filer_sync.go
@@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication"
@@ -89,7 +89,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDebug)
if err != nil {
- glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
+ log.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
}
}
@@ -101,7 +101,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDebug)
if err != nil {
- glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
+ log.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
}
}
@@ -134,7 +134,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
return err
}
- glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
+ log.Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
// create filer sink
filerSource := &source.FilerSource{}
@@ -264,7 +264,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
counter++
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
- glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+ log.Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
counter = 0
lastWriteTime = time.Now()
if err := writeSyncOffset(grpcDialOption, targetFiler, sourceFilerSignature, resp.TsNs); err != nil {
diff --git a/weed/command/fix.go b/weed/command/fix.go
index ae9a051b8..03599f909 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -5,7 +5,7 @@ import (
"path"
"strconv"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@@ -47,12 +47,12 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
}
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
- glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
+ log.Debugf("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
if n.Size.IsValid() {
pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
- glog.V(2).Infof("saved %d with error %v", n.Size, pe)
+ log.Debugf("saved %d with error %v", n.Size, pe)
} else {
- glog.V(2).Infof("skipping deleted file ...")
+ log.Debugf("skipping deleted file ...")
return scanner.nm.Delete(n.Id)
}
return nil
@@ -79,12 +79,12 @@ func runFix(cmd *Command, args []string) bool {
}
if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil {
- glog.Fatalf("scan .dat File: %v", err)
+ log.Fatalf("scan .dat File: %v", err)
os.Remove(indexFileName)
}
if err := nm.SaveToIdx(indexFileName); err != nil {
- glog.Fatalf("save to .idx File: %v", err)
+ log.Fatalf("save to .idx File: %v", err)
os.Remove(indexFileName)
}
diff --git a/weed/command/master.go b/weed/command/master.go
index c03da7f5d..912211ff5 100644
--- a/weed/command/master.go
+++ b/weed/command/master.go
@@ -14,7 +14,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util/grace"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -94,7 +94,7 @@ func runMaster(cmd *Command, args []string) bool {
os.MkdirAll(*m.metaFolder, 0755)
}
if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil {
- glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
+ log.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
}
var masterWhiteList []string
@@ -102,7 +102,7 @@ func runMaster(cmd *Command, args []string) bool {
masterWhiteList = strings.Split(*m.whiteList, ",")
}
if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
- glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
+ log.Fatalf("volumeSizeLimitMB should be smaller than 30000")
}
startMaster(m, masterWhiteList)
@@ -119,16 +119,16 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
- glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
+ log.Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
- glog.Fatalf("Master startup error: %v", e)
+ log.Fatalf("Master startup error: %v", e)
}
// start raftServer
raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState)
if raftServer == nil {
- glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
+ log.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
}
ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
@@ -136,14 +136,14 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
grpcPort := *masterOption.port + 10000
grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
- glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
+ log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
// Create your protocol servers.
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS)
- glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
+ log.Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
go grpcS.Serve(grpcL)
go func() {
@@ -165,7 +165,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
- glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
+ log.Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@@ -183,7 +183,7 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
cleanedPeers = append(cleanedPeers, masterAddress)
}
if len(cleanedPeers)%2 == 0 {
- glog.Fatalf("Only odd number of masters are supported!")
+ log.Fatalf("Only odd number of masters are supported!")
}
return
}
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 83cb352ff..66e12757a 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -19,7 +19,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -54,7 +54,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
if err != nil {
- glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
+ log.Infof("ParseFilerGrpcAddress: %v", err)
return true
}
@@ -70,7 +70,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return nil
})
if err != nil {
- glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
+ log.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
return true
}
@@ -130,7 +130,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
- glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
+ log.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
return true
}
@@ -194,7 +194,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// mount
c, err := fuse.Mount(dir, options...)
if err != nil {
- glog.V(0).Infof("mount: %v", err)
+ log.Infof("mount: %v", err)
return true
}
defer fuse.Unmount(dir)
@@ -204,13 +204,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
c.Close()
})
- glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
+ log.Infof("mounted %s%s to %s", filer, mountRoot, dir)
err = fs.Serve(c, seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
- glog.V(0).Infof("mount process: %v", err)
+ log.Infof("mount process: %v", err)
return true
}
diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go
index b4b5855ff..036eb959f 100644
--- a/weed/command/msg_broker.go
+++ b/weed/command/msg_broker.go
@@ -10,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util/grace"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/messaging/broker"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -65,7 +65,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
return false
}
@@ -82,10 +82,10 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
return nil
})
if err != nil {
- glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ log.Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
- glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ log.Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
break
}
}
@@ -102,7 +102,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
// start grpc listener
grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
if err != nil {
- glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
+ log.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
diff --git a/weed/command/s3.go b/weed/command/s3.go
index ed5bb0b80..59fea2cd5 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -12,7 +12,7 @@ import (
"github.com/gorilla/mux"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/s3api"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -137,7 +137,7 @@ func (s3opt *S3Options) startS3Server() bool {
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
return false
}
@@ -157,14 +157,14 @@ func (s3opt *S3Options) startS3Server() bool {
}
filerBucketsPath = resp.DirBuckets
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
- glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
+ log.Infof("S3 read filer buckets dir: %s", filerBucketsPath)
return nil
})
if err != nil {
- glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ log.Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
- glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ log.Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
break
}
}
@@ -183,7 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
GrpcDialOption: grpcDialOption,
})
if s3ApiServer_err != nil {
- glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
+ log.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
}
httpS := &http.Server{Handler: router}
@@ -191,18 +191,18 @@ func (s3opt *S3Options) startS3Server() bool {
listenAddress := fmt.Sprintf(":%d", *s3opt.port)
s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
- glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
+ log.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
}
if *s3opt.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
+ log.Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
- glog.Fatalf("S3 API Server Fail to serve: %v", err)
+ log.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
+ log.Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
- glog.Fatalf("S3 API Server Fail to serve: %v", err)
+ log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}
diff --git a/weed/command/server.go b/weed/command/server.go
index 4a9c2411a..a66f71d20 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -10,7 +10,7 @@ import (
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -124,7 +124,7 @@ func runServer(cmd *Command, args []string) bool {
if *serverOptions.cpuprofile != "" {
f, err := os.Create(*serverOptions.cpuprofile)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
@@ -175,14 +175,14 @@ func runServer(cmd *Command, args []string) bool {
folders := strings.Split(*volumeDataFolders, ",")
if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
- glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
+ log.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
}
if *masterOptions.metaFolder == "" {
*masterOptions.metaFolder = folders[0]
}
if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {
- glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
+ log.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
}
filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder
diff --git a/weed/command/volume.go b/weed/command/volume.go
index ce5992665..9fe779460 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -22,7 +22,7 @@ import (
"google.golang.org/grpc/reflection"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/server"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
@@ -125,7 +125,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
v.folders = strings.Split(volumeFolders, ",")
for _, folder := range v.folders {
if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
- glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+ log.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
}
}
@@ -135,7 +135,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if max, e := strconv.Atoi(maxString); e == nil {
v.folderMaxLimits = append(v.folderMaxLimits, max)
} else {
- glog.Fatalf("The max specified in -max not a valid number %s", maxString)
+ log.Fatalf("The max specified in -max not a valid number %s", maxString)
}
}
if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
@@ -144,7 +144,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
}
if len(v.folders) != len(v.folderMaxLimits) {
- glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
+ log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
}
// set minFreeSpacePercent
@@ -153,7 +153,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if value, e := strconv.ParseFloat(freeString, 32); e == nil {
v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
} else {
- glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
+ log.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
}
}
if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
@@ -162,7 +162,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
}
if len(v.folders) != len(v.minFreeSpacePercents) {
- glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
+ log.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
}
// security related white list configuration
@@ -172,7 +172,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if *v.ip == "" {
*v.ip = util.DetectedHostAddress()
- glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
+ log.Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@@ -226,7 +226,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if v.isSeparatedPublicPort() {
publicHttpDown = v.startPublicHttpService(publicVolumeMux)
if nil == publicHttpDown {
- glog.Fatalf("start public http service failed")
+ log.Fatalf("start public http service failed")
}
}
@@ -239,7 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
// Stop heartbeats
if !volumeServer.StopHeartbeat() {
- glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
+ log.Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
}
@@ -257,18 +257,18 @@ func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server,
// firstly, stop the public http service to prevent from receiving new user request
if nil != publicHttpDown {
- glog.V(0).Infof("stop public http server ... ")
+ log.Infof("stop public http server ... ")
if err := publicHttpDown.Stop(); err != nil {
- glog.Warningf("stop the public http server failed, %v", err)
+ log.Warnf("stop the public http server failed, %v", err)
}
}
- glog.V(0).Infof("graceful stop cluster http server ... ")
+ log.Infof("graceful stop cluster http server ... ")
if err := clusterHttpServer.Stop(); err != nil {
- glog.Warningf("stop the cluster http server failed, %v", err)
+ log.Warnf("stop the cluster http server failed, %v", err)
}
- glog.V(0).Infof("graceful stop gRPC ...")
+ log.Infof("graceful stop gRPC ...")
grpcS.GracefulStop()
volumeServer.Shutdown()
@@ -286,14 +286,14 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
grpcPort := *v.port + 10000
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
- glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
+ log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
go func() {
if err := grpcS.Serve(grpcL); err != nil {
- glog.Fatalf("start gRPC service failed, %s", err)
+ log.Fatalf("start gRPC service failed, %s", err)
}
}()
return grpcS
@@ -301,17 +301,17 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
- glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
+ log.Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
- glog.Fatalf("Volume server listener error:%v", e)
+ log.Fatalf("Volume server listener error:%v", e)
}
pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
go func() {
if err := publicHttpDown.Wait(); err != nil {
- glog.Errorf("public http down wait failed, %v", err)
+ log.Errorf("public http down wait failed, %v", err)
}
}()
@@ -328,10 +328,10 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
- glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
+ log.Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
- glog.Fatalf("Volume server listener error:%v", e)
+ log.Fatalf("Volume server listener error:%v", e)
}
httpDown := httpdown.HTTP{
@@ -342,7 +342,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
go func() {
if e := clusterHttpServer.Wait(); e != nil {
- glog.Fatalf("Volume server fail to serve: %v", e)
+ log.Fatalf("Volume server fail to serve: %v", e)
}
}()
return clusterHttpServer
diff --git a/weed/command/volume_test.go b/weed/command/volume_test.go
index 7399f1248..bcb5b0f59 100644
--- a/weed/command/volume_test.go
+++ b/weed/command/volume_test.go
@@ -5,9 +5,9 @@ import (
"testing"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func TestXYZ(t *testing.T) {
- glog.V(0).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
+ log.Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
}
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
index dc84b1fd0..3fbfcfc7d 100644
--- a/weed/command/webdav.go
+++ b/weed/command/webdav.go
@@ -9,7 +9,7 @@ import (
"strconv"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -54,7 +54,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
+ log.Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
@@ -76,7 +76,7 @@ func (wo *WebDavOption) startWebDav() bool {
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
return false
}
@@ -94,10 +94,10 @@ func (wo *WebDavOption) startWebDav() bool {
return nil
})
if err != nil {
- glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ log.Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
- glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ log.Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
break
}
}
@@ -114,7 +114,7 @@ func (wo *WebDavOption) startWebDav() bool {
CacheSizeMB: *wo.cacheSizeMB,
})
if webdavServer_err != nil {
- glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
+ log.Fatalf("WebDav Server startup error: %v", webdavServer_err)
}
httpS := &http.Server{Handler: ws.Handler}
@@ -122,18 +122,18 @@ func (wo *WebDavOption) startWebDav() bool {
listenAddress := fmt.Sprintf(":%d", *wo.port)
webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
- glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
+ log.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
}
if *wo.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
+ log.Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
- glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ log.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
+ log.Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
- glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ log.Fatalf("WebDav Server Fail to serve: %v", err)
}
}
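The startup sequence above blocks until the filer's gRPC endpoint answers, retrying once per second and logging each failed probe. A minimal sketch of that shape, where probe is a hypothetical stand-in for the WithGrpcFilerClient round trip:

// Hedged sketch of the connect-and-retry loop; probe is hypothetical.
package main

import (
	"errors"
	"fmt"
	"time"
)

func waitForFiler(probe func() error) {
	for {
		if err := probe(); err != nil {
			fmt.Printf("waiting to connect to filer: %v\n", err)
			time.Sleep(time.Second)
			continue
		}
		fmt.Println("connected to filer")
		return
	}
}

func main() {
	attempts := 0
	waitForFiler(func() error {
		attempts++
		if attempts < 3 { // fails twice, then connects
			return errors.New("connection refused")
		}
		return nil
	})
}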
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index 7c95ffb57..3acfbbcee 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -5,7 +5,7 @@ import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"strings"
@@ -81,7 +81,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
}
// now the insert failed possibly due to duplication constraints
- glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
+ log.Debugf("insert %s falls back to update: %v", entry.FullPath, err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
if err != nil {
@@ -187,7 +187,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
- glog.V(0).Infof("scan %s : %v", fullpath, err)
+ log.Infof("scan %s : %v", fullpath, err)
return nil, fmt.Errorf("scan %s: %v", fullpath, err)
}
@@ -195,7 +195,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
- glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+ log.Infof("scan decode %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}
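Both hunks in this store rely on the same upsert idiom: attempt the INSERT first and, if it fails (most likely on a duplicate-key constraint), retry the row as an UPDATE. A self-contained sketch of that fallback; sqlInsert and sqlUpdate stand in for the store's generated per-dialect SQL:

// Minimal sketch of the insert-then-update fallback shown above.
package abstractsqlsketch

import (
	"context"
	"database/sql"
	"fmt"
)

func upsert(ctx context.Context, db *sql.DB, sqlInsert, sqlUpdate string, meta []byte, dirHash int64, name, dir string) error {
	if _, err := db.ExecContext(ctx, sqlInsert, dirHash, name, dir, meta); err != nil {
		// the insert may have hit a duplicate-key constraint; retry as update
		fmt.Printf("insert falls back to update: %v\n", err)
		if _, err = db.ExecContext(ctx, sqlUpdate, meta, dirHash, name, dir); err != nil {
			return fmt.Errorf("upsert %s/%s: %v", dir, name, err)
		}
	}
	return nil
}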
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
index c368059df..17634fa3e 100644
--- a/weed/filer/abstract_sql/abstract_sql_store_kv.go
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -8,7 +8,7 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -24,7 +24,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
}
// now the insert failed possibly due to duplication constraints
- glog.V(1).Infof("kv insert falls back to update: %s", err)
+ log.Debugf("kv insert falls back to update: %s", err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
if err != nil {
diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
index ae8cb7a86..681c12d57 100644
--- a/weed/filer/cassandra/cassandra_store.go
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -6,7 +6,7 @@ import (
"github.com/gocql/gocql"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -42,7 +42,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
store.cluster.Consistency = gocql.LocalQuorum
store.session, err = store.cluster.CreateSession()
if err != nil {
- glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
+ log.Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
}
return
}
@@ -155,13 +155,13 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
if err := iter.Close(); err != nil {
- glog.V(0).Infof("list iterator close: %v", err)
+ log.Infof("list iterator close: %v", err)
}
return entries, err
diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go
index 3dce67d6d..27c5d9344 100644
--- a/weed/filer/configuration.go
+++ b/weed/filer/configuration.go
@@ -3,7 +3,7 @@ package filer
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/spf13/viper"
)
@@ -18,11 +18,11 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {
if err := store.Initialize(config, store.GetName()+"."); err != nil {
- glog.Fatalf("Failed to initialize store for %s: %+v",
+ log.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
f.SetStore(store)
- glog.V(0).Infof("Configure filer for %s", store.GetName())
+ log.Infof("Configure filer for %s", store.GetName())
return
}
}
@@ -43,7 +43,7 @@ func validateOneEnabledStore(config *viper.Viper) {
if enabledStore == "" {
enabledStore = store.GetName()
} else {
- glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
+ log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
}
}
}
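The selection rule here is first-enabled-wins: LoadConfiguration walks the registered stores and initializes the first one whose <name>.enabled flag is set, while validateOneEnabledStore aborts when two are switched on. A runnable sketch of the same rule over a hypothetical store list:

// Sketch of single-enabled-store validation; the store names are illustrative.
package main

import (
	"log"

	"github.com/spf13/viper"
)

func main() {
	config := viper.New()
	config.Set("leveldb2.enabled", true)
	config.Set("mysql.enabled", false) // only one store may be enabled

	stores := []string{"leveldb2", "mysql", "cassandra"}
	enabled := ""
	for _, name := range stores {
		if !config.GetBool(name + ".enabled") {
			continue
		}
		if enabled != "" {
			log.Fatalf("filer store is enabled for both %s and %s", enabled, name)
		}
		enabled = name
	}
	log.Printf("configure filer for %s", enabled)
}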
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
index ec88e10a5..9363e1265 100644
--- a/weed/filer/elastic/v7/elastic_store.go
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -7,7 +7,7 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
jsoniter "github.com/json-iterator/go"
@@ -67,7 +67,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre
if store.maxPageSize <= 0 {
store.maxPageSize = 10000
}
- glog.Infof("filer store elastic endpoints: %v.", servers)
+ log.Infof("filer store elastic endpoints: %v.", servers)
return store.initialize(options)
}
@@ -110,7 +110,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
}
value, err := jsoniter.Marshal(esEntry)
if err != nil {
- glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v.", err)
}
_, err = store.client.Index().
@@ -120,7 +120,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
BodyJson(string(value)).
Do(ctx)
if err != nil {
- glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v.", err)
}
return nil
@@ -149,7 +149,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
err := jsoniter.Unmarshal(searchResult.Source, esEntry)
return esEntry.Entry, err
}
- glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+ log.Errorf("find entry(%s),%v.", string(fullpath), err)
return nil, filer_pb.ErrNotFound
}
@@ -167,7 +167,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
return nil
}
- glog.Errorf("delete index(%s) %v.", index, err)
+ log.Errorf("delete index(%s) %v.", index, err)
return err
}
@@ -182,7 +182,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
return nil
}
}
- glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+ log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v.", err)
}
@@ -207,7 +207,7 @@ func (store *ElasticStore) ListDirectoryEntries(
func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
indexResult, err := store.client.CatIndices().Do(ctx)
if err != nil {
- glog.Errorf("list indices %v.", err)
+ log.Errorf("list indices %v.", err)
return entries, err
}
for _, index := range indexResult {
@@ -249,7 +249,7 @@ func (store *ElasticStore) listDirectoryEntries(
result := &elastic.SearchResult{}
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
- glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
}
} else {
@@ -259,7 +259,7 @@ func (store *ElasticStore) listDirectoryEntries(
}
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
- glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
}
}
diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go
index 99c03314e..4a8c32c34 100644
--- a/weed/filer/elastic/v7/elastic_store_kv.go
+++ b/weed/filer/elastic/v7/elastic_store_kv.go
@@ -6,7 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
)
@@ -22,7 +22,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
return nil
}
}
- glog.Errorf("delete key(id:%s) %v.", string(key), err)
+ log.Errorf("delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v.", err)
}
@@ -41,7 +41,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
return esEntry.Value, nil
}
}
- glog.Errorf("find key(%s),%v.", string(key), err)
+ log.Errorf("find key(%s),%v.", string(key), err)
return value, filer.ErrKvNotFound
}
@@ -49,7 +49,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
esEntry := &ESKVEntry{value}
val, err := jsoniter.Marshal(esEntry)
if err != nil {
- glog.Errorf("insert key(%s) %v.", string(key), err)
+ log.Errorf("insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v.", err)
}
_, err = store.client.Index().
diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
index 634fba1eb..9a07f9b65 100644
--- a/weed/filer/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -9,7 +9,7 @@ import (
"go.etcd.io/etcd/clientv3"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -45,7 +45,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
}
func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
- glog.Infof("filer store etcd: %s", servers)
+ log.Infof("filer store etcd: %s", servers)
to, err := time.ParseDuration(timeout)
if err != nil {
@@ -169,7 +169,7 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
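Like the other stores, this ListDirectoryEntries pages with a startFileName cursor: callers pass the last name they saw and receive the next batch. A simplified sketch of the calling pattern (the inclusive flag and TTL filtering are omitted), with list standing in for a store query over sorted names:

// Cursor-style directory paging; list is a hypothetical store query.
package main

import "fmt"

// list returns up to limit names strictly after the cursor.
func list(names []string, startFileName string, limit int) []string {
	var out []string
	for _, n := range names {
		if n > startFileName && len(out) < limit {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	names := []string{"a", "b", "c", "d", "e"}
	cursor := ""
	for {
		page := list(names, cursor, 2)
		if len(page) == 0 {
			break
		}
		fmt.Println(page)
		cursor = page[len(page)-1] // feed the last name back as the next cursor
	}
}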
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index f5ab36d37..3e64fd8a6 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -9,7 +9,7 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -87,7 +87,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
- glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+ log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return nil, err
}
return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
@@ -108,14 +108,14 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
break
}
if err != nil {
- glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+ log.Infof("read %s failed, err: %v", urlString, err)
buffer.Reset()
} else {
break
}
}
if err != nil && shouldRetry {
- glog.V(0).Infof("retry reading in %v", waitTime)
+ log.Infof("retry reading in %v", waitTime)
time.Sleep(waitTime)
} else {
break
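retriedFetchChunkData layers two loops: an inner pass over every replica URL, resetting the buffer after a partial read, and an outer pass that backs off with a growing wait before retrying the whole set. A sketch with a hypothetical fetch callback in place of the HTTP read:

// Failover read across replicas with backoff; fetch is hypothetical.
package main

import (
	"bytes"
	"errors"
	"fmt"
	"time"
)

func fetchWithFailover(urls []string, fetch func(string, *bytes.Buffer) error) ([]byte, error) {
	var buf bytes.Buffer
	wait := time.Second
	for wait < 10*time.Second {
		for _, u := range urls {
			if err := fetch(u, &buf); err != nil {
				fmt.Printf("read %s failed: %v\n", u, err)
				buf.Reset() // discard any partial data before the next replica
				continue
			}
			return buf.Bytes(), nil
		}
		time.Sleep(wait)
		wait += wait / 2 // grow the wait between rounds
	}
	return nil, errors.New("all replicas failed")
}

func main() {
	data, err := fetchWithFailover([]string{"replica-a", "replica-b"}, func(u string, b *bytes.Buffer) error {
		if u == "replica-a" {
			return errors.New("connection refused") // hypothetical failure
		}
		b.WriteString("chunk-bytes")
		return nil
	})
	fmt.Println(string(data), err)
}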
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index c75a35f79..2adf4a300 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -158,9 +158,9 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
func logPrintf(name string, visibles []VisibleInterval) {
/*
- glog.V(0).Infof("%s len %d", name, len(visibles))
+ log.Infof("%s len %d", name, len(visibles))
for _, v := range visibles {
- glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
+ log.Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
}
*/
}
@@ -185,22 +185,22 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
}
logPrintf(" before", visibles)
- // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
+ // log.Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
chunkStop := chunk.Offset + int64(chunk.Size)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
- // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+ // log.Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
}
if v.start < chunkStop && chunkStop < v.stop {
t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
- // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+ // log.Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
- // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
+ // log.Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
}
}
newVisibles = append(newVisibles, newV)
@@ -240,7 +240,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chu
for _, chunk := range chunks {
- // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+ // log.Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
visibles = MergeIntoVisibles(visibles, chunk)
logPrintf("add", visibles)
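The merge keeps visible intervals non-overlapping: a newly written chunk punches a hole through any interval it overlaps, leaving a left remainder (=1>), a right remainder (=2>), or the untouched interval (=3>), and then the chunk itself is appended. A minimal worked example of those three cases:

// Worked example of the interval-splitting logic in MergeIntoVisibles.
package main

import "fmt"

type interval struct{ start, stop int64 }

func merge(visibles []interval, chunk interval) []interval {
	var out []interval
	for _, v := range visibles {
		if v.start < chunk.start && chunk.start < v.stop {
			out = append(out, interval{v.start, chunk.start}) // left remainder
		}
		if v.start < chunk.stop && chunk.stop < v.stop {
			out = append(out, interval{chunk.stop, v.stop}) // right remainder
		}
		if chunk.stop <= v.start || v.stop <= chunk.start {
			out = append(out, v) // disjoint, keep as-is
		}
	}
	return append(out, chunk)
}

func main() {
	// one visible interval [0,100), then a chunk [30,60) is written on top
	fmt.Println(merge([]interval{{0, 100}}, interval{30, 60}))
	// => [{0 30} {60 100} {30 60}]
}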
diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go
index 9f9566d9b..f8c2c812c 100644
--- a/weed/filer/filechunks2_test.go
+++ b/weed/filer/filechunks2_test.go
@@ -4,7 +4,7 @@ import (
"sort"
"testing"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -41,6 +41,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
return chunks[i].Offset < chunks[j].Offset
})
for _, chunk := range chunks {
- glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+ log.Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
}
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 105c8e04f..71314136d 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -9,7 +9,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -93,14 +93,14 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
storeIdBytes = make([]byte, 4)
util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
- glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
+ log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
}
- glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+ log.Infof("create %s to %d", FilerStoreId, f.Signature)
} else if err == nil && len(storeIdBytes) == 4 {
f.Signature = int32(util.BytesToUint32(storeIdBytes))
- glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
+ log.Infof("existing %s = %d", FilerStoreId, f.Signature)
} else {
- glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
+ log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
}
}
@@ -145,7 +145,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// check the store directly
- glog.V(4).Infof("find uncached directory: %s", dirPath)
+ log.Tracef("find uncached directory: %s", dirPath)
dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
// no such existing directory
@@ -169,11 +169,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
},
}
- glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+ log.Debugf("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
- glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+ log.Tracef("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
@@ -182,7 +182,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
} else if !dirEntry.IsDirectory() {
- glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+ log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@@ -194,13 +194,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
if lastDirectoryEntry == nil {
- glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
+ log.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
- glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+ log.Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
@@ -209,19 +209,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
if oldEntry == nil {
- glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+ log.Tracef("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
- glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+ log.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
- glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+ log.Tracef("EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
- glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+ log.Tracef("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
- glog.Errorf("update entry %s: %v", entry.FullPath, err)
+ log.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
@@ -231,7 +231,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
f.deleteChunksIfNotNew(oldEntry, entry)
- glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+ log.Tracef("CreateEntry %s: created", entry.FullPath)
return nil
}
@@ -239,11 +239,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil {
if oldEntry.IsDirectory() && !entry.IsDirectory() {
- glog.Errorf("existing %s is a directory", entry.FullPath)
+ log.Errorf("existing %s is a directory", entry.FullPath)
return fmt.Errorf("existing %s is a directory", entry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
- glog.Errorf("existing %s is a file", entry.FullPath)
+ log.Errorf("existing %s is a file", entry.FullPath)
return fmt.Errorf("existing %s is a file", entry.FullPath)
}
}
@@ -321,7 +321,7 @@ func (f *Filer) Shutdown() {
func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
for _, hardLinkId := range hardLinkIds {
if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
- glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+ log.Errorf("delete hard link id %d : %v", hardLinkId, err)
}
}
}
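CreateEntry ensures ancestors exist before inserting the entry itself: it walks the path components from the root down, looks up each directory, and inserts any that are missing (a lost race against a concurrent mkdir is tolerated by re-checking FindEntry). A toy sketch of the walk, with a map standing in for the store:

// Sketch of the parent-directory walk; the map is a stand-in for FilerStore.
package main

import (
	"fmt"
	"strings"
)

func ensureParents(store map[string]bool, fullPath string) {
	parts := strings.Split(strings.Trim(fullPath, "/"), "/")
	dir := ""
	for _, part := range parts[:len(parts)-1] { // skip the file itself
		dir = dir + "/" + part
		if !store[dir] {
			fmt.Println("create directory:", dir)
			store[dir] = true
		}
	}
}

func main() {
	store := map[string]bool{"/a": true} // /a already exists
	ensureParents(store, "/a/b/c.txt")   // creates only /a/b
}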
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
index 4d4f4abc3..b0dba7955 100644
--- a/weed/filer/filer_buckets.go
+++ b/weed/filer/filer_buckets.go
@@ -5,7 +5,7 @@ import (
"math"
"sync"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -32,7 +32,7 @@ func (f *Filer) LoadBuckets() {
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
if err != nil {
- glog.V(1).Infof("no buckets found: %v", err)
+ log.Debugf("no buckets found: %v", err)
return
}
@@ -41,7 +41,7 @@ func (f *Filer) LoadBuckets() {
shouldFsyncMap[bucket] = true
}
- glog.V(1).Infof("buckets found: %d", len(entries))
+ log.Debugf("buckets found: %d", len(entries))
f.buckets.Lock()
for _, entry := range entries {
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
index 5fd8e5b49..0abdc807e 100644
--- a/weed/filer/filer_conf.go
+++ b/weed/filer/filer_conf.go
@@ -5,7 +5,7 @@ import (
"context"
"io"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/jsonpb"
@@ -36,7 +36,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
if err == filer_pb.ErrNotFound {
return nil
}
- glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+ log.Errorf("read filer conf entry %s: %v", filerConfPath, err)
return
}
@@ -46,7 +46,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
data, err := filer.readEntry(chunks)
if err != nil {
- glog.Errorf("read filer conf content: %v", err)
+ log.Errorf("read filer conf content: %v", err)
return
}
@@ -60,7 +60,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
err = proto.UnmarshalText(string(data), conf)
if err != nil {
- glog.Errorf("unable to parse filer conf: %v", err)
+ log.Errorf("unable to parse filer conf: %v", err)
// this is not recoverable
return nil
}
@@ -85,7 +85,7 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
if err != nil {
- glog.Errorf("put location prefix: %v", err)
+ log.Errorf("put location prefix: %v", err)
}
return
}
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 69219fbfa..603312995 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
var dirHardLinkIds []HardLinkId
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster, signatures)
if err != nil {
- glog.V(0).Infof("delete directory %s: %v", p, err)
+ log.Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
chunks = append(chunks, dirChunks...)
@@ -71,12 +71,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
for {
entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
if err != nil {
- glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ log.Errorf("list folder %s: %v", entry.FullPath, err)
return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
- glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+ log.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
}
@@ -107,7 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}
- glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+ log.Tracef("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -120,7 +120,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
- glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+ log.Tracef("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -139,7 +139,7 @@ func (f *Filer) doDeleteCollection(collectionName string) (err error) {
Name: collectionName,
})
if err != nil {
- glog.Infof("delete collection %s: %v", collectionName, err)
+ log.Infof("delete collection %s: %v", collectionName, err)
}
return err
})
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
index 126d162ec..619ec2d66 100644
--- a/weed/filer/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -4,7 +4,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -54,10 +54,10 @@ func (f *Filer) loopProcessingDeletion() {
_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
if err != nil {
if !strings.Contains(err.Error(), "already deleted") {
- glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ log.Infof("deleting fileIds len=%d error: %v", deletionCount, err)
}
} else {
- glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+ log.Debugf("deleting fileIds len=%d", deletionCount)
}
}
})
@@ -76,7 +76,7 @@ func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
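Chunk deletion is asynchronous: DeleteChunks only enqueues file ids (resolving manifest chunks into their data chunks first), and loopProcessingDeletion drains the queue in batches, treating "already deleted" responses as benign. A sketch with a channel standing in for the deletion queue:

// Sketch of the batched deletion drain; deleteBatch is hypothetical.
package main

import (
	"fmt"
	"strings"
	"time"
)

func drainDeletions(queue <-chan string, deleteBatch func([]string) error) {
	for {
		batch := []string{<-queue} // block for the first id
	collect:
		for len(batch) < 1000 {
			select {
			case id := <-queue:
				batch = append(batch, id)
			default:
				break collect // queue drained for now
			}
		}
		if err := deleteBatch(batch); err != nil && !strings.Contains(err.Error(), "already deleted") {
			fmt.Printf("deleting fileIds len=%d error: %v\n", len(batch), err)
		}
	}
}

func main() {
	queue := make(chan string, 16)
	go drainDeletions(queue, func(ids []string) error {
		fmt.Println("deleting", ids)
		return nil
	})
	queue <- "3,0123456789"
	queue <- "3,abcdef0123"
	time.Sleep(100 * time.Millisecond)
}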
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 40755e6a7..51e7f8669 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -9,7 +9,7 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -54,7 +54,7 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
}
if notification.Queue != nil {
- glog.V(3).Infof("notifying entry update %v", fullpath)
+ log.Tracef("notifying entry update %v", fullpath)
notification.Queue.SendMessage(fullpath, eventNotification)
}
@@ -73,7 +73,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
}
data, err := proto.Marshal(event)
if err != nil {
- glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return
}
@@ -96,7 +96,7 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
for {
if err := f.appendToFile(targetFile, buf); err != nil {
- glog.V(1).Infof("log write failed %s: %v", targetFile, err)
+ log.Debugf("log write failed %s: %v", targetFile, err)
time.Sleep(737 * time.Millisecond)
} else {
break
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
index 3de27da6e..f7036d236 100644
--- a/weed/filer/filer_on_meta_event.go
+++ b/weed/filer/filer_on_meta_event.go
@@ -4,7 +4,7 @@ import (
"bytes"
"math"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -22,7 +22,7 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse)
return
}
- glog.V(0).Infof("procesing %v", event)
+ log.Infof("procesing %v", event)
if entry.Name == FilerConfName {
f.reloadFilerConfiguration(entry)
}
@@ -42,7 +42,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
fc := NewFilerConf()
err := fc.loadFromChunks(f, entry.Chunks)
if err != nil {
- glog.Errorf("read filer conf chunks: %v", err)
+ log.Errorf("read filer conf chunks: %v", err)
return
}
f.FilerConf = fc
@@ -54,7 +54,7 @@ func (f *Filer) LoadFilerConf() {
return fc.loadFromFiler(f)
})
if err != nil {
- glog.Errorf("read filer conf: %v", err)
+ log.Errorf("read filer conf: %v", err)
return
}
f.FilerConf = fc
diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go
index 0fbf8310e..6d89c20f9 100644
--- a/weed/filer/filerstore_hardlink.go
+++ b/weed/filer/filerstore_hardlink.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -54,12 +54,12 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
value, err := fsw.KvGet(ctx, key)
if err != nil {
- glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
if err = entry.DecodeAttributesAndChunks(value); err != nil {
- glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
index 4b8dd5ea9..2613c7996 100644
--- a/weed/filer/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -10,7 +10,7 @@ import (
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -37,7 +37,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre
}
func (store *LevelDBStore) initialize(dir string) (err error) {
- glog.Infof("filer store dir: %s", dir)
+ log.Infof("filer store dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@@ -53,7 +53,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
store.db, err = leveldb.RecoverFile(dir, opts)
}
if err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
+ log.Infof("filer store open dir %s: %v", dir, err)
return
}
}
@@ -193,7 +193,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
index 2ad0dd648..1299121c4 100644
--- a/weed/filer/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -13,7 +13,7 @@ import (
"os"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -37,7 +37,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr
}
func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
- glog.Infof("filer store leveldb2 dir: %s", dir)
+ log.Infof("filer store leveldb2 dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@@ -56,7 +56,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
db, dbErr = leveldb.RecoverFile(dbFolder, opts)
}
if dbErr != nil {
- glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+ log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
return dbErr
}
store.dbs = append(store.dbs, db)
@@ -205,7 +205,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
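leveldb2 shards one logical store across dbCount leveldb folders and routes each directory to a shard by hashing its path, so a directory's children always list from a single shard. A sketch of that routing rule; FNV-1a below is an illustrative stand-in, not necessarily the hash the store actually uses:

// Sketch of hash-based shard routing; the hash choice is an assumption.
package main

import (
	"fmt"
	"hash/fnv"
)

func shardFor(dir string, dbCount int) int {
	h := fnv.New32a()
	h.Write([]byte(dir))
	return int(h.Sum32()) % dbCount
}

func main() {
	for _, dir := range []string{"/buckets/a", "/buckets/b", "/tmp"} {
		fmt.Printf("%-12s -> db%d\n", dir, shardFor(dir, 8))
	}
}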
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 9437e9992..3bd4d36f9 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -11,7 +11,7 @@ import (
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -64,7 +64,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
peerSignature, err := ma.readFilerStoreSignature(peer)
for err != nil {
- glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
+ log.Infof("connecting to peer filer %s: %v", peer, err)
time.Sleep(1357 * time.Millisecond)
peerSignature, err = ma.readFilerStoreSignature(peer)
}
@@ -74,27 +74,27 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
lastTsNs = prevTsNs
}
- glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
+ log.Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
var counter int64
var synced bool
maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
if err := Replay(f.Store, event); err != nil {
- glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
+ log.Errorf("failed to reply metadata change from %v: %v", peer, err)
return
}
counter++
if lastPersistTime.Add(time.Minute).Before(time.Now()) {
if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
- glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+ log.Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
} else if !synced {
synced = true
- glog.V(0).Infof("synced with %s", peer)
+ log.Infof("synced with %s", peer)
}
lastPersistTime = time.Now()
counter = 0
} else {
- glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+ log.Infof("failed to update offset for %v: %v", peer, err)
}
}
}
@@ -103,7 +103,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
data, err := proto.Marshal(event)
if err != nil {
- glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return err
}
dir := event.Directory
@@ -147,7 +147,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
}
})
if err != nil {
- glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+ log.Infof("subscribing remote %s meta change: %v", peer, err)
time.Sleep(1733 * time.Millisecond)
}
}
@@ -177,7 +177,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
value, err := f.Store.KvGet(context.Background(), key)
if err == ErrKvNotFound {
- glog.Warningf("readOffset %s not found", peer)
+ log.Warnf("readOffset %s not found", peer)
return 0, nil
}
@@ -187,7 +187,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
lastTsNs = int64(util.BytesToUint64(value))
- glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
+ log.Infof("readOffset %s : %d", peer, lastTsNs)
return
}
@@ -206,7 +206,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int3
return fmt.Errorf("updateOffset %s : %v", peer, err)
}
- glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
+ log.Tracef("updateOffset %s : %d", peer, lastTsNs)
return
}
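readOffset and updateOffset persist per-peer replication progress in the filer's own KV store: the key identifies the peer by its store signature and the value is the last replayed event timestamp in nanoseconds. The encoding below is an assumed illustration of that bookkeeping, not the exact on-disk format:

// Assumed sketch of the peer-offset key/value encoding.
package main

import (
	"encoding/binary"
	"fmt"
)

func offsetKey(peerSignature int32) []byte {
	key := []byte("meta.sync.") // illustrative prefix
	sig := make([]byte, 4)
	binary.BigEndian.PutUint32(sig, uint32(peerSignature))
	return append(key, sig...)
}

func main() {
	kv := map[string][]byte{}

	// updateOffset: store the last replayed timestamp for peer signature 42
	value := make([]byte, 8)
	binary.BigEndian.PutUint64(value, uint64(1605600000000000000))
	kv[string(offsetKey(42))] = value

	// readOffset: resume replication from the stored timestamp
	lastTsNs := int64(binary.BigEndian.Uint64(kv[string(offsetKey(42))]))
	fmt.Println("readOffset:", lastTsNs)
}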
diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go
index feb76278b..ecf47d576 100644
--- a/weed/filer/meta_replay.go
+++ b/weed/filer/meta_replay.go
@@ -3,7 +3,7 @@ package filer
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -14,7 +14,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
var newEntry *Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
- glog.V(4).Infof("deleting %v", oldPath)
+ log.Tracef("deleting %v", oldPath)
if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
return err
}
@@ -26,7 +26,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
- glog.V(4).Infof("creating %v", key)
+ log.Tracef("creating %v", key)
newEntry = FromPbEntry(dir, message.NewEntry)
if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
return err
diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
index d20c6477a..9d3798620 100644
--- a/weed/filer/mongodb/mongodb_store.go
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"go.mongodb.org/mongo-driver/bson"
@@ -134,7 +134,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
- glog.Errorf("find %s: %v", fullpath, err)
+ log.Errorf("find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
@@ -205,7 +205,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.Infof("list %s : %v", entry.FullPath, err)
break
}
@@ -213,7 +213,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
}
if err := cur.Close(ctx); err != nil {
- glog.V(0).Infof("list iterator close: %v", err)
+ log.Infof("list iterator close: %v", err)
}
return entries, err
diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go
index 4aa9c3a33..c7ea69534 100644
--- a/weed/filer/mongodb/mongodb_store_kv.go
+++ b/weed/filer/mongodb/mongodb_store_kv.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
@@ -36,7 +36,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
- glog.Errorf("kv get: %v", err)
+ log.Errorf("kv get: %v", err)
return nil, filer.ErrKvNotFound
}
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index ccc746b90..ca083803e 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -7,7 +7,7 @@ import (
"math/rand"
"sync"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -54,7 +54,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
locations = resp.LocationsMap[vid]
if locations == nil || len(locations.Locations) == 0 {
- glog.V(0).Infof("failed to locate %s", fileId)
+ log.Infof("failed to locate %s", fileId)
return fmt.Errorf("failed to locate %s", fileId)
}
vicCacheLock.Lock()
@@ -101,7 +101,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
c.readerLock.Lock()
defer c.readerLock.Unlock()
- glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+ log.Tracef("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
return c.doReadAt(p[n:], offset+int64(n))
}
@@ -121,7 +121,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
}
if startOffset < chunk.LogicOffset {
gap := int(chunk.LogicOffset - startOffset)
- glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
+ log.Tracef("zero [%d,%d)", startOffset, startOffset+int64(gap))
n += int(min(int64(gap), remaining))
startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
if remaining <= 0 {
@@ -133,10 +133,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
if chunkStart >= chunkStop {
continue
}
- glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
+ log.Tracef("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
if err != nil {
- glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+ log.Errorf("fetching chunk %+v: %v\n", chunk, err)
return
}
bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
@@ -145,11 +145,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
}
- glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
+ log.Tracef("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
if err == nil && remaining > 0 && c.fileSize > startOffset {
delta := int(min(remaining, c.fileSize-startOffset))
- glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
+ log.Tracef("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
n += delta
}
@@ -194,11 +194,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
- glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
+ log.Tracef("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
if data != nil {
- glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
+ log.Tracef("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
} else {
var err error
data, err = c.doFetchFullChunkData(chunkView)
@@ -213,11 +213,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
- glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
+ log.Tracef("+ doFetchFullChunkData %s", chunkView.FileId)
data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
- glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
+ log.Tracef("- doFetchFullChunkData %s", chunkView.FileId)
return data, err
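doReadAt gives sparse files read-as-zero semantics: any gap before a chunk's logical offset is counted as read without copying data, and a trailing hole inside the file size is zero-filled the same way. A simplified worked sketch over two sparse chunks:

// Worked sketch of zero-filled sparse reads; the chunk type is illustrative.
package main

import "fmt"

type chunk struct {
	logicOffset int64
	data        []byte
}

func readAt(chunks []chunk, fileSize int64, p []byte, offset int64) int {
	n := 0
	for _, c := range chunks {
		for offset < c.logicOffset && n < len(p) {
			p[n] = 0 // gap before the chunk reads as zero
			n++
			offset++
		}
		for i := 0; i < len(c.data) && n < len(p); i++ {
			if off := c.logicOffset + int64(i); off >= offset {
				p[n] = c.data[i]
				n++
				offset = off + 1
			}
		}
	}
	for offset < fileSize && n < len(p) {
		p[n] = 0 // trailing hole inside the file size
		n++
		offset++
	}
	return n
}

func main() {
	chunks := []chunk{{2, []byte("ab")}, {6, []byte("cd")}}
	p := make([]byte, 10)
	n := readAt(chunks, 10, p, 0)
	fmt.Printf("%d %q\n", n, p[:n]) // 10 "\x00\x00ab\x00\x00cd\x00\x00"
}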
diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
index 0de9924a3..3abe0bca9 100644
--- a/weed/filer/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -10,7 +10,7 @@ import (
"github.com/go-redis/redis"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -170,7 +170,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
path := util.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(ctx, path)
if err != nil {
- glog.V(0).Infof("list %s : %v", path, err)
+ log.Infof("list %s : %v", path, err)
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index 0374314c0..2bc0a1a36 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -8,7 +8,7 @@ import (
"github.com/go-redis/redis"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -149,7 +149,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, ful
path := util.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(ctx, path)
if err != nil {
- glog.V(0).Infof("list %s : %v", path, err)
+ log.Infof("list %s : %v", path, err)
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index cffdc8303..bd03222f1 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -7,7 +7,7 @@ import (
"math"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -24,7 +24,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
fileId2Url[chunkView.FileId] = urlStrings
@@ -36,12 +36,12 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
if err != nil {
- glog.Errorf("read chunk: %v", err)
+ log.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %v", err)
}
_, err = w.Write(data)
if err != nil {
- glog.Errorf("write chunk: %v", err)
+ log.Errorf("write chunk: %v", err)
return fmt.Errorf("write chunk: %v", err)
}
}
@@ -65,7 +65,7 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
for _, chunkView := range chunkViews {
urlStrings, err := lookupFileIdFn(chunkView.FileId)
if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return nil, err
}
@@ -175,7 +175,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
urlStrings, err := c.lookupFileId(chunkView.FileId)
if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
var buffer bytes.Buffer
@@ -188,7 +188,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
break
}
if err != nil {
- glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ log.Debugf("read %s failed, err: %v", chunkView.FileId, err)
buffer.Reset()
} else {
break
@@ -201,7 +201,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
c.bufferPos = 0
c.bufferOffset = chunkView.LogicOffset
- // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+ // log.Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
return nil
}
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index a8481a435..8a411b60b 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -13,7 +13,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -48,12 +48,12 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
- glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
+ log.Tracef("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
if err := dir.maybeLoadEntry(); err != nil {
- glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
+ log.Tracef("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
@@ -64,14 +64,14 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid
- glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
+ log.Tracef("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
+ log.Tracef("dir Getxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -96,7 +96,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req)
+ log.Tracef("dir %s fsync %+v", dir.FullPath(), req)
return nil
}
@@ -146,7 +146,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
OExcl: req.Flags&fuse.OpenExclusive != 0,
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
+ log.Debugf("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -157,7 +157,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
- glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.EIO
}
@@ -182,21 +182,21 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
if req.Mode&os.ModeNamedPipe != 0 {
- glog.V(1).Infof("mknod named pipe %s", req.String())
+ log.Debugf("mknod named pipe %s", req.String())
return nil, fuse.ENOSYS
}
	if req.Mode&os.ModeSocket != 0 {
- glog.V(1).Infof("mknod socket %s", req.String())
+ log.Debugf("mknod socket %s", req.String())
return nil, fuse.ENOSYS
}
// not going to support mknod for normal files either
- glog.V(1).Infof("mknod %s", req.String())
+ log.Debugf("mknod %s", req.String())
return nil, fuse.ENOSYS
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
- glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("mkdir %s: %s", dir.FullPath(), req.Name)
newEntry := &filer_pb.Entry{
Name: req.Name,
@@ -221,9 +221,9 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("mkdir: %v", request)
+ log.Debugf("mkdir: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return err
}
@@ -238,20 +238,20 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
return node, nil
}
- glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
- glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
+ log.Tracef("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
dirPath := util.FullPath(dir.FullPath())
visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
if visitErr != nil {
- glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
+ log.Errorf("dir Lookup %s: %v", dirPath, visitErr)
return nil, fuse.EIO
}
cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
@@ -261,14 +261,14 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
entry := cachedEntry.ToProtoEntry()
if entry == nil {
- // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
+ // log.Tracef("dir Lookup cache miss %s", fullFilePath)
entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
- glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
+ log.Debugf("dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
} else {
- glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
+ log.Tracef("dir Lookup cache hit %s", fullFilePath)
}
if entry != nil {
@@ -293,13 +293,13 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
return node, nil
}
- glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
+ log.Tracef("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
+ log.Tracef("dir ReadDirAll %s", dir.FullPath())
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@@ -316,12 +316,12 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
dirPath := util.FullPath(dir.FullPath())
if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
- glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
+ log.Errorf("dir ReadDirAll %s: %v", dirPath, err)
return nil, fuse.EIO
}
listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
if listErr != nil {
- glog.Errorf("list meta cache: %v", listErr)
+ log.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}
for _, cachedEntry := range listedEntries {
@@ -352,11 +352,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
}
// first, ensure the filer store can correctly delete
- glog.V(3).Infof("remove file: %v", req)
+ log.Tracef("remove file: %v", req)
isDeleteData := entry.HardLinkCounter <= 1
err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
if err != nil {
- glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Tracef("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.ENOENT
}
@@ -389,11 +389,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- glog.V(3).Infof("remove directory entry: %v", req)
+ log.Tracef("remove directory entry: %v", req)
ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
if err != nil {
- glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
if strings.Contains(err.Error(), "non-empty") {
return fuse.EEXIST
}
@@ -410,7 +410,7 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
+ log.Tracef("%v dir setattr %+v", dir.FullPath(), req)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -438,7 +438,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("dir Setxattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -454,7 +454,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("dir Removexattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -470,7 +470,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e
func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
+ log.Tracef("dir Listxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -485,7 +485,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
func (dir *Dir) Forget() {
- glog.V(4).Infof("Forget dir %s", dir.FullPath())
+ log.Tracef("Forget dir %s", dir.FullPath())
dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
@@ -517,10 +517,10 @@ func (dir *Dir) saveEntry() error {
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("save dir entry: %v", request)
+ log.Debugf("save dir entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+ log.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index ba3280f03..3a5d4d12e 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
@@ -26,10 +26,10 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
oldFile, ok := old.(*File)
if !ok {
- glog.Errorf("old node is not a file: %+v", old)
+ log.Errorf("old node is not a file: %+v", old)
}
- glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
+ log.Tracef("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
return nil, err
@@ -69,13 +69,13 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
- glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
@@ -96,7 +96,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
+ log.Tracef("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
@@ -121,7 +121,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
+ log.Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
@@ -147,7 +147,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
+ log.Tracef("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
return entry.Attributes.SymlinkTarget, nil
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index 3f73d0eb6..4907d658a 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -6,7 +6,7 @@ import (
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -18,12 +18,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
- glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+ log.Tracef("dir Rename %s => %s", oldPath, newPath)
// find local old entry
oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
if err != nil {
- glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
+ log.Errorf("dir Rename can not find source %s : %v", oldPath, err)
return fuse.ENOENT
}
@@ -41,7 +41,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
_, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
- glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
+ log.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
@@ -49,18 +49,18 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
})
if err != nil {
- glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
// TODO: replicate renaming logic on filer
if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
- glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldEntry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
- glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 11089186f..451bc0d4b 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -41,7 +41,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
- glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
+ log.Tracef("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
@@ -111,7 +111,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
if err != nil {
- glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
+ log.Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
pages.chunkSaveErrChan <- err
return
}
@@ -120,7 +120,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
pages.chunkAddLock.Lock()
defer pages.chunkAddLock.Unlock()
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
- glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
+ log.Tracef("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
}
if pages.f.wfs.concurrentWriters != nil {
diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go
index 1404bf78c..5757c00a8 100644
--- a/weed/filesys/dirty_page_interval.go
+++ b/weed/filesys/dirty_page_interval.go
@@ -30,12 +30,12 @@ func (list *IntervalLinkedList) Size() int64 {
return list.Tail.Offset + list.Tail.Size - list.Head.Offset
}
func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
- // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
+ // log.Tracef("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
list.Tail.Next = node
list.Tail = node
}
func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
- // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
+ // log.Tracef("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
node.Next = list.Head
list.Head = node
}
@@ -46,7 +46,7 @@ func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
if nodeStart < nodeStop {
- // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
+ // log.Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
}
@@ -144,7 +144,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
}
if prevList != nil && nextList != nil {
- // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
+ // log.Tracef("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
prevList.Tail.Next = nextList.Head
prevList.Tail = nextList.Tail
c.removeList(nextList)
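The commented-out traces above spell out the half-open interval arithmetic used when linking lists: [a,b) + [b,c) => [a,c), with Size computed as Tail.Offset + Tail.Size - Head.Offset. A tiny standalone illustration of that arithmetic (hypothetical helper types, not the committed ones):

package main

import "fmt"

type interval struct{ offset, size int64 }

// concat joins two adjacent half-open intervals [a.offset, a.offset+a.size)
// and [b.offset, b.offset+b.size), assuming b begins exactly where a ends.
func concat(a, b interval) interval {
	return interval{offset: a.offset, size: b.offset + b.size - a.offset}
}

func main() {
	fmt.Println(concat(interval{0, 5}, interval{5, 3})) // {0 8}, i.e. [0,8)
}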
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 3bffa156e..6dcc7ac7c 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -11,7 +11,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -45,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
- glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+ log.Tracef("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
entry := file.entry
if file.isOpen <= 0 || entry == nil {
@@ -60,7 +60,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
attr.Size = filer.FileSize(entry)
if file.isOpen > 0 {
attr.Size = entry.Attributes.FileSize
- glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
+ log.Tracef("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
}
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
@@ -78,7 +78,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("file Getxattr %s", file.fullpath())
+ log.Tracef("file Getxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -90,13 +90,13 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
- glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
+ log.Tracef("file %v open %+v", file.fullpath(), req)
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
resp.Handle = fuse.HandleID(handle.handle)
- glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+ log.Tracef("%v file open handle id = %d", file.fullpath(), handle.handle)
return handle, nil
@@ -104,7 +104,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
+ log.Tracef("%v file setattr %+v", file.fullpath(), req)
_, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -123,7 +123,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if req.Valid.Size() {
- glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
+ log.Tracef("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
if req.Size < filer.FileSize(file.entry) {
// fmt.Printf("truncate %v \n", fullPath)
var chunks []*filer_pb.FileChunk
@@ -135,10 +135,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
int64Size = int64(req.Size) - chunk.Offset
if int64Size > 0 {
chunks = append(chunks, chunk)
- glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
+ log.Tracef("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
chunk.Size = uint64(int64Size)
} else {
- glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
+ log.Tracef("truncated whole chunk %+v\n", chunk.GetFileIdString())
truncatedChunks = append(truncatedChunks, chunk)
}
}
@@ -195,7 +195,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
+ log.Tracef("file Setxattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -212,7 +212,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
+ log.Tracef("file Removexattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -229,7 +229,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("file Listxattr %s", file.fullpath())
+ log.Tracef("file Listxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -247,14 +247,14 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
+ log.Tracef("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
func (file *File) Forget() {
t := util.NewFullPath(file.dir.FullPath(), file.Name)
- glog.V(4).Infof("Forget file %s", t)
+ log.Tracef("Forget file %s", t)
file.wfs.fsNodeCache.DeleteFsNode(t)
}
@@ -271,13 +271,13 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
}
entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
- glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Tracef("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return entry, err
}
if entry != nil {
file.setEntry(entry)
} else {
- glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Warnf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
}
return entry, nil
}
@@ -319,7 +319,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
file.reader = nil
- glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+ log.Tracef("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
@@ -348,10 +348,10 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
Signatures: []int32{file.wfs.signature},
}
- glog.V(4).Infof("save file entry: %v", request)
+ log.Tracef("save file entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 54410a0ba..80ae6ae99 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -14,7 +14,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -57,7 +57,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
+ log.Tracef("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
fh.RLock()
defer fh.RUnlock()
@@ -82,12 +82,12 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
}
if err != nil {
- glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
+ log.Warnf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
return fuse.EIO
}
if totalRead > int64(len(buff)) {
- glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
+ log.Warnf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
totalRead = min(int64(len(buff)), totalRead)
}
// resp.Data = buff[:totalRead]
@@ -106,7 +106,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
fileSize := int64(filer.FileSize(fh.f.entry))
if fileSize == 0 {
- glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+ log.Debugf("empty fh %v", fh.f.fullpath())
return 0, io.EOF
}
@@ -127,10 +127,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
totalRead, err := fh.f.reader.ReadAt(buff, offset)
if err != nil && err != io.EOF {
- glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+ log.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+ log.Tracef("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
return int64(totalRead), err
}
@@ -150,7 +150,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
}
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
- glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
+ log.Tracef("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
fh.dirtyPages.AddPage(req.Offset, data)
@@ -169,7 +169,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
- glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
+ log.Tracef("Release %v fh %d", fh.f.fullpath(), fh.handle)
fh.Lock()
defer fh.Unlock()
@@ -177,7 +177,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
fh.f.isOpen--
if fh.f.isOpen < 0 {
- glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
+ log.Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
fh.f.isOpen = 0
return nil
}
@@ -185,7 +185,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
if fh.f.isOpen == 0 {
if err := fh.doFlush(ctx, req.Header); err != nil {
- glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
+ log.Errorf("Release doFlush %s: %v", fh.f.Name, err)
}
// stop the goroutine
@@ -211,7 +211,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
// flush works at fh level
// send the data to the OS
- glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
+ log.Tracef("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
fh.dirtyPages.saveExistingPagesToStorage()
@@ -250,9 +250,9 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
Signatures: []int32{fh.f.wfs.signature},
}
- glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
+ log.Tracef("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
for i, chunk := range fh.f.entry.Chunks {
- glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+ log.Tracef("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
@@ -261,7 +261,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
if manifestErr != nil {
// not good, but should be ok
- glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
+ log.Infof("MaybeManifestize: %v", manifestErr)
}
fh.f.entry.Chunks = append(chunks, manifestChunks...)
@@ -269,7 +269,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ log.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
@@ -283,7 +283,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
}
if err != nil {
- glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+ log.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
return fuse.EIO
}
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index 4b282253d..2db307cd9 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -8,7 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
)
@@ -44,7 +44,7 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
}
if err := store.Initialize(config, ""); err != nil {
- glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+ log.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
}
return filer.NewFilerStoreWrapper(store)
@@ -72,7 +72,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
// skip the unnecessary deletion
// leave the update to the following InsertEntry operation
} else {
- glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name())
+ log.Tracef("DeleteEntry %s/%s", oldPath, oldPath.Name())
if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
return err
}
@@ -85,7 +85,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
if newEntry != nil {
newDir, _ := newEntry.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
- glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name())
+ log.Tracef("InsertEntry %s/%s", newDir, newEntry.Name())
if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
return err
}
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 4089cea28..5c2a8fc46 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -14,13 +14,13 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
- glog.V(4).Infof("ReadDirAllEntries %s ...", path)
+ log.Tracef("ReadDirAllEntries %s ...", path)
util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
- glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+ log.Infof("read %s: %v", entry.FullPath, err)
return err
}
if entry.IsDirectory() {
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
index f9973f436..2e9475675 100644
--- a/weed/filesys/meta_cache/meta_cache_subscribe.go
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -28,7 +28,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
var newEntry *filer.Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(dir, message.OldEntry.Name)
- glog.V(4).Infof("deleting %v", oldPath)
+ log.Tracef("deleting %v", oldPath)
}
if message.NewEntry != nil {
@@ -36,7 +36,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
- glog.V(4).Infof("creating %v", key)
+ log.Tracef("creating %v", key)
newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
@@ -73,13 +73,13 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
if err := processEventFn(resp); err != nil {
- glog.Fatalf("process %v: %v", resp, err)
+ log.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
- glog.Errorf("subscribing filer meta change: %v", err)
+ log.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index cd14e8032..a6a77874e 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -17,7 +17,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -128,7 +128,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
fullpath := file.fullpath()
- glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
+ log.Tracef("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
@@ -156,7 +156,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
- glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+ log.Tracef("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
delete(wfs.handles, fullpath.AsInode())
@@ -166,7 +166,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
// Statfs is called to obtain file system metadata. Implements fuse.FSStatfser
func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
- glog.V(4).Infof("reading fs stats: %+v", req)
+ log.Tracef("reading fs stats: %+v", req)
if wfs.stats.lastChecked < time.Now().Unix()-20 {
@@ -178,13 +178,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
}
- glog.V(4).Infof("reading filer stats: %+v", request)
+ log.Tracef("reading filer stats: %+v", request)
resp, err := client.Statistics(context.Background(), request)
if err != nil {
- glog.V(0).Infof("reading filer stats %v: %v", request, err)
+ log.Infof("reading filer stats %v: %v", request, err)
return err
}
- glog.V(4).Infof("read filer stats: %+v", resp)
+ log.Tracef("read filer stats: %+v", resp)
wfs.stats.TotalSize = resp.TotalSize
wfs.stats.UsedSize = resp.UsedSize
@@ -194,7 +194,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
})
if err != nil {
- glog.V(0).Infof("filer Statistics: %v", err)
+ log.Infof("filer Statistics: %v", err)
return err
}
}
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index a245b6795..50f2ec2f0 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -6,7 +6,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -24,7 +24,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
fileIds = append(fileIds, dChunk.GetFileIdString())
@@ -49,7 +49,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
m := make(map[string]operation.LookupResult)
- glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
+ log.Tracef("deleteFileIds lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index 83e40e7f5..2540cfd5a 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -6,7 +6,7 @@ import (
"io"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -32,7 +32,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@@ -55,11 +55,11 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
- glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+ log.Infof("upload data %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
- glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+ log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
}
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
index 92e43b675..63bded303 100644
--- a/weed/filesys/xattr.go
+++ b/weed/filesys/xattr.go
@@ -111,7 +111,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
fullpath := util.NewFullPath(dir, name)
- // glog.V(3).Infof("read entry cache miss %s", fullpath)
+ // log.Tracef("read entry cache miss %s", fullpath)
// read from async meta cache
meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
diff --git a/weed/glog/README b/weed/glog/README
index 5f9c11485..76cdabdc5 100644
--- a/weed/glog/README
+++ b/weed/glog/README
@@ -24,7 +24,7 @@ The comment from glog.go introduces the ideas:
glog.Info("Prepare to repel boarders")
- glog.Fatalf("Initialization failed: %s", err)
+ log.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
diff --git a/weed/glog/glog.go b/weed/glog/glog.go
index f46632f1c..50fa73e1a 100644
--- a/weed/glog/glog.go
+++ b/weed/glog/glog.go
@@ -22,7 +22,7 @@
//
// glog.Info("Prepare to repel boarders")
//
-// glog.Fatalf("Initialization failed: %s", err)
+// log.Fatalf("Initialization failed: %s", err)
//
// See the documentation for the V function for an explanation of these examples:
//
diff --git a/weed/images/resizing.go b/weed/images/resizing.go
index b048daa1c..c05b70064 100644
--- a/weed/images/resizing.go
+++ b/weed/images/resizing.go
@@ -10,7 +10,7 @@ import (
"github.com/disintegration/imaging"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -50,7 +50,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
}
return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
} else {
- glog.Error(err)
+ log.Error(err)
}
return read, 0, 0
}
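Unlike glog's -v flag, the Tracef and Debugf calls introduced above are filtered by the logger's level at runtime. Assuming the wrapper delegates to the logrus standard logger (a sketch under that assumption, not the committed setup), the rough counterpart of running with -v=4 would be:

package main

import "github.com/sirupsen/logrus"

func main() {
	// Default InfoLevel suppresses Tracef/Debugf; TraceLevel shows everything,
	// roughly like glog -v=4 for the converted call sites.
	logrus.SetLevel(logrus.TraceLevel)
	logrus.Tracef("trace output now enabled")
}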
diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go
index 8e5b56fd0..505560ce0 100644
--- a/weed/messaging/broker/broker_append.go
+++ b/weed/messaging/broker/broker_append.go
@@ -5,7 +5,7 @@ import (
"fmt"
"io"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -34,7 +34,7 @@ func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messag
_, err := client.AppendToEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("append to file %v: %v", request, err)
+ log.Infof("append to file %v: %v", request, err)
return err
}
@@ -61,7 +61,7 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@@ -98,7 +98,7 @@ func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient
if err == io.EOF {
return
}
- glog.V(0).Infof("fail to connect to %s: %v", filer, err)
+ log.Infof("fail to connect to %s: %v", filer, err)
} else {
break
}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
index 3c14f3220..594ce057f 100644
--- a/weed/messaging/broker/broker_grpc_server_discovery.go
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -78,11 +78,11 @@ func (broker *MessageBroker) checkFilers() {
found = true
break
}
- glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
+ log.Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
time.Sleep(time.Second)
}
}
- glog.V(0).Infof("received master list: %s", masters)
+ log.Infof("received master list: %s", masters)
// contact each masters for filers
var filers []string
@@ -105,11 +105,11 @@ func (broker *MessageBroker) checkFilers() {
found = true
break
}
- glog.V(0).Infof("failed to list filers: %v", err)
+ log.Infof("failed to list filers: %v", err)
time.Sleep(time.Second)
}
}
- glog.V(0).Infof("received filer list: %s", filers)
+ log.Infof("received filer list: %s", filers)
broker.option.Filers = filers
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
index 6e6b723d1..515c70b96 100644
--- a/weed/messaging/broker/broker_grpc_server_publish.go
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -8,7 +8,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@@ -65,7 +65,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
for {
// println("recv")
in, err := stream.Recv()
- // glog.V(0).Infof("recieved %v err: %v", in, err)
+ // log.Infof("recieved %v err: %v", in, err)
if err == io.EOF {
return nil
}
@@ -81,7 +81,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
data, err := proto.Marshal(in.Data)
if err != nil {
- glog.Errorf("marshall error: %v\n", err)
+ log.Errorf("marshall error: %v\n", err)
continue
}
@@ -97,7 +97,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
}
if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
- glog.V(0).Infof("err writing %s: %v", md5File, err)
+ log.Infof("err writing %s: %v", md5File, err)
}
// fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
@@ -105,7 +105,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
// send the close ack
// println("server send ack closing")
if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
- glog.V(0).Infof("err sending close response: %v", err)
+ log.Infof("err sending close response: %v", err)
}
return nil
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
index df4052096..2bb41dcef 100644
--- a/weed/messaging/broker/broker_grpc_server_subscribe.go
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -10,7 +10,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@@ -76,7 +76,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
Data: m,
})
if err != nil {
- glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
+ log.Infof("=> subscriber %v: %+v", subscriberId, err)
}
return err
}
@@ -84,12 +84,12 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
m := &messaging_pb.Message{}
if err = proto.Unmarshal(logEntry.Data, m); err != nil {
- glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ log.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
return err
}
// fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
if err = eachMessageFn(m); err != nil {
- glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
+ log.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
return err
}
if m.IsClose {
@@ -122,7 +122,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
return isConnected
}, eachLogEntryFn)
if err != nil {
- glog.Errorf("processed to %v: %v", lastReadTime, err)
+ log.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
break
diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go
index 06162471c..8d540755f 100644
--- a/weed/messaging/broker/broker_server.go
+++ b/weed/messaging/broker/broker_server.go
@@ -6,7 +6,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -52,7 +52,7 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
defer cancel()
stream, err := client.KeepConnected(ctx)
if err != nil {
- glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ log.Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
@@ -67,24 +67,24 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
Name: broker.option.Ip,
GrpcPort: uint32(broker.option.Port),
}); err != nil {
- glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ log.Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// TODO send events of adding/removing topics
- glog.V(0).Infof("conntected with filer: %v", filer)
+ log.Infof("conntected with filer: %v", filer)
for {
if err := stream.Send(&filer_pb.KeepConnectedRequest{
Name: broker.option.Ip,
GrpcPort: uint32(broker.option.Port),
}); err != nil {
- glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ log.Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// println("send heartbeat")
if _, err := stream.Recv(); err != nil {
- glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ log.Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// println("received reply")
diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go
index edddca813..b7705281e 100644
--- a/weed/messaging/broker/topic_manager.go
+++ b/weed/messaging/broker/topic_manager.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
)
@@ -65,7 +65,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
)
if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
- glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ log.Infof("log write failed %s: %v", targetFile, err)
}
}
logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go
index d881049dd..8ae74869d 100644
--- a/weed/notification/aws_sqs/aws_sqs_pub.go
+++ b/weed/notification/aws_sqs/aws_sqs_pub.go
@@ -8,7 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *AwsSqsPub) GetName() string {
}
func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) {
- glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
- glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
+ log.Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ log.Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),
diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go
index 36211692c..32d0193df 100644
--- a/weed/notification/configuration.go
+++ b/weed/notification/configuration.go
@@ -1,7 +1,7 @@
package notification
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"github.com/spf13/viper"
@@ -32,11 +32,11 @@ func LoadConfiguration(config *viper.Viper, prefix string) {
for _, queue := range MessageQueues {
if config.GetBool(prefix + queue.GetName() + ".enabled") {
if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil {
- glog.Fatalf("Failed to initialize notification for %s: %+v",
+ log.Fatalf("Failed to initialize notification for %s: %+v",
queue.GetName(), err)
}
Queue = queue
- glog.V(0).Infof("Configure notification message queue for %s", queue.GetName())
+ log.Infof("Configure notification message queue for %s", queue.GetName())
return
}
}
@@ -50,7 +50,7 @@ func validateOneEnabledQueue(config *viper.Viper) {
if enabledQueue == "" {
enabledQueue = queue.GetName()
} else {
- glog.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
+ log.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
}
}
}
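LoadConfiguration above enables at most one queue, keyed off <prefix><queue name>.enabled in the viper config. A hedged illustration of that lookup (prefix and key names inferred from the hunks above; values hypothetical):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Stand-in for what filer.toml would provide.
	v.Set("notification.kafka.enabled", true)

	prefix, queueName := "notification.", "kafka"
	fmt.Println(v.GetBool(prefix + queueName + ".enabled")) // true
}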
diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
index 1ae102509..049eb837a 100644
--- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
+++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
@@ -22,7 +22,7 @@ import (
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
// _ "gocloud.dev/pubsub/azuresb"
@@ -46,10 +46,10 @@ func (k *GoCDKPubSub) GetName() string {
func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
k.topicURL = configuration.GetString(prefix + "topic_url")
- glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
+ log.Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
if err != nil {
- glog.Fatalf("Failed to open topic: %v", err)
+ log.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
return nil
diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go
index 363a86eb6..65cf8b850 100644
--- a/weed/notification/google_pub_sub/google_pub_sub.go
+++ b/weed/notification/google_pub_sub/google_pub_sub.go
@@ -6,7 +6,7 @@ import (
"os"
"cloud.google.com/go/pubsub"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -26,8 +26,8 @@ func (k *GooglePubSub) GetName() string {
}
func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
+ log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetString(prefix+"google_application_credentials"),
configuration.GetString(prefix+"project_id"),
@@ -43,13 +43,13 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
- glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
+ log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
}
}
client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
if err != nil {
- glog.Fatalf("Failed to create client: %v", err)
+ log.Fatalf("Failed to create client: %v", err)
}
k.topic = client.Topic(topicName)
@@ -57,11 +57,11 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
if !exists {
k.topic, err = client.CreateTopic(ctx, topicName)
if err != nil {
- glog.Fatalf("Failed to create topic %s: %v", topicName, err)
+ log.Fatalf("Failed to create topic %s: %v", topicName, err)
}
}
} else {
- glog.Fatalf("Failed to check topic %s: %v", topicName, err)
+ log.Fatalf("Failed to check topic %s: %v", topicName, err)
}
return nil
diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go
index 8d83b5892..6c737c716 100644
--- a/weed/notification/kafka/kafka_queue.go
+++ b/weed/notification/kafka/kafka_queue.go
@@ -2,7 +2,7 @@ package kafka
import (
"github.com/Shopify/sarama"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -22,8 +22,8 @@ func (k *KafkaQueue) GetName() string {
}
func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
- glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
- glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
+ log.Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ log.Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetStringSlice(prefix+"hosts"),
configuration.GetString(prefix+"topic"),
@@ -67,7 +67,7 @@ func (k *KafkaQueue) handleSuccess() {
for {
pm := <-k.producer.Successes()
if pm != nil {
- glog.V(3).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
+ log.Tracef("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
}
}
}
@@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() {
for {
err := <-k.producer.Errors()
if err != nil {
- glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
+ log.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
}
}
}
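handleSuccess and handleError above loop forever draining the two feedback channels of an async Kafka producer; with sarama, both channels must be consumed or the producer eventually stalls. KafkaQueue's initialize is not shown in this section, so the wiring below is a hedged sketch of how these handlers are presumably started; wireProducer is a hypothetical name, and only handleSuccess, handleError, and the k.producer field come from the hunks above.

// Hedged sketch: wiring a sarama AsyncProducer to the handlers above.
func (k *KafkaQueue) wireProducer(hosts []string) error {
	// Producer.Return.Successes defaults to false and must be enabled
	// for the Successes() channel to deliver; Return.Errors defaults to true.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	producer, err := sarama.NewAsyncProducer(hosts, config)
	if err != nil {
		return err
	}
	k.producer = producer
	go k.handleSuccess() // drains producer.Successes() forever
	go k.handleError()   // drains producer.Errors() forever
	return nil
}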
diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go
index 1ca4786a1..8f28265bc 100644
--- a/weed/notification/log/log_queue.go
+++ b/weed/notification/log/log_queue.go
@@ -1,7 +1,7 @@
package kafka
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -24,6 +24,6 @@ func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (
func (k *LogQueue) SendMessage(key string, message proto.Message) (err error) {
- glog.V(0).Infof("%v: %+v", key, message)
+ log.Infof("%v: %+v", key, message)
return nil
}
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index 1bac028ff..c918f8e04 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -12,7 +12,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -57,7 +57,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error)
if isCompressed {
var err error
if buffer, err = util.DecompressData(buffer); err != nil {
- glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
+ log.Infof("fail to decompress chunk manifest: %v", err)
}
}
cm := ChunkManifest{}
@@ -79,12 +79,12 @@ func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDial
}
results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
- glog.V(0).Infof("delete %+v: %v", fileIds, err)
+ log.Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
}
for _, result := range results {
if result.Error != "" {
- glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error)
+ log.Infof("delete file %+v: %v", result.FileId, result.Error)
return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error)
}
}
diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go
index 025a65b38..8daf54e1e 100644
--- a/weed/operation/grpc_client.go
+++ b/weed/operation/grpc_client.go
@@ -7,7 +7,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -32,7 +32,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
sepIndex := strings.LastIndex(volumeServer, ":")
port, err := strconv.Atoi(volumeServer[sepIndex+1:])
if err != nil {
- glog.Errorf("failed to parse volume server address: %v", volumeServer)
+ log.Errorf("failed to parse volume server address: %v", volumeServer)
return "", err
}
return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil
diff --git a/weed/operation/lookup_vid_cache.go b/weed/operation/lookup_vid_cache.go
index ccc1f2beb..71a97016b 100644
--- a/weed/operation/lookup_vid_cache.go
+++ b/weed/operation/lookup_vid_cache.go
@@ -6,7 +6,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var ErrorNotFound = errors.New("not found")
@@ -23,7 +23,7 @@ type VidCache struct {
func (vc *VidCache) Get(vid string) ([]Location, error) {
id, err := strconv.Atoi(vid)
if err != nil {
- glog.V(1).Infof("Unknown volume id %s", vid)
+ log.Debugf("Unknown volume id %s", vid)
return nil, err
}
vc.RLock()
@@ -42,7 +42,7 @@ func (vc *VidCache) Get(vid string) ([]Location, error) {
func (vc *VidCache) Set(vid string, locations []Location, duration time.Duration) {
id, err := strconv.Atoi(vid)
if err != nil {
- glog.V(1).Infof("Unknown volume id %s", vid)
+ log.Debugf("Unknown volume id %s", vid)
return
}
vc.Lock()
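VidCache guards its lookup table with a sync.RWMutex: Get takes the shared read lock, so concurrent lookups never serialize against each other, while Set takes the exclusive lock. The same pattern in isolation, with a simplified map-backed stand-in for the real Location slice (assumes only the standard library sync package):

// Minimal sketch of the RWMutex pattern used by VidCache above.
type locationCache struct {
	sync.RWMutex
	byVid map[int][]string
}

func (c *locationCache) Get(vid int) ([]string, bool) {
	c.RLock() // shared lock: many readers may proceed at once
	defer c.RUnlock()
	locs, ok := c.byVid[vid]
	return locs, ok
}

func (c *locationCache) Set(vid int, locs []string) {
	c.Lock() // exclusive lock: blocks readers and writers
	defer c.Unlock()
	c.byVid[vid] = locs
}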
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 25843c892..60658f0d3 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -91,14 +91,14 @@ func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
fh, openErr := os.Open(fullPathFilename)
if openErr != nil {
- glog.V(0).Info("Failed to open file: ", fullPathFilename)
+ log.Info("Failed to open file: ", fullPathFilename)
return ret, openErr
}
ret.Reader = fh
fi, fiErr := fh.Stat()
if fiErr != nil {
- glog.V(0).Info("Failed to stat file:", fullPathFilename)
+ log.Info("Failed to stat file:", fullPathFilename)
return ret, fiErr
}
ret.ModTime = fi.ModTime().UTC().Unix()
@@ -210,7 +210,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
func upload_one_chunk(filename string, reader io.Reader, master,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
- glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
+ log.Trace("Uploading part ", filename, " to ", fileUrl, "...")
uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
if uploadError != nil {
return 0, uploadError
@@ -223,7 +223,7 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
if e != nil {
return e
}
- glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
+ log.Trace("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
q.Set("cm", "true")
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index fccc24b16..bb41b5c81 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -15,7 +15,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -97,7 +97,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by
if err == nil {
return
} else {
- glog.Warningf("uploading to %s: %v", uploadUrl, err)
+ log.Warnf("uploading to %s: %v", uploadUrl, err)
}
}
return
@@ -203,22 +203,22 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
file_writer, cp_err := body_writer.CreatePart(h)
if cp_err != nil {
- glog.V(0).Infoln("error creating form file", cp_err.Error())
+ log.Infoln("error creating form file", cp_err.Error())
return nil, cp_err
}
if err := fillBufferFunction(file_writer); err != nil {
- glog.V(0).Infoln("error copying data", err)
+ log.Infoln("error copying data", err)
return nil, err
}
content_type := body_writer.FormDataContentType()
if err := body_writer.Close(); err != nil {
- glog.V(0).Infoln("error closing body", err)
+ log.Infoln("error closing body", err)
return nil, err
}
req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes()))
if postErr != nil {
- glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr)
+ log.Debugf("create upload request %s: %v", uploadUrl, postErr)
return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr)
}
req.Header.Set("Content-Type", content_type)
@@ -231,7 +231,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
// print("+")
resp, post_err := HttpClient.Do(req)
if post_err != nil {
- glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
+ log.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
debug.PrintStack()
return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
}
@@ -252,7 +252,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
- glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
+ log.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err)
}
if ret.Error != "" {
diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go
index 96a716d5b..3958449ab 100644
--- a/weed/pb/filer_pb/filer_client.go
+++ b/weed/pb/filer_pb/filer_client.go
@@ -10,7 +10,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -35,18 +35,18 @@ func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry
Name: name,
}
- // glog.V(3).Infof("read %s request: %v", fullFilePath, request)
+ // log.Tracef("read %s request: %v", fullFilePath, request)
resp, err := LookupEntry(client, request)
if err != nil {
if err == ErrNotFound {
return nil
}
- glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+ log.Tracef("read %s %v: %v", fullFilePath, resp, err)
return err
}
if resp.Entry == nil {
- // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
+ // log.Tracef("read %s entry: %v", fullFilePath, entry)
return nil
}
@@ -83,7 +83,7 @@ func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, f
InclusiveStartFrom: inclusive,
}
- glog.V(4).Infof("read directory: %v", request)
+ log.Tracef("read directory: %v", request)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.ListEntries(ctx, request)
@@ -130,14 +130,14 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin
Name: entryName,
}
- glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ log.Tracef("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
resp, err := LookupEntry(client, request)
if err != nil {
if err == ErrNotFound {
exists = false
return nil
}
- glog.V(0).Infof("exists entry %v: %v", request, err)
+ log.Infof("exists entry %v: %v", request, err)
return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
}
@@ -173,9 +173,9 @@ func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string,
Entry: entry,
}
- glog.V(1).Infof("mkdir: %v", request)
+ log.Debugf("mkdir: %v", request)
if err := CreateEntry(client, request); err != nil {
- glog.V(0).Infof("mkdir %v: %v", request, err)
+ log.Infof("mkdir %v: %v", request, err)
return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
}
@@ -204,9 +204,9 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string
Entry: entry,
}
- glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+ log.Debugf("create file: %s/%s", parentDirectoryPath, fileName)
if err := CreateEntry(client, request); err != nil {
- glog.V(0).Infof("create file %v:%v", request, err)
+ log.Infof("create file %v:%v", request, err)
return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
}
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index b46385c8f..84e8c3a40 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -6,7 +6,7 @@ import (
"fmt"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/golang/protobuf/proto"
"github.com/viant/ptrie"
@@ -88,11 +88,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
resp, err := client.CreateEntry(context.Background(), request)
if err != nil {
- glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+ log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
return fmt.Errorf("CreateEntry: %v", err)
}
if resp.Error != "" {
- glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
+ log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
return fmt.Errorf("CreateEntry : %v", resp.Error)
}
return nil
@@ -101,7 +101,7 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error {
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
+ log.Debugf("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
return fmt.Errorf("UpdateEntry: %v", err)
}
return nil
@@ -113,7 +113,7 @@ func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest
if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil, ErrNotFound
}
- glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+ log.Tracef("read %s/%v: %v", request.Directory, request.Name, err)
return nil, fmt.Errorf("LookupEntry1: %v", err)
}
if resp.Entry == nil {
diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go
index c4f733f5c..d44bc90f3 100644
--- a/weed/pb/volume_info.go
+++ b/weed/pb/volume_info.go
@@ -10,7 +10,7 @@ import (
"github.com/golang/protobuf/jsonpb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@@ -19,28 +19,28 @@ func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, e
volumeInfo := &volume_server_pb.VolumeInfo{}
- glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
+ log.Debugf("maybeLoadVolumeInfo checks %s", fileName)
if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
if !exists {
return volumeInfo, false, nil
}
if !canRead {
- glog.Warningf("can not read %s", fileName)
+ log.Warnf("can not read %s", fileName)
return volumeInfo, false, fmt.Errorf("can not read %s", fileName)
}
return volumeInfo, false, nil
}
- glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
+ log.Debugf("maybeLoadVolumeInfo reads %s", fileName)
tierData, readErr := ioutil.ReadFile(fileName)
if readErr != nil {
- glog.Warningf("fail to read %s : %v", fileName, readErr)
+ log.Warnf("fail to read %s : %v", fileName, readErr)
return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr)
}
- glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+ log.Debugf("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
- glog.Warningf("unmarshal error: %v", err)
+ log.Warnf("unmarshal error: %v", err)
return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err)
}
diff --git a/weed/replication/repl_util/replication_utli.go b/weed/replication/repl_util/replication_utli.go
index 42777f4ad..cc4c5d806 100644
--- a/weed/replication/repl_util/replication_utli.go
+++ b/weed/replication/repl_util/replication_utli.go
@@ -2,7 +2,7 @@ package repl_util
import (
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -23,9 +23,9 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
writeErr = writeFunc(data)
})
if err != nil {
- glog.V(1).Infof("read from %s: %v", fileUrl, err)
+ log.Debugf("read from %s: %v", fileUrl, err)
} else if writeErr != nil {
- glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
+ log.Debugf("copy from %s: %v", fileUrl, writeErr)
} else {
break
}
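CopyFromChunkViews above tries each replica URL for a chunk in turn, logging read or write failures at debug level and breaking on the first URL that succeeds. The failover shape in isolation; readOneReplica is a hypothetical stand-in for the util read-and-write callback used above:

var lastErr error
for _, fileUrl := range fileUrls {
	if err := readOneReplica(fileUrl); err != nil {
		lastErr = err // remember the failure and try the next replica
		continue
	}
	lastErr = nil
	break // first healthy replica wins
}
if lastErr != nil {
	return lastErr
}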
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index c4228434f..5e8d47d86 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -7,7 +7,7 @@ import (
"google.golang.org/grpc"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -37,28 +37,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
return nil
}
if !strings.HasPrefix(key, r.source.Dir) {
- glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
+ log.Tracef("skipping %v outside of %v", key, r.source.Dir)
return nil
}
newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
- glog.V(3).Infof("replicate %s => %s", key, newKey)
+ log.Tracef("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
- glog.V(4).Infof("deleting %v", key)
+ log.Tracef("deleting %v", key)
return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry != nil {
- glog.V(4).Infof("creating %v", key)
+ log.Tracef("creating %v", key)
return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry == nil {
- glog.V(0).Infof("weird message %+v", message)
+ log.Infof("weird message %+v", message)
return nil
}
foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures)
if foundExisting {
- glog.V(4).Infof("updated %v", key)
+ log.Tracef("updated %v", key)
return err
}
@@ -67,7 +67,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
return fmt.Errorf("delete old entry %v: %v", key, err)
}
- glog.V(4).Infof("creating missing %v", key)
+ log.Tracef("creating missing %v", key)
return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
}
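Replicate above dispatches purely on which side of the event is present: an old entry without a new one is a delete, a new entry without an old one is a create, both present is an update (which falls back to delete-plus-create when the sink has no existing entry), and neither is logged and ignored. As a compact sketch of that decision table, with oldEntry and newEntry standing in for message.OldEntry and message.NewEntry:

switch {
case oldEntry != nil && newEntry == nil:
	// delete the sink entry
case oldEntry == nil && newEntry != nil:
	// create the sink entry
case oldEntry == nil && newEntry == nil:
	// nothing to do; logged above as a "weird message"
default:
	// update; if the sink has no existing entry, delete then create
}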
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index df70be64b..ff2325685 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -10,7 +10,7 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -56,7 +56,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
// Use your Storage account's name and key to create a credential object.
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
- glog.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
+ log.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
}
// Create a request pipeline that is used to process HTTP(S) requests and responses.
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index d193ff81c..f26b4876c 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -8,7 +8,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -82,7 +82,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@@ -98,16 +98,16 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
- glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
+ log.Tracef("replicating %s to %s header:%+v", filename, fileUrl, header)
// fetch data as is, regardless whether it is encrypted or not
uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
- glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
+ log.Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
- glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+ log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload result: %v", uploadResult.Error)
}
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index 6f467ea58..948d5913c 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -9,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -68,10 +68,10 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
dir, name := util.FullPath(key).DirAndName()
- glog.V(4).Infof("delete entry: %v", key)
+ log.Tracef("delete entry: %v", key)
err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
if err != nil {
- glog.V(0).Infof("delete entry %s: %v", key, err)
+ log.Infof("delete entry %s: %v", key, err)
return fmt.Errorf("delete entry %s: %v", key, err)
}
return nil
@@ -88,10 +88,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Directory: dir,
Name: name,
}
- glog.V(1).Infof("lookup: %v", lookupRequest)
+ log.Debugf("lookup: %v", lookupRequest)
if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
if filer.ETag(resp.Entry) == filer.ETag(entry) {
- glog.V(3).Infof("already replicated %s", key)
+ log.Tracef("already replicated %s", key)
return nil
}
}
@@ -100,10 +100,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
if err != nil {
// only warning here since the source chunk may have been deleted already
- glog.Warningf("replicate entry chunks %s: %v", key, err)
+ log.Warnf("replicate entry chunks %s: %v", key, err)
}
- glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+ log.Tracef("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
request := &filer_pb.CreateEntryRequest{
Directory: dir,
@@ -117,9 +117,9 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Signatures: signatures,
}
- glog.V(3).Infof("create: %v", request)
+ log.Tracef("create: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("create entry %s: %v", key, err)
+ log.Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@@ -140,10 +140,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
Name: name,
}
- glog.V(4).Infof("lookup entry: %v", request)
+ log.Tracef("lookup entry: %v", request)
resp, err := filer_pb.LookupEntry(client, request)
if err != nil {
- glog.V(0).Infof("lookup %s: %v", key, err)
+ log.Infof("lookup %s: %v", key, err)
return err
}
@@ -156,16 +156,16 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
return false, fmt.Errorf("lookup %s: %v", key, err)
}
- glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
+ log.Tracef("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
// skip if already changed
// this usually happens when the messages are not ordered
- glog.V(2).Infof("late updates %s", key)
+ log.Debugf("late updates %s", key)
} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
// skip if no change
// this usually happens when retrying the replication
- glog.V(3).Infof("already replicated %s", key)
+ log.Tracef("already replicated %s", key)
} else {
// find out what changed
deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index badabc32c..80feb2cbb 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/api/option"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -57,12 +57,12 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
- glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
+ log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
}
client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
if err != nil {
- glog.Fatalf("Failed to create client: %v", err)
+ log.Fatalf("Failed to create client: %v", err)
}
g.client = client
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 58432ee6b..71da89df3 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -13,7 +13,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -42,10 +42,10 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
}
func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
- glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
- glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
- glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
- glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
+ log.Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
+ log.Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
+ log.Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
+ log.Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
return s3sink.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index b172ea2c3..51d6ae1b5 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -24,9 +24,9 @@ func (s3sink *S3Sink) deleteObject(key string) error {
result, err := s3sink.conn.DeleteObject(input)
if err == nil {
- glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
+ log.Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
} else {
- glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
+ log.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
}
return err
@@ -43,9 +43,9 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) (
result, err := s3sink.conn.CreateMultipartUpload(input)
if err == nil {
- glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
+ log.Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
- glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
+ log.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
return "", err
}
@@ -64,19 +64,19 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchUpload:
- glog.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
+ log.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
default:
- glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
+ log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
}
} else {
// err is not an awserr.Error here, so report the plain error;
// calling aerr.Error() on the nil interface would panic.
- glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
+ log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, err.Error())
}
return err
}
- glog.V(0).Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)
+ log.Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)
return nil
}
@@ -94,9 +94,9 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
result, err := s3sink.conn.CompleteMultipartUpload(input)
if err == nil {
- glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
+ log.Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
- glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
+ log.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
}
return err
@@ -108,7 +108,7 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
readSeeker, err := s3sink.buildReadSeeker(chunk)
if err != nil {
- glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
+ log.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
}
@@ -122,9 +122,9 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
result, err := s3sink.conn.UploadPart(input)
if err == nil {
- glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
+ log.Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
} else {
- glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
+ log.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
}
part := &s3.CompletedPart{
@@ -148,9 +148,9 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
result, err := s3sink.conn.UploadPartCopy(input)
if err == nil {
- glog.V(0).Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
+ log.Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
} else {
- glog.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
+ log.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
}
return err
@@ -165,7 +165,7 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, er
for _, fileUrl := range fileUrls {
_, err = util.ReadUrl(fileUrl+"?readDeleted=true", nil, false, false, chunk.Offset, int(chunk.Size), buf)
if err != nil {
- glog.V(1).Infof("read from %s: %v", fileUrl, err)
+ log.Debugf("read from %s: %v", fileUrl, err)
} else {
break
}
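The methods above compose into the standard S3 multipart lifecycle: createMultipartUpload yields an upload id, uploadPart sends each chunk and collects a CompletedPart (ETag plus part number), completeMultipartUpload commits the set, and abortMultipartUpload cleans up on failure. A hedged driver sketch; the method names come from this diff, but the driver itself, its chunks argument, and the exact completeMultipartUpload signature are assumptions since the hunks abbreviate them:

func (s3sink *S3Sink) replicateParts(ctx context.Context, key string, entry *filer_pb.Entry, chunks []*filer.ChunkView) error {
	uploadId, err := s3sink.createMultipartUpload(key, entry)
	if err != nil {
		return err
	}
	var parts []*s3.CompletedPart
	for i, chunk := range chunks {
		part, err := s3sink.uploadPart(key, uploadId, i+1, chunk) // part numbers start at 1
		if err != nil {
			_ = s3sink.abortMultipartUpload(key, uploadId) // best-effort cleanup
			return err
		}
		parts = append(parts, part)
	}
	return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
}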
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index ff4f2eb26..a54bc99fd 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -12,7 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -49,7 +49,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- glog.V(4).Infof("read lookup volume id locations: %v", vid)
+ log.Tracef("read lookup volume id locations: %v", vid)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
@@ -63,14 +63,14 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
})
if err != nil {
- glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err)
+ log.Debugf("LookupFileId volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
}
locations := vid2Locations[vid]
if locations == nil || len(locations.Locations) == 0 {
- glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err)
+ log.Debugf("LookupFileId locate volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
}
@@ -91,7 +91,7 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
for _, fileUrl := range fileUrls {
filename, header, resp, err = util.DownloadFile(fileUrl)
if err != nil {
- glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
+ log.Debugf("fail to read from %s: %v", fileUrl, err)
} else {
break
}
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index 1dd386ba7..fc114d3c2 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -8,7 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *AwsSqsInput) GetName() string {
}
func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error {
- glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
- glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
+ log.Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ log.Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),
@@ -106,7 +106,7 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
})
if err != nil {
- glog.V(1).Infof("delete message from sqs %s: %v", k.queueUrl, err)
+ log.Debugf("delete message from sqs %s: %v", k.queueUrl, err)
}
return
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
index 9726096e5..edbb11bc0 100644
--- a/weed/replication/sub/notification_gocdk_pub_sub.go
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -3,7 +3,7 @@ package sub
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -29,7 +29,7 @@ func (k *GoCDKPubSubInput) GetName() string {
func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
subURL := configuration.GetString(prefix + "sub_url")
- glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
+ log.Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
sub, err := pubsub.OpenSubscription(context.Background(), subURL)
if err != nil {
return err
diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go
index a950bb42b..c21246606 100644
--- a/weed/replication/sub/notification_google_pub_sub.go
+++ b/weed/replication/sub/notification_google_pub_sub.go
@@ -6,7 +6,7 @@ import (
"os"
"cloud.google.com/go/pubsub"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *GooglePubSubInput) GetName() string {
}
func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
+ log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetString(prefix+"google_application_credentials"),
configuration.GetString(prefix+"project_id"),
@@ -45,13 +45,13 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
- glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
+ log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
}
}
client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
if err != nil {
- glog.Fatalf("Failed to create client: %v", err)
+ log.Fatalf("Failed to create client: %v", err)
}
k.topicName = topicName
@@ -60,11 +60,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
if !exists {
topic, err = client.CreateTopic(ctx, topicName)
if err != nil {
- glog.Fatalf("Failed to create topic %s: %v", topicName, err)
+ log.Fatalf("Failed to create topic %s: %v", topicName, err)
}
}
} else {
- glog.Fatalf("Failed to check topic %s: %v", topicName, err)
+ log.Fatalf("Failed to check topic %s: %v", topicName, err)
}
subscriptionName := "seaweedfs_sub"
@@ -74,11 +74,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
if !exists {
k.sub, err = client.CreateSubscription(ctx, subscriptionName, pubsub.SubscriptionConfig{Topic: topic})
if err != nil {
- glog.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
+ log.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
}
}
} else {
- glog.Fatalf("Failed to check subscription %s: %v", topicName, err)
+ log.Fatalf("Failed to check subscription %s: %v", topicName, err)
}
k.messageChan = make(chan *pubsub.Message, 1)
diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go
index fa9cfad9b..2b570ab30 100644
--- a/weed/replication/sub/notification_kafka.go
+++ b/weed/replication/sub/notification_kafka.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/Shopify/sarama"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@@ -29,8 +29,8 @@ func (k *KafkaInput) GetName() string {
}
func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error {
- glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
- glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
+ log.Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ log.Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetStringSlice(prefix+"hosts"),
configuration.GetString(prefix+"topic"),
@@ -46,7 +46,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string,
if err != nil {
panic(err)
} else {
- glog.V(0).Infof("connected to %v", hosts)
+ log.Infof("connected to %v", hosts)
}
k.topic = topic
@@ -87,7 +87,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string,
case msg := <-partitionConsumer.Messages():
k.messageChan <- msg
if err := progress.setOffset(msg.Partition, msg.Offset); err != nil {
- glog.Warningf("set kafka offset: %v", err)
+ log.Warnf("set kafka offset: %v", err)
}
}
}
@@ -121,12 +121,12 @@ func loadProgress(offsetFile string) *KafkaProgress {
progress := &KafkaProgress{}
data, err := ioutil.ReadFile(offsetFile)
if err != nil {
- glog.Warningf("failed to read kafka progress file: %s", offsetFile)
+ log.Warnf("failed to read kafka progress file: %s", offsetFile)
return nil
}
err = json.Unmarshal(data, progress)
if err != nil {
- glog.Warningf("failed to read kafka progress message: %s", string(data))
+ log.Warnf("failed to read kafka progress message: %s", string(data))
return nil
}
return progress
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index c5dae782d..8e4b60c43 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -10,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/golang/protobuf/jsonpb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)
@@ -52,7 +52,7 @@ func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccess
return iam
}
if err := iam.loadS3ApiConfiguration(fileName); err != nil {
- glog.Fatalf("fail to load config file %s: %v", fileName, err)
+ log.Fatalf("fail to load config file %s: %v", fileName, err)
}
return iam
}
@@ -63,13 +63,13 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) err
rawData, readErr := ioutil.ReadFile(fileName)
if readErr != nil {
- glog.Warningf("fail to read %s : %v", fileName, readErr)
+ log.Warnf("fail to read %s : %v", fileName, readErr)
return fmt.Errorf("fail to read %s : %v", fileName, readErr)
}
- glog.V(1).Infof("load s3 config: %v", fileName)
+ log.Debugf("load s3 config: %v", fileName)
if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil {
- glog.Warningf("unmarshal error: %v", err)
+ log.Warnf("unmarshal error: %v", err)
return fmt.Errorf("unmarshal %s error: %v", fileName, err)
}
@@ -152,19 +152,19 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
case authTypeStreamingSigned:
return identity, s3err.ErrNone
case authTypeUnknown:
- glog.V(3).Infof("unknown auth type")
+ log.Tracef("unknown auth type")
return identity, s3err.ErrAccessDenied
case authTypePresignedV2, authTypeSignedV2:
- glog.V(3).Infof("v2 auth type")
+ log.Tracef("v2 auth type")
identity, s3Err = iam.isReqAuthenticatedV2(r)
case authTypeSigned, authTypePresigned:
- glog.V(3).Infof("v4 auth type")
+ log.Tracef("v4 auth type")
identity, s3Err = iam.reqSignatureV4Verify(r)
case authTypePostPolicy:
- glog.V(3).Infof("post policy auth type")
+ log.Tracef("post policy auth type")
return identity, s3err.ErrNone
case authTypeJWT:
- glog.V(3).Infof("jwt auth type")
+ log.Tracef("jwt auth type")
return identity, s3err.ErrNotImplemented
case authTypeAnonymous:
identity, found = iam.lookupAnonymous()
@@ -175,12 +175,12 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
return identity, s3err.ErrNotImplemented
}
- glog.V(3).Infof("auth error: %v", s3Err)
+ log.Tracef("auth error: %v", s3Err)
if s3Err != s3err.ErrNone {
return identity, s3Err
}
- glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
+ log.Tracef("user name: %v actions: %v", identity.Name, identity.Actions)
bucket, _ := getBucketAndObject(r)
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index f882592c1..eba846ac7 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -14,7 +14,7 @@ import (
"github.com/google/uuid"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -25,7 +25,7 @@ type InitiateMultipartUploadResult struct {
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
- glog.V(2).Infof("createMultipartUpload input %v", input)
+ log.Debugf("createMultipartUpload input %v", input)
uploadId, _ := uuid.NewRandom()
uploadIdString := uploadId.String()
@@ -36,7 +36,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
}
entry.Extended["key"] = []byte(*input.Key)
}); err != nil {
- glog.Errorf("NewMultipartUpload error: %v", err)
+ log.Errorf("NewMultipartUpload error: %v", err)
return nil, s3err.ErrInternalError
}
@@ -58,13 +58,13 @@ type CompleteMultipartUploadResult struct {
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
- glog.V(2).Infof("completeMultipartUpload input %v", input)
+ log.Debugf("completeMultipartUpload input %v", input)
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
entries, _, err := s3a.list(uploadDirectory, "", "", false, 0)
if err != nil || len(entries) == 0 {
- glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
+ log.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
return nil, s3err.ErrNoSuchUpload
}
@@ -106,7 +106,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
err = s3a.mkFile(dirName, entryName, finalParts)
if err != nil {
- glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
+ log.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
return nil, s3err.ErrInternalError
}
@@ -120,7 +120,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
- glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
+ log.Debugf("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
}
return
@@ -128,18 +128,18 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {
- glog.V(2).Infof("abortMultipartUpload input %v", input)
+ log.Debugf("abortMultipartUpload input %v", input)
exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
if err != nil {
- glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
+ log.Debugf("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
return nil, s3err.ErrNoSuchUpload
}
if exists {
err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
}
if err != nil {
- glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
+ log.Debugf("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
return nil, s3err.ErrInternalError
}
@@ -166,7 +166,7 @@ type ListMultipartUploadsResult struct {
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
- glog.V(2).Infof("listMultipartUploads input %v", input)
+ log.Debugf("listMultipartUploads input %v", input)
output = &ListMultipartUploadsResult{
Bucket: input.Bucket,
@@ -179,7 +179,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, uint32(*input.MaxUploads))
if err != nil {
- glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
+ log.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
return
}
output.IsTruncated = aws.Bool(!isLast)
@@ -224,7 +224,7 @@ type ListPartsResult struct {
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
- glog.V(2).Infof("listObjectParts input %v", input)
+ log.Debugf("listObjectParts input %v", input)
output = &ListPartsResult{
Bucket: input.Bucket,
@@ -237,7 +237,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
if err != nil {
- glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
+ log.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, s3err.ErrNoSuchUpload
}
@@ -248,7 +248,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
partNumberString := entry.Name[:len(entry.Name)-len(".part")]
partNumber, err := strconv.Atoi(partNumberString)
if err != nil {
- glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
+ log.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
continue
}
output.Part = append(output.Part, &s3.Part{
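listObjectParts above recovers each part number by stripping the ".part" suffix from the entry name, which works because parts are stored under zero-padded names produced with the "%04d.part" pattern visible in the list call, so lexical directory order matches part order. A round-trip of that naming convention (assumes fmt, strconv, and strings from the standard library):

name := fmt.Sprintf("%04d.part", 7) // "0007.part"
numString := strings.TrimSuffix(name, ".part")
partNumber, err := strconv.Atoi(numString) // 7, nil
// a non-numeric name fails the Atoi and is skipped, as shown above
_ = partNumber
_ = err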
diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go
index b6ac52c80..b9d763146 100644
--- a/weed/s3api/filer_util.go
+++ b/weed/s3api/filer_util.go
@@ -5,7 +5,7 @@ import (
"fmt"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -58,9 +58,9 @@ func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath strin
IsRecursive: isRecursive,
}
- glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ log.Debugf("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
- glog.V(0).Infof("delete entry %v: %v", request, err)
+ log.Infof("delete entry %v: %v", request, err)
return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
} else {
if resp.Error != "" {
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 00b7382cc..a0dfc6974 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -14,7 +14,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -72,7 +72,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
IncludeEcVolumes: true,
IncludeNormalVolumes: true,
}); err != nil {
- glog.Errorf("list collection: %v", err)
+ log.Errorf("list collection: %v", err)
return fmt.Errorf("list collections: %v", err)
} else {
for _, c := range resp.Collections {
@@ -106,7 +106,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
// create the folder for the bucket, but lazily create the actual collection
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
- glog.Errorf("PutBucketHandler mkdir: %v", err)
+ log.Errorf("PutBucketHandler mkdir: %v", err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@@ -130,7 +130,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
Collection: bucket,
}
- glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
+ log.Debugf("delete collection: %v", deleteCollectionRequest)
if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil {
return fmt.Errorf("delete collection %s: %v", bucket, err)
}
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
index 6935c75bd..373cf1617 100644
--- a/weed/s3api/s3api_handlers.go
+++ b/weed/s3api/s3api_handlers.go
@@ -13,7 +13,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -56,7 +56,7 @@ func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
// If none of the HTTP routes match, respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
- glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
+ log.Infof("unsupported %s %s", r.Method, r.RequestURI)
writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}
@@ -86,10 +86,10 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
}
w.WriteHeader(statusCode)
if response != nil {
- glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
+ log.Tracef("status %d %s: %s", statusCode, mType, string(response))
_, err := w.Write(response)
if err != nil {
- glog.V(0).Infof("write err: %v", err)
+ log.Infof("write err: %v", err)
}
w.(http.Flusher).Flush()
}
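The substitutions in this handler file follow the verbosity mapping applied mechanically across the whole patch: glog.V(0) becomes Info, V(1) and V(2) become Debug, V(3) and V(4) become Trace, and Errorf/Warningf/Fatalf map onto Errorf/Warnf/Fatalf. A runnable sketch of the same levels written against plain logrus, assuming the new util/log package delegates to it:

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        logrus.SetLevel(logrus.TraceLevel) // show every level for the demo

        logrus.Infof("unsupported %s %s", "GET", "/x") // was glog.V(0).Infof
        logrus.Debugf("s3 proxying %s", "PUT")         // was glog.V(1) or V(2) .Infof
        logrus.Tracef("status %d", 200)                // was glog.V(3) or V(4) .Infof
        logrus.Warnf("slow request")                   // was glog.Warningf
        logrus.Errorf("post to filer: %v", "timeout")  // was glog.Errorf
    }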
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index ca578e7e5..66f788fd3 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -2,7 +2,7 @@ package s3api
import (
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
@@ -48,7 +48,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
}
defer util.CloseResponse(resp)
- glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
+ log.Debugf("copy from %s to %s", srcUrl, dstUrl)
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
if errCode != s3err.ErrNone {
@@ -129,7 +129,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
}
defer dataReader.Close()
- glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
+ log.Debugf("copy from %s to %s", srcUrl, dstUrl)
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
if errCode != s3err.ErrNone {
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 7ea49f2c6..6d26f1236 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -14,7 +14,7 @@ import (
"github.com/gorilla/mux"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_server "github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -232,12 +232,12 @@ var passThroughHeaders = []string{
func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
- glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
+ log.Debugf("s3 proxying %s to %s", r.Method, destUrl)
proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)
if err != nil {
- glog.Errorf("NewRequest %s: %v", destUrl, err)
+ log.Errorf("NewRequest %s: %v", destUrl, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@@ -272,7 +272,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
}
if postErr != nil {
- glog.Errorf("post to filer: %v", postErr)
+ log.Errorf("post to filer: %v", postErr)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@@ -298,7 +298,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
if err != nil {
- glog.Errorf("NewRequest %s: %v", uploadUrl, err)
+ log.Errorf("NewRequest %s: %v", uploadUrl, err)
return "", s3err.ErrInternalError
}
@@ -314,7 +314,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp, postErr := client.Do(proxyReq)
if postErr != nil {
- glog.Errorf("post to filer: %v", postErr)
+ log.Errorf("post to filer: %v", postErr)
return "", s3err.ErrInternalError
}
defer resp.Body.Close()
@@ -323,17 +323,17 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
- glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
+ log.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
return etag, s3err.ErrInternalError
}
var ret weed_server.FilerPostResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
- glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
+ log.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
return "", s3err.ErrInternalError
}
if ret.Error != "" {
- glog.Errorf("upload to filer error: %v", ret.Error)
+ log.Errorf("upload to filer error: %v", ret.Error)
return "", filerErrorToS3Error(ret.Error)
}
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 4ddb24e31..56d1036f7 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -2,7 +2,7 @@ package s3api
import (
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
@@ -29,7 +29,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
Key: objectKey(aws.String(object)),
})
- glog.V(2).Info("NewMultipartUploadHandler", string(encodeResponse(response)), errCode)
+ log.Debug("NewMultipartUploadHandler", string(encodeResponse(response)), errCode)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
@@ -53,7 +53,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
UploadId: aws.String(uploadID),
})
- glog.V(2).Info("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
+ log.Debug("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
@@ -82,7 +82,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
return
}
- glog.V(2).Info("AbortMultipartUploadHandler", string(encodeResponse(response)))
+ log.Debug("AbortMultipartUploadHandler", string(encodeResponse(response)))
writeSuccessResponseXML(w, encodeResponse(response))
@@ -115,7 +115,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
UploadIdMarker: aws.String(uploadIDMarker),
})
- glog.V(2).Info("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode)
+ log.Debug("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
@@ -149,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
UploadId: aws.String(uploadID),
})
- glog.V(2).Info("ListObjectPartsHandler", string(encodeResponse(response)), errCode)
+ log.Debug("ListObjectPartsHandler", string(encodeResponse(response)), errCode)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go
index 94719834c..dd3176f57 100644
--- a/weed/s3api/s3api_object_tagging_handlers.go
+++ b/weed/s3api/s3api_object_tagging_handlers.go
@@ -3,7 +3,7 @@ package s3api
import (
"encoding/xml"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -24,10 +24,10 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
tags, err := s3a.getTags(dir, name)
if err != nil {
if err == filer_pb.ErrNotFound {
- glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
+ log.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
- glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
+ log.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
@@ -49,29 +49,29 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
tagging := &Tagging{}
input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
+ log.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
if err = xml.Unmarshal(input, tagging); err != nil {
- glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
+ log.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrMalformedXML, r.URL)
return
}
tags := tagging.ToTags()
if len(tags) > 10 {
- glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
+ log.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
for k, v := range tags {
if len(k) > 128 {
- glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
+ log.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
if len(v) > 256 {
- glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
+ log.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL)
return
}
@@ -79,10 +79,10 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil {
if err == filer_pb.ErrNotFound {
- glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
+ log.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
- glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
+ log.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
@@ -104,10 +104,10 @@ func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *htt
err := s3a.rmTags(dir, name)
if err != nil {
if err == filer_pb.ErrNotFound {
- glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
+ log.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
} else {
- glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
+ log.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
}
return
diff --git a/weed/security/guard.go b/weed/security/guard.go
index 87ec91ec1..0b116e530 100644
--- a/weed/security/guard.go
+++ b/weed/security/guard.go
@@ -7,7 +7,7 @@ import (
"net/http"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var (
@@ -122,6 +122,6 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error {
}
}
- glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr)
+ log.Infof("Not in whitelist: %s", r.RemoteAddr)
return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr)
}
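Every file in this patch swaps the glog import for github.com/chrislusf/seaweedfs/weed/util/log, but the wrapper package itself is not part of this diff. A minimal sketch that would satisfy the call sites seen here, assuming it simply delegates to logrus, might be:

    package log

    import "github.com/sirupsen/logrus"

    // Formatted, leveled helpers matching the glog verbosity mapping in this patch.
    func Tracef(format string, args ...interface{}) { logrus.Tracef(format, args...) }
    func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...) }
    func Infof(format string, args ...interface{})  { logrus.Infof(format, args...) }
    func Warnf(format string, args ...interface{})  { logrus.Warnf(format, args...) }
    func Errorf(format string, args ...interface{}) { logrus.Errorf(format, args...) }
    func Fatalf(format string, args ...interface{}) { logrus.Fatalf(format, args...) }

    // Print-style variants used by a handful of call sites.
    func Trace(args ...interface{})  { logrus.Trace(args...) }
    func Debug(args ...interface{})  { logrus.Debug(args...) }
    func Info(args ...interface{})   { logrus.Info(args...) }
    func Infoln(args ...interface{}) { logrus.Infoln(args...) }
    func Error(args ...interface{})  { logrus.Error(args...) }
    func Fatal(args ...interface{})  { logrus.Fatal(args...) }

    // IsTrace replaces boolean guards like `if glog.V(4)` (see raft_server.go below).
    func IsTrace() bool { return logrus.IsLevelEnabled(logrus.TraceLevel) }

The names here are inferred from usage in this diff; the real package may differ in detail.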
diff --git a/weed/security/jwt.go b/weed/security/jwt.go
index 0bd7fa974..36d8e10f2 100644
--- a/weed/security/jwt.go
+++ b/weed/security/jwt.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
jwt "github.com/dgrijalva/jwt-go"
)
@@ -33,7 +33,7 @@ func GenJwt(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJw
t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
encoded, e := t.SignedString([]byte(signingKey))
if e != nil {
- glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e)
+ log.Infof("Failed to sign claims %+v: %v", t.Claims, e)
return ""
}
return EncodedJwt(encoded)
diff --git a/weed/security/tls.go b/weed/security/tls.go
index 5821b159d..53816f256 100644
--- a/weed/security/tls.go
+++ b/weed/security/tls.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
@@ -21,12 +21,12 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
// load cert/key, ca cert
cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
if err != nil {
- glog.V(1).Infof("load cert/key error: %v", err)
+ log.Debugf("load cert/key error: %v", err)
return nil
}
caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
if err != nil {
- glog.V(1).Infof("read ca cert file error: %v", err)
+ log.Debugf("read ca cert file error: %v", err)
return nil
}
caCertPool := x509.NewCertPool()
@@ -53,12 +53,12 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
// load cert/key, cacert
cert, err := tls.LoadX509KeyPair(certFileName, keyFileName)
if err != nil {
- glog.V(1).Infof("load cert/key error: %v", err)
+ log.Debugf("load cert/key error: %v", err)
return grpc.WithInsecure()
}
caCert, err := ioutil.ReadFile(caFileName)
if err != nil {
- glog.V(1).Infof("read ca cert file error: %v", err)
+ log.Debugf("read ca cert file error: %v", err)
return grpc.WithInsecure()
}
caCertPool := x509.NewCertPool()
diff --git a/weed/sequence/etcd_sequencer.go b/weed/sequence/etcd_sequencer.go
index 1fc378640..71f99aa59 100644
--- a/weed/sequence/etcd_sequencer.go
+++ b/weed/sequence/etcd_sequencer.go
@@ -18,7 +18,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"go.etcd.io/etcd/client"
)
@@ -63,7 +63,7 @@ func NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error
if err != nil {
return nil, fmt.Errorf("read sequence from file failed, %v", err)
}
- glog.V(4).Infof("read sequence from file : %d", maxValue)
+ log.Tracef("read sequence from file : %d", maxValue)
newSeq, err := setMaxSequenceToEtcd(keysApi, maxValue)
if err != nil {
@@ -88,16 +88,16 @@ func (es *EtcdSequencer) NextFileId(count uint64) uint64 {
reqSteps += count
}
maxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps)
- glog.V(4).Infof("get max sequence id from etcd, %d", maxId)
+ log.Tracef("get max sequence id from etcd, %d", maxId)
if err != nil {
- glog.Error(err)
+ log.Error(err)
return 0
}
es.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId
- glog.V(4).Infof("current id : %d, max id : %d", es.currentSeqId, es.maxSeqId)
+ log.Tracef("current id : %d, max id : %d", es.currentSeqId, es.maxSeqId)
if err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil {
- glog.Errorf("flush sequence to file failed, %v", err)
+ log.Errorf("flush sequence to file failed, %v", err)
}
}
@@ -116,13 +116,13 @@ func (es *EtcdSequencer) SetMax(seenValue uint64) {
if seenValue > es.maxSeqId {
maxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue)
if err != nil {
- glog.Errorf("set Etcd Max sequence failed : %v", err)
+ log.Errorf("set Etcd Max sequence failed : %v", err)
return
}
es.currentSeqId, es.maxSeqId = maxId, maxId
if err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil {
- glog.Errorf("flush sequence to file failed, %v", err)
+ log.Errorf("flush sequence to file failed, %v", err)
}
}
}
@@ -164,7 +164,7 @@ func batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error)
if err == nil {
break
}
- glog.Error(err)
+ log.Error(err)
}
return endSeqValue, nil
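NextFileId above reserves sequence ids in batches: etcd's shared counter is advanced by reqSteps in a single call, and this node then owns every id above maxId-reqSteps up to maxId, persisting the watermark locally via writeSequenceFile. A worked example with hypothetical numbers:

    package main

    import "fmt"

    func main() {
        // Hypothetical: the etcd counter stood at 1000 and this node reserved 100 ids,
        // so batchGetSequenceFromEtcd returns the advanced counter value, 1100.
        var reqSteps, maxId uint64 = 100, 1100
        currentSeqId, maxSeqId := maxId-reqSteps, maxId // mirrors the assignment above
        fmt.Printf("this node may issue ids above %d up to %d\n", currentSeqId, maxSeqId)
    }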
diff --git a/weed/server/common.go b/weed/server/common.go
index 58079032e..130c8bae7 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -50,7 +50,7 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
}
if httpStatus >= 400 {
- glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s",
+ log.Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s",
r.Method, r.URL.String(), httpStatus, string(bytes))
}
@@ -86,8 +86,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
// wrapper for writeJson - just logs errors
func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) {
if err := writeJson(w, r, httpStatus, obj); err != nil {
- glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err)
- glog.V(1).Infof("JSON content: %+v", obj)
+ log.Infof("error writing JSON status %d: %v", httpStatus, err)
+ log.Debugf("JSON content: %+v", obj)
}
}
func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) {
@@ -97,7 +97,7 @@ func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err
}
func debug(params ...interface{}) {
- glog.V(4).Infoln(params...)
+ log.Trace(params...)
}
func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) {
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 7b04e4fab..080e59492 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -9,7 +9,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -19,14 +19,14 @@ import (
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
- glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name))
+ log.Tracef("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name))
entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name))
if err == filer_pb.ErrNotFound {
return &filer_pb.LookupDirectoryEntryResponse{}, err
}
if err != nil {
- glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err)
+ log.Tracef("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err)
return nil, err
}
@@ -45,7 +45,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
- glog.V(4).Infof("ListEntries %v", req)
+ log.Tracef("ListEntries %v", req)
limit := int(req.Limit)
if limit == 0 {
@@ -113,7 +113,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
for _, vidString := range req.VolumeIds {
vid, err := strconv.Atoi(vidString)
if err != nil {
- glog.V(1).Infof("Unknown volume id %d", vid)
+ log.Debugf("Unknown volume id %d", vid)
return nil, err
}
var locs []*filer_pb.Location
@@ -152,7 +152,7 @@ func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err err
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
- glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name)
+ log.Tracef("CreateEntry %v/%v", req.Directory, req.Entry.Name)
resp = &filer_pb.CreateEntryResponse{}
@@ -173,7 +173,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
if createErr == nil {
fs.filer.DeleteChunks(garbage)
} else {
- glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr)
+ log.Tracef("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr)
resp.Error = createErr.Error()
}
@@ -182,7 +182,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {
- glog.V(4).Infof("UpdateEntry %v", req)
+ log.Tracef("UpdateEntry %v", req)
fullpath := util.Join(req.Directory, req.Entry.Name)
entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
@@ -204,7 +204,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
HardLinkCounter: req.Entry.HardLinkCounter,
}
- glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v",
+ log.Tracef("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v",
fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks,
entry.Extended, req.Entry.Extended)
@@ -234,7 +234,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures)
} else {
- glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err)
+ log.Tracef("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err)
}
return &filer_pb.UpdateEntryResponse{}, err
@@ -267,7 +267,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks)
if err != nil {
// not good, but should be ok
- glog.V(0).Infof("MaybeManifestize: %v", err)
+ log.Infof("MaybeManifestize: %v", err)
}
}
@@ -278,7 +278,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) {
- glog.V(4).Infof("AppendToEntry %v", req)
+ log.Tracef("AppendToEntry %v", req)
fullpath := util.NewFullPath(req.Directory, req.EntryName)
var offset int64 = 0
@@ -308,7 +308,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
if err != nil {
// not good, but should be ok
- glog.V(0).Infof("MaybeManifestize: %v", err)
+ log.Infof("MaybeManifestize: %v", err)
}
err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil)
@@ -318,7 +318,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
- glog.V(4).Infof("DeleteEntry %v", req)
+ log.Tracef("DeleteEntry %v", req)
err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
resp = &filer_pb.DeleteEntryResponse{}
@@ -336,11 +336,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
if err != nil {
- glog.V(3).Infof("AssignVolume: %v", err)
+ log.Tracef("AssignVolume: %v", err)
return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
}
if assignResult.Error != "" {
- glog.V(3).Infof("AssignVolume error: %v", assignResult.Error)
+ log.Tracef("AssignVolume error: %v", assignResult.Error)
return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil
}
@@ -357,7 +357,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) {
- glog.V(4).Infof("CollectionList %v", req)
+ log.Tracef("CollectionList %v", req)
resp = &filer_pb.CollectionListResponse{}
err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
@@ -379,7 +379,7 @@ func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.Collect
func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
- glog.V(4).Infof("DeleteCollection %v", req)
+ log.Tracef("DeleteCollection %v", req)
err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
_, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
@@ -434,7 +434,7 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
MetricsIntervalSec: int32(fs.metricsIntervalSec),
}
- glog.V(4).Infof("GetFilerConfiguration: %v", t)
+ log.Tracef("GetFilerConfiguration: %v", t)
return t, nil
}
@@ -453,25 +453,25 @@ func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedS
}
fs.brokersLock.Lock()
fs.brokers[clientName] = m
- glog.V(0).Infof("+ broker %v", clientName)
+ log.Infof("+ broker %v", clientName)
fs.brokersLock.Unlock()
defer func() {
fs.brokersLock.Lock()
delete(fs.brokers, clientName)
- glog.V(0).Infof("- broker %v: %v", clientName, err)
+ log.Infof("- broker %v: %v", clientName, err)
fs.brokersLock.Unlock()
}()
for {
if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
- glog.V(0).Infof("send broker %v: %+v", clientName, err)
+ log.Infof("send broker %v: %+v", clientName, err)
return err
}
// println("replied")
if _, err := stream.Recv(); err != nil {
- glog.V(0).Infof("recv broker %v: %v", clientName, err)
+ log.Infof("recv broker %v: %v", clientName, err)
return err
}
// println("received")
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index f9ddeb600..3fdea4432 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -6,14 +6,14 @@ import (
"path/filepath"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) {
- glog.V(1).Infof("AtomicRenameEntry %v", req)
+ log.Debugf("AtomicRenameEntry %v", req)
ctx, err := fs.filer.BeginTransaction(ctx)
if err != nil {
@@ -64,7 +64,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
currentDirPath := oldParent.Child(entry.Name())
newDirPath := newParent.Child(newName)
- glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath)
+ log.Debugf("moving folder %s => %s", currentDirPath, newDirPath)
lastFileName := ""
includeLastFile := false
@@ -97,10 +97,10 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
- glog.V(1).Infof("moving entry %s => %s", oldPath, newPath)
+ log.Debugf("moving entry %s => %s", oldPath, newPath)
if oldPath == newPath {
- glog.V(1).Infof("skip moving entry %s => %s", oldPath, newPath)
+ log.Debugf("skip moving entry %s => %s", oldPath, newPath)
return nil
}
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index 634fb5211..a45c09968 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -9,7 +9,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -23,7 +23,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
defer fs.deleteClient(clientName)
lastReadTime := time.Unix(0, req.SinceNs)
- glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+ log.Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature)
@@ -46,7 +46,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
return true
}, eachLogEntryFn)
if err != nil {
- glog.Errorf("processed to %v: %v", lastReadTime, err)
+ log.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
break
@@ -67,7 +67,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
defer fs.deleteClient(clientName)
lastReadTime := time.Unix(0, req.SinceNs)
- glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+ log.Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature)
@@ -82,7 +82,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
if processedTsNs != 0 {
lastReadTime = time.Unix(0, processedTsNs)
}
- glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+ log.Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
// println("reading from in memory logs ...")
for {
@@ -93,7 +93,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
return true
}, eachLogEntryFn)
if err != nil {
- glog.Errorf("processed to %v: %v", lastReadTime, err)
+ log.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
break
@@ -109,7 +109,7 @@ func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotificati
return func(logEntry *filer_pb.LogEntry) error {
event := &filer_pb.SubscribeMetadataResponse{}
if err := proto.Unmarshal(logEntry.Data, event); err != nil {
- glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ log.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
}
@@ -163,7 +163,7 @@ func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRe
}
// println("sending", dirPath, entryName)
if err := stream.Send(message); err != nil {
- glog.V(0).Infof("=> client %v: %+v", clientName, err)
+ log.Infof("=> client %v: %+v", clientName, err)
return err
}
return nil
@@ -172,10 +172,10 @@ func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRe
func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) {
clientName = clientType + "@" + clientAddress
- glog.V(0).Infof("+ listener %v", clientName)
+ log.Infof("+ listener %v", clientName)
return
}
func (fs *FilerServer) deleteClient(clientName string) {
- glog.V(0).Infof("- listener %v", clientName)
+ log.Infof("- listener %v", clientName)
}
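Both subscription loops above use the same recovery policy: log the read error, sleep a fixed 3127 ms, then continue only if the error is the log_buffer.ResumeError sentinel; any other error breaks out. Reduced to a self-contained sketch, with errResume standing in for the real sentinel:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errResume = errors.New("resume") // stand-in for log_buffer.ResumeError

    func readLogs(attempt int) error {
        if attempt < 2 {
            return errResume // transient: worth retrying after a pause
        }
        return errors.New("stream closed") // anything else ends the loop
    }

    func main() {
        for attempt := 0; ; attempt++ {
            err := readLogs(attempt)
            if err == nil {
                continue // keep reading
            }
            fmt.Println("read error:", err)
            time.Sleep(10 * time.Millisecond) // the server waits 3127 ms
            if err != errResume {
                break
            }
        }
    }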
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index b11448db4..abbb1e4dc 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -30,7 +30,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
_ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub"
@@ -86,7 +86,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
fs.listenersCond = sync.NewCond(&fs.listenersLock)
if len(option.Masters) == 0 {
- glog.Fatal("master list is required!")
+ log.Fatal("master list is required!")
}
fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, option.DataCenter, func() {
@@ -107,7 +107,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
if os.IsNotExist(err) {
os.MkdirAll(option.DefaultLevelDbDir, 0755)
}
- glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir)
+ log.Infof("default to create filer store dir in %s", option.DefaultLevelDbDir)
}
util.LoadConfiguration("notification", false)
@@ -147,7 +147,7 @@ func (fs *FilerServer) checkWithMaster() {
for _, master := range fs.option.Masters {
_, err := pb.ParseFilerGrpcAddress(master)
if err != nil {
- glog.Fatalf("invalid master address %s: %v", master, err)
+ log.Fatalf("invalid master address %s: %v", master, err)
}
}
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 69d485e90..e4f4608de 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -12,7 +12,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
@@ -35,11 +35,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
return
}
if err == filer_pb.ErrNotFound {
- glog.V(1).Infof("Not found %s: %v", path, err)
+ log.Debugf("Not found %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc()
w.WriteHeader(http.StatusNotFound)
} else {
- glog.V(0).Infof("Internal %s: %v", path, err)
+ log.Infof("Internal %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.internalerror").Inc()
w.WriteHeader(http.StatusInternalServerError)
}
@@ -61,7 +61,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
if len(entry.Chunks) == 0 {
- glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
+ log.Debugf("no file chunks for %s, attr=%+v", path, entry.Attr)
stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc()
w.WriteHeader(http.StatusNoContent)
return
@@ -136,7 +136,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
if shouldResize {
data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks)
if err != nil {
- glog.Errorf("failed to read %s: %v", path, err)
+ log.Errorf("failed to read %s: %v", path, err)
w.WriteHeader(http.StatusNotModified)
return
}
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 99345550c..579e385ce 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -38,7 +38,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit, "")
if err != nil {
- glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
+ log.Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
w.WriteHeader(http.StatusNotFound)
return
}
@@ -52,7 +52,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName = entries[len(entries)-1].Name()
}
- glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries))
+ log.Tracef("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries))
if r.Header.Get("Accept") == "application/json" {
writeJsonQuiet(w, r, http.StatusOK, struct {
diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go
index 50b3a2c06..9e0abeee1 100644
--- a/weed/server/filer_server_handlers_tagging.go
+++ b/weed/server/filer_server_handlers_tagging.go
@@ -5,7 +5,7 @@ import (
"net/http"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -44,7 +44,7 @@ func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request)
}
if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil {
- glog.V(0).Infof("failing to update %s tagging : %v", path, dbErr)
+ log.Infof("failing to update %s tagging : %v", path, dbErr)
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
@@ -92,7 +92,7 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque
}
if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil {
- glog.V(0).Infof("failing to delete %s tagging : %v", path, dbErr)
+ log.Infof("failing to delete %s tagging : %v", path, dbErr)
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 5806b0c1f..5f4d50cda 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -7,7 +7,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -39,7 +39,7 @@ func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, u
assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
if ae != nil {
- glog.Errorf("failing to assign a file id: %v", ae)
+ log.Errorf("failing to assign a file id: %v", ae)
err = ae
return
}
@@ -91,7 +91,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil)
if err != nil {
- glog.V(1).Infoln("deleting", objectPath, ":", err.Error())
+ log.Debug("deleting", objectPath, ":", err.Error())
httpStatus := http.StatusInternalServerError
if err == filer_pb.ErrNotFound {
httpStatus = http.StatusNotFound
@@ -129,7 +129,7 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication
if ttlSeconds == 0 {
ttl, err := needle.ReadTTL(rule.GetTtl())
if err != nil {
- glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err)
+ log.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err)
}
ttlSeconds = int32(ttl.Minutes()) * 60
}
@@ -148,7 +148,7 @@ func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplicatio
ttl, err := needle.ReadTTL(qTtl)
if err != nil {
- glog.Errorf("fail to parse ttl %s: %v", qTtl, err)
+ log.Errorf("fail to parse ttl %s: %v", qTtl, err)
}
return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, dataCenter, rack)
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index fd2db884f..fdd80ad8b 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -15,7 +15,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
@@ -94,7 +94,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
if replyerr != nil {
- glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
+ log.Infof("manifestize %s: %v", r.RequestURI, replyerr)
return
}
@@ -116,7 +116,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
if replyerr != nil {
- glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
+ log.Infof("manifestize %s: %v", r.RequestURI, replyerr)
return
}
@@ -135,7 +135,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
- glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
+ log.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
mode = 0660
}
@@ -154,7 +154,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
crTime = existingEntry.Crtime
}
- glog.V(4).Infoln("saving", path)
+ log.Trace("saving", path)
entry := &filer.Entry{
FullPath: util.FullPath(path),
Attr: filer.Attr{
@@ -194,7 +194,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
fs.filer.DeleteChunks(entry.Chunks)
replyerr = dbErr
filerResult.Error = dbErr.Error()
- glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
+ log.Infof("failing to write %s to filer server : %v", path, dbErr)
}
return filerResult, replyerr
}
@@ -230,7 +230,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
// Save to chunk manifest structure
fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
- glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
+ log.Tracef("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
// reset variables for the next chunk
chunkOffset = chunkOffset + int64(uploadResult.Size)
@@ -283,7 +283,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http
}
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
- glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
+ log.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
mode = 0660
}
@@ -299,7 +299,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http
return
}
- glog.V(4).Infoln("mkdir", path)
+ log.Trace("mkdir", path)
entry := &filer.Entry{
FullPath: util.FullPath(path),
Attr: filer.Attr{
@@ -318,7 +318,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http
if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
replyerr = dbErr
filerResult.Error = dbErr.Error()
- glog.V(0).Infof("failing to create dir %s on filer server : %v", path, dbErr)
+ log.Infof("failing to create dir %s on filer server : %v", path, dbErr)
}
return filerResult, replyerr
}
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
index 3cc0d0c41..bfbaa04e9 100644
--- a/weed/server/filer_server_handlers_write_cipher.go
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -24,7 +24,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
}
- glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
+ log.Tracef("write %s to %v", r.URL.Path, urlLocation)
// Note: encrypt(gzip(data)): gzip the data first, then encrypt it
diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go
index 9df88e956..d62946d63 100644
--- a/weed/server/master_grpc_server.go
+++ b/weed/server/master_grpc_server.go
@@ -11,7 +11,7 @@ import (
"github.com/chrislusf/raft"
"google.golang.org/grpc/peer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
@@ -26,7 +26,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
// if the volume server disconnects and reconnects quickly
// the unregister and register can race with each other
ms.Topo.UnRegisterDataNode(dn)
- glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
+ log.Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
message := &master_pb.VolumeLocation{
Url: dn.Url(),
@@ -54,9 +54,9 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
heartbeat, err := stream.Recv()
if err != nil {
if dn != nil {
- glog.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err)
+ log.Warnf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err)
} else {
- glog.Warningf("SendHeartbeat.Recv: %v", err)
+ log.Warnf("SendHeartbeat.Recv: %v", err)
}
return err
}
@@ -70,11 +70,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
dn = rack.GetOrCreateDataNode(heartbeat.Ip,
int(heartbeat.Port), heartbeat.PublicUrl,
int64(heartbeat.MaxVolumeCount))
- glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
+ log.Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
if err := stream.Send(&master_pb.HeartbeatResponse{
VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
}); err != nil {
- glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
+ log.Warnf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
return err
}
}
@@ -84,7 +84,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
dn.UpAdjustMaxVolumeCountDelta(delta)
}
- glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
+ log.Tracef("master received heartbeat %s", heartbeat.String())
message := &master_pb.VolumeLocation{
Url: dn.Url(),
PublicUrl: dn.PublicUrl,
@@ -107,11 +107,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
newVolumes, deletedVolumes := ms.Topo.SyncDataNodeRegistration(heartbeat.Volumes, dn)
for _, v := range newVolumes {
- glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
+ log.Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
message.NewVids = append(message.NewVids, uint32(v.Id))
}
for _, v := range deletedVolumes {
- glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url())
+ log.Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url())
message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
}
}
@@ -134,7 +134,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}
if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
- glog.V(1).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
+ log.Debugf("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn)
// broadcast the ec vid changes to master clients
@@ -152,7 +152,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
ms.clientChansLock.RLock()
for host, ch := range ms.clientChans {
- glog.V(0).Infof("master send to %s: %s", host, message.String())
+ log.Infof("master send to %s: %s", host, message.String())
ch <- message
}
ms.clientChansLock.RUnlock()
@@ -161,13 +161,13 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
// tell the volume servers about the leader
newLeader, err := ms.Topo.Leader()
if err != nil {
- glog.Warningf("SendHeartbeat find leader: %v", err)
+ log.Warnf("SendHeartbeat find leader: %v", err)
return err
}
if err := stream.Send(&master_pb.HeartbeatResponse{
Leader: newLeader,
}); err != nil {
- glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err)
+ log.Warnf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err)
return err
}
}
@@ -205,7 +205,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
for {
_, err := stream.Recv()
if err != nil {
- glog.V(2).Infof("- client %v: %v", clientName, err)
+ log.Debugf("- client %v: %v", clientName, err)
stopChan <- true
break
}
@@ -217,7 +217,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
select {
case message := <-messageChan:
if err := stream.Send(message); err != nil {
- glog.V(0).Infof("=> client %v: %+v", clientName, message)
+ log.Infof("=> client %v: %+v", clientName, message)
return err
}
case <-ticker.C:
@@ -234,7 +234,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
leader, err := ms.Topo.Leader()
if err != nil {
- glog.Errorf("topo leader: %v", err)
+ log.Errorf("topo leader: %v", err)
return raft.NotLeaderError
}
if err := stream.Send(&master_pb.VolumeLocation{
@@ -247,7 +247,7 @@ func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe
func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) {
clientName = clientType + "@" + clientAddress
- glog.V(0).Infof("+ client %v", clientName)
+ log.Infof("+ client %v", clientName)
// we buffer this because otherwise we end up in a potential deadlock where
// the KeepConnected loop is no longer listening on this channel but we're
@@ -263,7 +263,7 @@ func (ms *MasterServer) addClient(clientType string, clientAddress string) (clie
}
func (ms *MasterServer) deleteClient(clientName string) {
- glog.V(0).Infof("- client %v", clientName)
+ log.Infof("- client %v", clientName)
ms.clientChansLock.Lock()
delete(ms.clientChans, clientName)
ms.clientChansLock.Unlock()
@@ -273,11 +273,11 @@ func findClientAddress(ctx context.Context, grpcPort uint32) string {
// fmt.Printf("FromContext %+v\n", ctx)
pr, ok := peer.FromContext(ctx)
if !ok {
- glog.Error("failed to get peer from ctx")
+ log.Error("failed to get peer from ctx")
return ""
}
if pr.Addr == net.Addr(nil) {
- glog.Error("failed to get peer address")
+ log.Error("failed to get peer address")
return ""
}
if grpcPort == 0 {
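The comment inside addClient above (cut off by the diff context) describes a concrete hazard: SendHeartbeat broadcasts to every client channel while holding clientChansLock, so if a KeepConnected loop has already returned and nobody is receiving, an unbuffered send would block forever with the lock held. A self-contained illustration of why the buffer helps; the channel type and capacity here are illustrative, not the server's:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        messageChan := make(chan string, 8) // buffered, like the per-client channel

        // Pretend the consumer already exited, as KeepConnected does on a Recv error.
        select {
        case messageChan <- "volume location update": // the buffer absorbs the send
            fmt.Println("broadcast did not block")
        case <-time.After(time.Second):
            fmt.Println("an unbuffered channel would block here forever")
        }
    }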
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index ccc94ebac..42d6dc6c1 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -15,7 +15,7 @@ import (
"github.com/gorilla/mux"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/sequence"
@@ -100,11 +100,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
seq := ms.createSequencer(option)
if nil == seq {
- glog.Fatalf("create sequencer failed.")
+ log.Fatalf("create sequencer failed.")
}
ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, 5, replicationAsMin)
ms.vg = topology.NewDefaultVolumeGrowth()
- glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB")
+ log.Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB")
ms.guard = security.NewGuard(ms.option.WhiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
@@ -138,16 +138,16 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
ms.Topo.RaftServer = raftServer.raftServer
ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) {
- glog.V(0).Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value())
+ log.Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value())
if ms.Topo.RaftServer.Leader() != "" {
- glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
+ log.Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
}
})
if ms.Topo.IsLeader() {
- glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
+ log.Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
} else {
if ms.Topo.RaftServer.Leader() != "" {
- glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "is the leader.")
+ log.Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "is the leader.")
}
}
}
@@ -165,7 +165,7 @@ func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
fmt.Errorf("Leader URL http://%s Parse Error: %v", ms.Topo.RaftServer.Leader(), err))
return
}
- glog.V(4).Infoln("proxying to leader", ms.Topo.RaftServer.Leader())
+ log.Trace("proxying to leader", ms.Topo.RaftServer.Leader())
proxy := httputil.NewSingleHostReverseProxy(targetUrl)
director := proxy.Director
proxy.Director = func(req *http.Request) {
@@ -189,7 +189,7 @@ func (ms *MasterServer) startAdminScripts() {
v := util.GetViper()
adminScripts := v.GetString("master.maintenance.scripts")
- glog.V(0).Infof("adminScripts:\n%v", adminScripts)
+ log.Infof("adminScripts:\n%v", adminScripts)
if adminScripts == "" {
return
}
@@ -215,7 +215,7 @@ func (ms *MasterServer) startAdminScripts() {
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort)
shellOptions.Directory = "/"
if err != nil {
- glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
+ log.Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
return
}
@@ -254,9 +254,9 @@ func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEn
for _, c := range shell.Commands {
if c.Name() == cmd {
- glog.V(0).Infof("executing: %s %v", cmd, args)
+ log.Infof("executing: %s %v", cmd, args)
if err := c.Do(args, commandEnv, os.Stdout); err != nil {
- glog.V(0).Infof("error: %v", err)
+ log.Infof("error: %v", err)
}
}
}
@@ -266,15 +266,15 @@ func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer
var seq sequence.Sequencer
v := util.GetViper()
seqType := strings.ToLower(v.GetString(SequencerType))
- glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType)
+ log.Debugf("[%s] : [%s]", SequencerType, seqType)
switch strings.ToLower(seqType) {
case "etcd":
var err error
urls := v.GetString(SequencerEtcdUrls)
- glog.V(0).Infof("[%s] : [%s]", SequencerEtcdUrls, urls)
+ log.Infof("[%s] : [%s]", SequencerEtcdUrls, urls)
seq, err = sequence.NewEtcdSequencer(urls, option.MetaFolder)
if err != nil {
- glog.Error(err)
+ log.Error(err)
seq = nil
}
default:
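Taken together, the hunks in this file establish the level mapping used throughout the patch: glog.V(0) → log.Infof/Infoln, glog.V(1)/V(2) → log.Debugf, glog.V(3)/V(4) → log.Tracef, glog.Warningf → log.Warnf, and Error/Fatalf keep their names. Below is a minimal sketch of the weed/util/log surface those call sites imply, assuming it wraps a logrus logger; the shape is inferred from the calls in this diff, not taken from the actual package.

    package log

    import "github.com/sirupsen/logrus"

    var logger = logrus.New()

    func Info(args ...interface{})                  { logger.Info(args...) }
    func Infoln(args ...interface{})                { logger.Infoln(args...) }
    func Infof(format string, args ...interface{})  { logger.Infof(format, args...) }
    func Debug(args ...interface{})                 { logger.Debug(args...) }
    func Debugf(format string, args ...interface{}) { logger.Debugf(format, args...) }
    func Trace(args ...interface{})                 { logger.Trace(args...) }
    func Tracef(format string, args ...interface{}) { logger.Tracef(format, args...) }
    func Warnf(format string, args ...interface{})  { logger.Warnf(format, args...) }
    func Error(args ...interface{})                 { logger.Error(args...) }
    func Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) }
    func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) }

    // IsTrace mirrors the old `if glog.V(4)` guard (see raft_server.go below).
    func IsTrace() bool { return logger.IsLevelEnabled(logrus.TraceLevel) }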
diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go
index 34235384f..8af3f9c3c 100644
--- a/weed/server/master_server_handlers_admin.go
+++ b/weed/server/master_server_handlers_admin.go
@@ -7,7 +7,7 @@ import (
"net/http"
"strconv"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
@@ -56,7 +56,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
var err error
gcThreshold, err = strconv.ParseFloat(gcString, 32)
if err != nil {
- glog.V(0).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err)
+ log.Infof("garbageThreshold %s is not a valid float number: %v", gcString, err)
writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("garbageThreshold %s is not a valid float number", gcString))
return
}
diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go
index 85841e409..8b26d96f5 100644
--- a/weed/server/raft_server.go
+++ b/weed/server/raft_server.go
@@ -14,7 +14,7 @@ import (
"github.com/chrislusf/raft"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/topology"
)
@@ -36,7 +36,7 @@ func (s StateMachine) Save() ([]byte, error) {
state := topology.MaxVolumeIdCommand{
MaxVolumeId: s.topo.GetMaxVolumeId(),
}
- glog.V(1).Infof("Save raft state %+v", state)
+ log.Debugf("Save raft state %+v", state)
return json.Marshal(state)
}
@@ -46,7 +46,7 @@ func (s StateMachine) Recovery(data []byte) error {
if err != nil {
return err
}
- glog.V(1).Infof("Recovery raft state %+v", state)
+ log.Debugf("Recovery raft state %+v", state)
s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId)
return nil
}
@@ -59,7 +59,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
topo: topo,
}
- if glog.V(4) {
+ if log.IsTrace() {
raft.SetLogLevel(2)
}
@@ -67,7 +67,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
var err error
transporter := raft.NewGrpcTransporter(grpcDialOption)
- glog.V(0).Infof("Starting RaftServer with %v", serverAddr)
+ log.Infof("Starting RaftServer with %v", serverAddr)
if !raftResumeState {
// always clear previous metadata
@@ -82,7 +82,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
stateMachine := StateMachine{topo: topo}
s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, stateMachine, topo, "")
if err != nil {
- glog.V(0).Infoln(err)
+ log.Infoln(err)
return nil, err
}
s.raftServer.SetHeartbeatInterval(time.Duration(300+rand.Intn(150)) * time.Millisecond)
@@ -111,10 +111,10 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
}
if exists {
if err := s.raftServer.RemovePeer(existsPeerName); err != nil {
- glog.V(0).Infoln(err)
+ log.Infoln(err)
return nil, err
} else {
- glog.V(0).Infof("removing old peer %s", existingPeer)
+ log.Infof("removing old peer %s", existingPeer)
}
}
}
@@ -126,7 +126,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
// s.DoJoinCommand()
}
- glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader())
+ log.Infof("current cluster leader: %v", s.raftServer.Leader())
return s, nil
}
@@ -151,13 +151,13 @@ func isTheFirstOne(self string, peers []string) bool {
func (s *RaftServer) DoJoinCommand() {
- glog.V(0).Infoln("Initializing new cluster")
+ log.Infoln("Initializing new cluster")
if _, err := s.raftServer.Do(&raft.DefaultJoinCommand{
Name: s.raftServer.Name(),
ConnectionString: pb.ServerToGrpcAddress(s.serverAddr),
}); err != nil {
- glog.Errorf("fail to send join command: %v", err)
+ log.Errorf("fail to send join command: %v", err)
}
}
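The `if log.IsTrace()` gate above replaces glog's boolean `glog.V(4)` check. Beyond toggling the raft library's own verbosity, this kind of predicate is how callers avoid building expensive log arguments at all. A runnable sketch; expensiveDump is a made-up stand-in:

    package main

    import "github.com/sirupsen/logrus"

    func expensiveDump() string {
        return "...large serialized raft state..." // stand-in for a costly dump
    }

    func main() {
        logger := logrus.New()
        logger.SetLevel(logrus.InfoLevel)
        if logger.IsLevelEnabled(logrus.TraceLevel) {
            logger.Trace(expensiveDump()) // skipped entirely at InfoLevel
        }
    }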
diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go
index 9296c63e9..77065c8bc 100644
--- a/weed/server/volume_grpc_admin.go
+++ b/weed/server/volume_grpc_admin.go
@@ -5,7 +5,7 @@ import (
"fmt"
"path/filepath"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -20,9 +20,9 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server
err := vs.store.DeleteCollection(req.Collection)
if err != nil {
- glog.Errorf("delete collection %s: %v", req.Collection, err)
+ log.Errorf("delete collection %s: %v", req.Collection, err)
} else {
- glog.V(2).Infof("delete collection %v", req)
+ log.Debugf("delete collection %v", req)
}
return resp, err
@@ -44,9 +44,9 @@ func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_p
)
if err != nil {
- glog.Errorf("assign volume %v: %v", req, err)
+ log.Errorf("assign volume %v: %v", req, err)
} else {
- glog.V(2).Infof("assign volume %v", req)
+ log.Debugf("assign volume %v", req)
}
return resp, err
@@ -60,9 +60,9 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V
err := vs.store.MountVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("volume mount %v: %v", req, err)
+ log.Errorf("volume mount %v: %v", req, err)
} else {
- glog.V(2).Infof("volume mount %v", req)
+ log.Debugf("volume mount %v", req)
}
return resp, err
@@ -76,9 +76,9 @@ func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb
err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("volume unmount %v: %v", req, err)
+ log.Errorf("volume unmount %v: %v", req, err)
} else {
- glog.V(2).Infof("volume unmount %v", req)
+ log.Debugf("volume unmount %v", req)
}
return resp, err
@@ -92,9 +92,9 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb.
err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("volume delete %v: %v", req, err)
+ log.Errorf("volume delete %v: %v", req, err)
} else {
- glog.V(2).Infof("volume delete %v", req)
+ log.Debugf("volume delete %v", req)
}
return resp, err
@@ -113,21 +113,21 @@ func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_
// unmount
if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil {
- glog.Errorf("volume configure unmount %v: %v", req, err)
+ log.Errorf("volume configure unmount %v: %v", req, err)
resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err)
return resp, nil
}
// modify the volume info file
if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil {
- glog.Errorf("volume configure %v: %v", req, err)
+ log.Errorf("volume configure %v: %v", req, err)
resp.Error = fmt.Sprintf("volume configure %v: %v", req, err)
return resp, nil
}
// mount
if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil {
- glog.Errorf("volume configure mount %v: %v", req, err)
+ log.Errorf("volume configure mount %v: %v", req, err)
resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err)
return resp, nil
}
@@ -143,9 +143,9 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv
err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("volume mark readonly %v: %v", req, err)
+ log.Errorf("volume mark readonly %v: %v", req, err)
} else {
- glog.V(2).Infof("volume mark readonly %v", req)
+ log.Debugf("volume mark readonly %v", req)
}
return resp, err
@@ -158,9 +158,9 @@ func (vs *VolumeServer) VolumeMarkWritable(ctx context.Context, req *volume_serv
err := vs.store.MarkVolumeWritable(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("volume mark writable %v: %v", req, err)
+ log.Errorf("volume mark writable %v: %v", req, err)
} else {
- glog.V(2).Infof("volume mark writable %v", req)
+ log.Debugf("volume mark writable %v", req)
}
return resp, err
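Every RPC in this file repeats the same two branches: Errorf on failure, Debugf on success. A hypothetical helper, not part of the commit, that captures the pattern using the same util/log API:

    package weed_server

    import "github.com/chrislusf/seaweedfs/weed/util/log"

    // logResult is a sketch: one call per RPC instead of the repeated
    // if/else blocks above, e.g. logResult("volume mount", req, err).
    func logResult(action string, req interface{}, err error) {
        if err != nil {
            log.Errorf("%s %v: %v", action, req, err)
        } else {
            log.Debugf("%s %v", action, req)
        }
    }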
diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go
index 2f594fa2b..d0a07c3b4 100644
--- a/weed/server/volume_grpc_client_to_master.go
+++ b/weed/server/volume_grpc_client_to_master.go
@@ -14,7 +14,7 @@ import (
"golang.org/x/net/context"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -39,7 +39,7 @@ func (vs *VolumeServer) checkWithMaster() (err error) {
if err == nil {
return
} else {
- glog.V(0).Infof("checkWithMaster %s: %v", master, err)
+ log.Infof("checkWithMaster %s: %v", master, err)
}
}
time.Sleep(1790 * time.Millisecond)
@@ -49,7 +49,7 @@ func (vs *VolumeServer) checkWithMaster() (err error) {
func (vs *VolumeServer) heartbeat() {
- glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes)
+ log.Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes)
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
@@ -67,13 +67,13 @@ func (vs *VolumeServer) heartbeat() {
}
masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master)
if parseErr != nil {
- glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
+ log.Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
continue
}
vs.store.MasterAddress = master
newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
- glog.V(0).Infof("heartbeat error: %v", err)
+ log.Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
newLeader = ""
vs.store.MasterAddress = ""
@@ -108,10 +108,10 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
client := master_pb.NewSeaweedClient(grpcConection)
stream, err := client.SendHeartbeat(ctx)
if err != nil {
- glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
+ log.Infof("SendHeartbeat to %s: %v", masterNode, err)
return "", err
}
- glog.V(0).Infof("Heartbeat to: %v", masterNode)
+ log.Infof("Heartbeat to: %v", masterNode)
vs.currentMaster = masterNode
doneChan := make(chan error, 1)
@@ -127,12 +127,12 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit())
if vs.store.MaybeAdjustVolumeMax() {
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
- glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", vs.currentMaster, err)
+ log.Infof("Volume Server Failed to talk with master %s: %v", vs.currentMaster, err)
}
}
}
if in.GetLeader() != "" && vs.currentMaster != in.GetLeader() {
- glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), vs.currentMaster)
+ log.Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), vs.currentMaster)
newLeader = in.GetLeader()
doneChan <- nil
return
@@ -141,12 +141,12 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
}()
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
- glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
- glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
@@ -161,9 +161,9 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
&volumeMessage,
},
}
- glog.V(1).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
+ log.Debugf("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
if err = stream.Send(deltaBeat); err != nil {
- glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case ecShardMessage := <-vs.store.NewEcShardsChan:
@@ -172,10 +172,10 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
&ecShardMessage,
},
}
- glog.V(1).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
+ log.Debugf("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
if err = stream.Send(deltaBeat); err != nil {
- glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case volumeMessage := <-vs.store.DeletedVolumesChan:
@@ -184,9 +184,9 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
&volumeMessage,
},
}
- glog.V(1).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
+ log.Debugf("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
if err = stream.Send(deltaBeat); err != nil {
- glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case ecShardMessage := <-vs.store.DeletedEcShardsChan:
@@ -195,23 +195,23 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
&ecShardMessage,
},
}
- glog.V(1).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
+ log.Debugf("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
if err = stream.Send(deltaBeat); err != nil {
- glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case <-volumeTickChan:
- glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port)
+ log.Tracef("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port)
vs.store.MaybeAdjustVolumeMax()
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
- glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
case <-ecShardTickChan:
- glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port)
+ log.Tracef("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port)
if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
- glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
case err = <-doneChan:
@@ -229,9 +229,9 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
Volumes: volumeMessages,
HasNoVolumes: len(volumeMessages) == 0,
}
- glog.V(1).Infof("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port)
+ log.Debugf("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port)
if err = stream.Send(emptyBeat); err != nil {
- glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ log.Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
return
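The per-tick heartbeat cases above now log at trace level. With a logrus-backed logger the level check happens before the format string is rendered, so at the default info level these high-frequency Tracef calls reduce to a cheap level comparison. A small demonstration:

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        logger := logrus.New()
        logger.SetLevel(logrus.InfoLevel)
        for i := 0; i < 1_000_000; i++ {
            // dropped before formatting: the level check fails first
            logger.Tracef("volume server %s:%d heartbeat", "127.0.0.1", 8080)
        }
        logger.Info("done") // the only line actually emitted
    }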
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 2aecb140f..b5b9a4aca 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -9,7 +9,7 @@ import (
"os"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -26,14 +26,14 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v != nil {
- glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId)
+ log.Infof("volume %d already exists. deleted before copying...", req.VolumeId)
err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
if err != nil {
return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
}
- glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
+ log.Infof("deleted existing volume %d before copying.", req.VolumeId)
}
location := vs.store.FindFreeLocation()
@@ -164,7 +164,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse
}
func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error {
- glog.V(4).Infof("writing to %s", fileName)
+ log.Tracef("writing to %s", fileName)
flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
if isAppend {
flags = os.O_WRONLY | os.O_CREATE
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 55e0261c8..e19c48745 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -11,7 +11,7 @@ import (
"path/filepath"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -38,7 +38,7 @@ Steps to apply erasure coding to .dat .idx files
// VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files
func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {
- glog.V(0).Infof("VolumeEcShardsGenerate: %v", req)
+ log.Infof("VolumeEcShardsGenerate: %v", req)
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v == nil {
@@ -71,7 +71,7 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
// VolumeEcShardsRebuild generates any of the missing .ec00 ~ .ec13 files
func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) {
- glog.V(0).Infof("VolumeEcShardsRebuild: %v", req)
+ log.Infof("VolumeEcShardsRebuild: %v", req)
baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
@@ -103,7 +103,7 @@ func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_s
// VolumeEcShardsCopy copies the .ecx and some ec data slices
func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {
- glog.V(0).Infof("VolumeEcShardsCopy: %v", req)
+ log.Infof("VolumeEcShardsCopy: %v", req)
location := vs.store.FindFreeLocation()
if location == nil {
@@ -159,7 +159,7 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
- glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds)
+ log.Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds)
found := false
for _, location := range vs.store.Locations {
@@ -219,15 +219,15 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {
- glog.V(0).Infof("VolumeEcShardsMount: %v", req)
+ log.Infof("VolumeEcShardsMount: %v", req)
for _, shardId := range req.ShardIds {
err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
if err != nil {
- glog.Errorf("ec shard mount %v: %v", req, err)
+ log.Errorf("ec shard mount %v: %v", req, err)
} else {
- glog.V(2).Infof("ec shard mount %v", req)
+ log.Debugf("ec shard mount %v", req)
}
if err != nil {
@@ -240,15 +240,15 @@ func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_ser
func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {
- glog.V(0).Infof("VolumeEcShardsUnmount: %v", req)
+ log.Infof("VolumeEcShardsUnmount: %v", req)
for _, shardId := range req.ShardIds {
err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
if err != nil {
- glog.Errorf("ec shard unmount %v: %v", req, err)
+ log.Errorf("ec shard unmount %v: %v", req, err)
} else {
- glog.V(2).Infof("ec shard unmount %v", req)
+ log.Debugf("ec shard unmount %v", req)
}
if err != nil {
@@ -329,7 +329,7 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea
func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) {
- glog.V(0).Infof("VolumeEcBlobDelete: %v", req)
+ log.Infof("VolumeEcBlobDelete: %v", req)
resp := &volume_server_pb.VolumeEcBlobDeleteResponse{}
@@ -359,7 +359,7 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv
// VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec00 ~ .ec13 files
func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) {
- glog.V(0).Infof("VolumeEcShardsToVolume: %v", req)
+ log.Infof("VolumeEcShardsToVolume: %v", req)
v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
if !found {
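For reference, the shard files named in the comments above (.ec00 ~ .ec13) come from SeaweedFS's 10+4 Reed-Solomon layout: ten data shards plus four parity shards per volume. A sketch that enumerates them; the constants mirror what the erasure_coding package defines, and the base name is hypothetical:

    package main

    import "fmt"

    const (
        dataShards   = 10 // mirrors erasure_coding.DataShardsCount
        parityShards = 4  // mirrors erasure_coding.ParityShardsCount
    )

    func main() {
        base := "mycollection_37" // hypothetical EcShardBaseFileName output
        for i := 0; i < dataShards+parityShards; i++ {
            fmt.Printf("%s.ec%02d\n", base, i) // mycollection_37.ec00 ... .ec13
        }
    }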
diff --git a/weed/server/volume_grpc_query.go b/weed/server/volume_grpc_query.go
index 2f4fab96a..21bfcdefb 100644
--- a/weed/server/volume_grpc_query.go
+++ b/weed/server/volume_grpc_query.go
@@ -1,7 +1,7 @@
package weed_server
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/query/json"
@@ -15,7 +15,7 @@ func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_
vid, id_cookie, err := operation.ParseFileId(fid)
if err != nil {
- glog.V(0).Infof("volume query failed to parse fid %s: %v", fid, err)
+ log.Infof("volume query failed to parse fid %s: %v", fid, err)
return err
}
@@ -25,12 +25,12 @@ func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_
cookie := n.Cookie
if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil); err != nil {
- glog.V(0).Infof("volume query failed to read fid %s: %v", fid, err)
+ log.Infof("volume query failed to read fid %s: %v", fid, err)
return err
}
if n.Cookie != cookie {
- glog.V(0).Infof("volume query failed to read fid cookie %s: %v", fid, err)
+ log.Infof("volume query failed to read fid cookie %s: %v", fid, err)
return err
}
diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go
index 2dde5b69c..9d1c0c35a 100644
--- a/weed/server/volume_grpc_tail.go
+++ b/weed/server/volume_grpc_tail.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -20,7 +20,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR
return fmt.Errorf("not found volume id %d", req.VolumeId)
}
- defer glog.V(1).Infof("tailing volume %d finished", v.Id)
+ defer log.Debugf("tailing volume %d finished", v.Id)
lastTimestampNs := req.SinceNs
drainingSeconds := req.IdleTimeoutSeconds
@@ -28,7 +28,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR
for {
lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs)
if err != nil {
- glog.Infof("sendNeedlesSince: %v", err)
+ log.Infof("sendNeedlesSince: %v", err)
return fmt.Errorf("streamFollow: %v", err)
}
time.Sleep(2 * time.Second)
@@ -42,11 +42,11 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR
if drainingSeconds <= 0 {
return nil
}
- glog.V(1).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds)
+ log.Debugf("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds)
} else {
lastTimestampNs = lastProcessedTimestampNs
drainingSeconds = req.IdleTimeoutSeconds
- glog.V(1).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds)
+ log.Debugf("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds)
}
}
@@ -87,7 +87,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv
return resp, fmt.Errorf("receiver not found volume id %d", req.VolumeId)
}
- defer glog.V(1).Infof("receive tailing volume %d finished", v.Id)
+ defer log.Debugf("receive tailing volume %d finished", v.Id)
return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error {
_, err := vs.store.WriteVolumeNeedle(v.Id, n, false)
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index b87de4b5b..1c3f9b349 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -3,7 +3,7 @@ package weed_server
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -17,7 +17,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
resp.GarbageRatio = garbageRatio
if err != nil {
- glog.V(3).Infof("check volume %d: %v", req.VolumeId, err)
+ log.Tracef("check volume %d: %v", req.VolumeId, err)
}
return resp, err
@@ -31,9 +31,9 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser
err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond)
if err != nil {
- glog.Errorf("compact volume %d: %v", req.VolumeId, err)
+ log.Errorf("compact volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("compact volume %d", req.VolumeId)
+ log.Debugf("compact volume %d", req.VolumeId)
}
return resp, err
@@ -47,9 +47,9 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("commit volume %d: %v", req.VolumeId, err)
+ log.Errorf("commit volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("commit volume %d", req.VolumeId)
+ log.Debugf("commit volume %d", req.VolumeId)
}
if err == nil {
if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
@@ -68,9 +68,9 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser
err := vs.store.CommitCleanupVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("cleanup volume %d: %v", req.VolumeId, err)
+ log.Errorf("cleanup volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("cleanup volume %d", req.VolumeId)
+ log.Debugf("cleanup volume %d", req.VolumeId)
}
return resp, err
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 83df32fdd..1ab468712 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -9,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -103,7 +103,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
}
func (vs *VolumeServer) Shutdown() {
- glog.V(0).Infoln("Shutting down volume server...")
+ log.Infoln("Shutting down volume server...")
vs.store.Close()
- glog.V(0).Infoln("Shut down successfully!")
+ log.Infoln("Shut down successfully!")
}
diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go
index 7852c950a..69a8ae87b 100644
--- a/weed/server/volume_server_handlers.go
+++ b/weed/server/volume_server_handlers.go
@@ -6,7 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
)
@@ -89,17 +89,17 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str
tokenStr := security.GetJwt(r)
if tokenStr == "" {
- glog.V(1).Infof("missing jwt from %s", r.RemoteAddr)
+ log.Debugf("missing jwt from %s", r.RemoteAddr)
return false
}
token, err := security.DecodeJwt(signingKey, tokenStr)
if err != nil {
- glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err)
+ log.Debugf("jwt verification error from %s: %v", r.RemoteAddr, err)
return false
}
if !token.Valid {
- glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr)
+ log.Debugf("jwt invalid from %s: %v", r.RemoteAddr, tokenStr)
return false
}
@@ -109,6 +109,6 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str
}
return sc.Fid == vid+","+fid
}
- glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr)
+ log.Debugf("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr)
return false
}
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 15fd446e7..18c0023a6 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -14,7 +14,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/stats"
@@ -43,28 +43,28 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
volumeId, err := needle.NewVolumeId(vid)
if err != nil {
- glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err)
+ log.Debugf("parsing vid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
err = n.ParsePath(fid)
if err != nil {
- glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err)
+ log.Debugf("parsing fid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
- // glog.V(4).Infoln("volume", volumeId, "reading", n)
+ // log.Trace("volume", volumeId, "reading", n)
hasVolume := vs.store.HasVolume(volumeId)
_, hasEcVolume := vs.store.FindEcVolume(volumeId)
if !hasVolume && !hasEcVolume {
if !vs.ReadRedirect {
- glog.V(2).Infoln("volume is not local:", err, r.URL.Path)
+ log.Debug("volume is not local:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
return
}
lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())
- glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
+ log.Debug("volume", volumeId, "found on", lookupResult, "error", err)
if err == nil && len(lookupResult.Locations) > 0 {
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid)
@@ -76,7 +76,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
} else {
- glog.V(2).Infoln("lookup error:", err, r.URL.Path)
+ log.Debug("lookup error:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
return
@@ -94,17 +94,17 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
count, err = vs.store.ReadEcShardNeedle(volumeId, n)
}
if err != nil && err != storage.ErrorDeleted && r.FormValue("type") != "replicate" && hasVolume {
- glog.V(4).Infof("read needle: %v", err)
+ log.Tracef("read needle: %v", err)
// start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request
}
- // glog.V(4).Infoln("read bytes", count, "error", err)
+ // log.Trace("read bytes", count, "error", err)
if err != nil || count < 0 {
- glog.V(3).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err)
+ log.Tracef("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err)
w.WriteHeader(http.StatusNotFound)
return
}
if n.Cookie != cookie {
- glog.V(0).Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent())
+ log.Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent())
w.WriteHeader(http.StatusNotFound)
return
}
@@ -129,7 +129,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
pairMap := make(map[string]string)
err = json.Unmarshal(n.Pairs, &pairMap)
if err != nil {
- glog.V(0).Infoln("Unmarshal pairs error:", err)
+ log.Infoln("Unmarshal pairs error:", err)
}
for k, v := range pairMap {
w.Header().Set(k, v)
@@ -157,7 +157,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if n.IsCompressed() {
if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize {
if n.Data, err = util.DecompressData(n.Data); err != nil {
- glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
+ log.Infoln("ungzip error:", err, r.URL.Path)
}
} else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) {
w.Header().Set("Content-Encoding", "zstd")
@@ -165,7 +165,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.Header().Set("Content-Encoding", "gzip")
} else {
if n.Data, err = util.DecompressData(n.Data); err != nil {
- glog.V(0).Infoln("uncompress error:", err, r.URL.Path)
+ log.Infoln("uncompress error:", err, r.URL.Path)
}
}
}
@@ -173,7 +173,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)
if e := writeResponseContent(filename, mtype, rs, w, r); e != nil {
- glog.V(2).Infoln("response write error:", e)
+ log.Debug("response write error:", e)
}
}
@@ -184,7 +184,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed())
if e != nil {
- glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
+ log.Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
return false
}
if fileName == "" && chunkManifest.Name != "" {
@@ -211,7 +211,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
rs := conditionallyResizeImages(chunkedFileReader, ext, r)
if e := writeResponseContent(fileName, mType, rs, w, r); e != nil {
- glog.V(2).Infoln("response write error:", e)
+ log.Debug("response write error:", e)
}
return true
}
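One caveat in this file's migration: glog's Infoln formats with fmt.Sprintln, which puts a space between every operand, while logrus-style Debug/Trace format with fmt.Sprint, which only inserts a space between two adjacent non-string operands. Assuming util/log forwards to logrus, calls like `log.Debug("volume is not local:", err, r.URL.Path)` above (and `log.Trace("proxying to leader", ...)` in master_server.go) lose their separators; Debugln/Traceln would preserve them. A demonstration:

    package main

    import (
        "errors"

        "github.com/sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()
        logger.SetLevel(logrus.DebugLevel)
        err := errors.New("no such volume")
        // Sprint semantics: msg="volume is not local:no such volume/3,0144"
        logger.Debug("volume is not local:", err, "/3,0144")
        // Sprintln semantics: msg="volume is not local: no such volume /3,0144"
        logger.Debugln("volume is not local:", err, "/3,0144")
    }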
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 01a77b901..a09aa0eb9 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -8,7 +8,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -25,7 +25,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}()
if e := r.ParseForm(); e != nil {
- glog.V(0).Infoln("form parse error:", e)
+ log.Infoln("form parse error:", e)
writeJsonError(w, r, http.StatusBadRequest, e)
return
}
@@ -33,7 +33,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
vid, fid, _, _, _ := parseURLPath(r.URL.Path)
volumeId, ve := needle.NewVolumeId(vid)
if ve != nil {
- glog.V(0).Infoln("NewVolumeId error:", ve)
+ log.Infoln("NewVolumeId error:", ve)
writeJsonError(w, r, http.StatusBadRequest, ve)
return
}
@@ -93,7 +93,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
- // glog.V(2).Infof("volume %s deleting %s", vid, n)
+ // log.Debugf("volume %s deleting %s", vid, n)
cookie := n.Cookie
@@ -114,7 +114,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
}
if n.Cookie != cookie {
- glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+ log.Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
writeJsonError(w, r, http.StatusBadRequest, errors.New("File Random Cookie does not match."))
return
}
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 3e9f882e3..054c22818 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -20,7 +20,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -136,7 +136,7 @@ func clearName(name string) (string, error) {
func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error {
- glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath)
+ log.Debugf("WebDavFileSystem.Mkdir %v", fullDirPath)
if !strings.HasSuffix(fullDirPath, "/") {
fullDirPath += "/"
@@ -170,7 +170,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
Signatures: []int32{fs.signature},
}
- glog.V(1).Infof("mkdir: %v", request)
+ log.Debugf("mkdir: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("mkdir %s/%s: %v", dir, name, err)
}
@@ -181,7 +181,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
- glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)
+ log.Debugf("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)
var err error
if fullFilePath, err = clearName(fullFilePath); err != nil {
@@ -265,14 +265,14 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string)
func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error {
- glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name)
+ log.Debugf("WebDavFileSystem.RemoveAll %v", name)
return fs.removeAll(ctx, name)
}
func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) error {
- glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName)
+ log.Debugf("WebDavFileSystem.Rename %v to %v", oldName, newName)
var err error
if oldName, err = clearName(oldName); err != nil {
@@ -353,14 +353,14 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) {
- glog.V(2).Infof("WebDavFileSystem.Stat %v", name)
+ log.Debugf("WebDavFileSystem.Stat %v", name)
return fs.stat(ctx, name)
}
func (f *WebDavFile) Write(buf []byte) (int, error) {
- glog.V(2).Infof("WebDavFileSystem.Write %v", f.name)
+ log.Debugf("WebDavFileSystem.Write %v", f.name)
dir, _ := util.FullPath(f.name).DirAndName()
@@ -392,7 +392,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
resp, err := client.AssignVolume(ctx, request)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@@ -410,11 +410,11 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth)
if err != nil {
- glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err)
+ log.Infof("upload data %v to %s: %v", f.name, fileUrl, err)
return 0, fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
- glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, err)
+ log.Infof("upload failure %v to %s: %v", f.name, fileUrl, err)
return 0, fmt.Errorf("upload result: %v", uploadResult.Error)
}
@@ -439,7 +439,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
})
if err == nil {
- glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf)))
+ log.Tracef("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf)))
f.off += int64(len(buf))
}
@@ -448,7 +448,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
func (f *WebDavFile) Close() error {
- glog.V(2).Infof("WebDavFileSystem.Close %v", f.name)
+ log.Debugf("WebDavFileSystem.Close %v", f.name)
if f.entry != nil {
f.entry = nil
@@ -460,7 +460,7 @@ func (f *WebDavFile) Close() error {
func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
- glog.V(2).Infof("WebDavFileSystem.Read %v", f.name)
+ log.Debugf("WebDavFileSystem.Read %v", f.name)
if f.entry == nil {
f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
@@ -486,11 +486,11 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
readSize, err = f.reader.ReadAt(p, f.off)
- glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize))
+ log.Tracef("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize))
f.off += int64(readSize)
if err != nil && err != io.EOF {
- glog.Errorf("file read %s: %v", f.name, err)
+ log.Errorf("file read %s: %v", f.name, err)
}
return
@@ -499,7 +499,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
- glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count)
+ log.Debugf("WebDavFileSystem.Readdir %v count %d", f.name, count)
dir, _ := util.FullPath(f.name).DirAndName()
@@ -515,7 +515,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
if !strings.HasSuffix(fi.name, "/") && fi.IsDir() {
fi.name += "/"
}
- glog.V(4).Infof("entry: %v", fi.name)
+ log.Tracef("entry: %v", fi.name)
ret = append(ret, &fi)
return nil
})
@@ -542,7 +542,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) {
- glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence)
+ log.Debugf("WebDavFile.Seek %v %v %v", f.name, offset, whence)
ctx := context.Background()
@@ -563,7 +563,7 @@ func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) {
func (f *WebDavFile) Stat() (os.FileInfo, error) {
- glog.V(2).Infof("WebDavFile.Stat %v", f.name)
+ log.Debugf("WebDavFile.Stat %v", f.name)
ctx := context.Background()
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index a808335eb..42e2bdf54 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -6,7 +6,7 @@ import (
"math"
"sort"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -87,7 +87,7 @@ func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,
if targetServer.info.Id != existingLocation {
copiedShardIds = shardIdsToCopy
- glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds)
+ log.Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds)
}
return nil
diff --git a/weed/stats/disk.go b/weed/stats/disk.go
index a8f906213..4c0512f80 100644
--- a/weed/stats/disk.go
+++ b/weed/stats/disk.go
@@ -1,7 +1,7 @@
package stats
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@@ -9,7 +9,7 @@ func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) {
disk = &volume_server_pb.DiskStatus{Dir: path}
fillInDiskStatus(disk)
if disk.PercentUsed > 95 {
- glog.V(0).Infof("disk status: %v", disk)
+ log.Infof("disk status: %v", disk)
}
return
}
diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go
index 3f5d851a4..78aad4990 100644
--- a/weed/stats/metrics.go
+++ b/weed/stats/metrics.go
@@ -2,7 +2,6 @@ package stats
import (
"fmt"
- "log"
"net/http"
"os"
"strings"
@@ -12,7 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/client_golang/prometheus/push"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var (
@@ -152,14 +151,14 @@ func LoopPushingMetric(name, instance, addr string, intervalSeconds int) {
return
}
- glog.V(0).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds)
+ log.Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds)
pusher := push.New(addr, name).Gatherer(Gather).Grouping("instance", instance)
for {
err := pusher.Push()
if err != nil && !strings.HasPrefix(err.Error(), "unexpected status code 200") {
- glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err)
+ log.Infof("could not push metrics to prometheus push gateway %s: %v", addr, err)
}
if intervalSeconds <= 0 {
intervalSeconds = 15
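The removed stdlib "log" import above is forced by the switch: the new wrapper also binds the identifier log, and a Go file cannot import two packages under the same name without aliasing one of them. A minimal reproduction of the conflict:

    package main

    import (
        "log" // stdlib; importing weed/util/log in the same file would need
        // an alias, otherwise compilation fails with an error along the
        // lines of "log redeclared in this block".
    )

    func main() {
        log.Println("only one package can own the name log in a file")
    }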
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go
index daab29621..e32cadf08 100644
--- a/weed/storage/backend/backend.go
+++ b/weed/storage/backend/backend.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/spf13/viper"
@@ -52,7 +52,7 @@ func LoadConfiguration(config *viper.Viper) {
for backendTypeName := range config.GetStringMap(StorageBackendPrefix) {
backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)]
if !found {
- glog.Fatalf("backend storage type %s not found", backendTypeName)
+ log.Fatalf("backend storage type %s not found", backendTypeName)
}
for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) {
if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") {
@@ -61,7 +61,7 @@ func LoadConfiguration(config *viper.Viper) {
backendStorage, buildErr := backendStorageFactory.BuildStorage(config,
StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId)
if buildErr != nil {
- glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId)
+ log.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId)
}
BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage
if backendStorageId == "default" {
@@ -78,12 +78,12 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) {
for _, storageBackend := range storageBackends {
backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)]
if !found {
- glog.Warningf("storage type %s not found", storageBackend.Type)
+ log.Warnf("storage type %s not found", storageBackend.Type)
continue
}
backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id)
if buildErr != nil {
- glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id)
+ log.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id)
}
BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage
if storageBackend.Id == "default" {
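Fatalf keeps its exit-on-failure contract for these configuration errors, but the semantics shift slightly: glog.Fatalf dumps goroutine stack traces before exiting, while a logrus-style Fatalf runs registered exit handlers and then calls os.Exit(1), skipping deferred functions. A sketch of the logrus behavior, assuming util/log forwards to it:

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        logrus.RegisterExitHandler(func() {
            // flush buffers, close handles, etc. -- runs before os.Exit(1)
        })
        defer logrus.Info("never printed: os.Exit skips deferred calls")
        logrus.Fatalf("backend storage type %s not found", "s3x") // exits with status 1
    }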
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go
index 4706c9334..94c71fef6 100644
--- a/weed/storage/backend/s3_backend/s3_backend.go
+++ b/weed/storage/backend/s3_backend/s3_backend.go
@@ -11,7 +11,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/google/uuid"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
)
@@ -51,7 +51,7 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
- glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
+ log.Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
return
}
@@ -83,7 +83,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn
randomUuid, _ := uuid.NewRandom()
key = randomUuid.String()
- glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
+ log.Debugf("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
@@ -92,7 +92,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
- glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
+ log.Debugf("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)
@@ -101,7 +101,7 @@ func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(pro
func (s *S3BackendStorage) DeleteFile(key string) (err error) {
- glog.V(1).Infof("delete dat file %s from remote", key)
+ log.Debugf("delete dat file %s from remote", key)
err = deleteFromS3(s.conn, s.bucket, key)
@@ -118,7 +118,7 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n
bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)
- // glog.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
+ // log.Infof("read %s %s", s3backendStorageFile.key, bytesRange)
getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
Bucket: &s3backendStorageFile.backendStorage.bucket,
@@ -131,8 +131,8 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n
}
defer getObjectOutput.Body.Close()
- glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
- glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
+ log.Tracef("read %s %s", s3backendStorageFile.key, bytesRange)
+ log.Tracef("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
for {
if n, err = getObjectOutput.Body.Read(p); err == nil && n < len(p) {
diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go
index dbc28446a..fd2c56939 100644
--- a/weed/storage/backend/s3_backend/s3_download.go
+++ b/weed/storage/backend/s3_backend/s3_download.go
@@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string,
@@ -50,7 +50,7 @@ func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string
return fileSize, fmt.Errorf("failed to download file %s: %v", destFileName, err)
}
- glog.V(1).Infof("downloaded file %s\n", destFileName)
+ log.Debugf("downloaded file %s\n", destFileName)
return
}
diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go
index 500a85590..c7b23d005 100644
--- a/weed/storage/backend/s3_backend/s3_upload.go
+++ b/weed/storage/backend/s3_backend/s3_upload.go
@@ -9,7 +9,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
@@ -73,7 +73,7 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
if err != nil {
return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
}
- glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location)
+ log.Debugf("file %s uploaded to %s\n", filename, result.Location)
return
}
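Both Debugf lines in the two S3 hunks above carry a trailing \n inherited from glog. A logrus-style logger terminates each entry itself, so the embedded newline is redundant and, depending on the formatter, shows up as a stray blank line or a raw newline inside the quoted msg field. A sketch of the fix:

    log.Debugf("file %s uploaded to %s\n", filename, result.Location) // as committed: extra newline
    log.Debugf("file %s uploaded to %s", filename, result.Location)   // preferred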
diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go
index d4bd8e40f..d408581f3 100644
--- a/weed/storage/backend/volume_create.go
+++ b/weed/storage/backend/volume_create.go
@@ -5,7 +5,7 @@ package backend
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
@@ -14,7 +14,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
return nil, e
}
if preallocate > 0 {
- glog.V(2).Infof("Preallocated disk space for %s is not supported", fileName)
+ log.Debugf("Preallocated disk space for %s is not supported", fileName)
}
return NewDiskFile(file), nil
}
diff --git a/weed/storage/backend/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go
index 260c2c2a3..ecb5f6378 100644
--- a/weed/storage/backend/volume_create_linux.go
+++ b/weed/storage/backend/volume_create_linux.go
@@ -6,7 +6,7 @@ import (
"os"
"syscall"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
@@ -16,7 +16,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
}
if preallocate != 0 {
syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
- glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
+ log.Debugf("Preallocated %d bytes disk space for %s", preallocate, fileName)
}
return NewDiskFile(file), nil
}
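Visible in the same hunk, independent of the logging swap: syscall.Fallocate returns an error that is discarded, so the Debugf line can report a preallocation that silently failed (for example on filesystems without fallocate support). A sketch that checks it, keeping the new util/log API:

    if preallocate != 0 {
        if err := syscall.Fallocate(int(file.Fd()), 1 /* FALLOC_FL_KEEP_SIZE */, 0, preallocate); err != nil {
            log.Warnf("fallocate %s (%d bytes): %v", fileName, preallocate, err)
        } else {
            log.Debugf("Preallocated %d bytes disk space for %s", preallocate, fileName)
        }
    }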
diff --git a/weed/storage/backend/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go
index 7d40ec0d7..1ffb2c9d8 100644
--- a/weed/storage/backend/volume_create_windows.go
+++ b/weed/storage/backend/volume_create_windows.go
@@ -6,13 +6,13 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
"golang.org/x/sys/windows"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
if preallocate > 0 {
- glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
+ log.Infof("Preallocated disk space for %s is not supported", fileName)
}
if memoryMapSizeMB > 0 {
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index 775ebf092..41f6e9adf 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -9,7 +9,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -66,13 +66,13 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
noteFile := l.Directory + "/" + name + ".note"
if util.FileExists(noteFile) {
note, _ := ioutil.ReadFile(noteFile)
- glog.Warningf("volume %s was not completed: %s", name, string(note))
+ log.Warnf("volume %s was not completed: %s", name, string(note))
removeVolumeFiles(l.Directory + "/" + name)
return false
}
vid, collection, err := l.volumeIdFromPath(fileInfo)
if err != nil {
- glog.Warningf("get volume id failed, %s, err : %s", name, err)
+ log.Warnf("get volume id failed, %s, err : %s", name, err)
return false
}
@@ -81,20 +81,20 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
- glog.V(1).Infof("loaded volume, %v", vid)
+ log.Debugf("loaded volume, %v", vid)
return true
}
v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0)
if e != nil {
- glog.V(0).Infof("new volume %s error %s", name, e)
+ log.Infof("new volume %s error %s", name, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
- glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
+ log.Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
l.Directory+"/"+name+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
@@ -130,10 +130,10 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
l.concurrentLoadingVolumes(needleMapKind, 10)
- glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
+ log.Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
- glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
+ log.Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
@@ -322,9 +322,9 @@ func (l *DiskLocation) CheckDiskSpace() {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
if l.isDiskSpaceLow {
- glog.V(0).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ log.Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
} else {
- glog.V(4).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ log.Tracef("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
}
}
time.Sleep(time.Minute)
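
One detail worth flagging in the CheckDiskSpace hunk: both branches print the same "<" comparison text, so the new Trace-level line still claims free space is below the minimum even when it is not. The intent of the pattern is level asymmetry, roughly as sketched here (names are illustrative, not from the repo):

package diskcheck

import "github.com/sirupsen/logrus"

// reportDiskSpace logs the low-space condition at Info so operators see it,
// and the healthy steady state at Trace so a once-a-minute loop stays quiet
// at default levels.
func reportDiskSpace(dir string, percentFree, minFree float64, isLow bool) {
    if isLow {
        logrus.Infof("dir %s freePercent %.2f%% < min %.2f%%", dir, percentFree, minFree)
    } else {
        logrus.Tracef("dir %s freePercent %.2f%% >= min %.2f%%", dir, percentFree, minFree)
    }
}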
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
index 34b639407..8111f24f7 100644
--- a/weed/storage/erasure_coding/ec_encoder.go
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -7,7 +7,7 @@ import (
"github.com/klauspost/reedsolomon"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -78,7 +78,7 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64,
return fmt.Errorf("failed to stat dat file: %v", err)
}
- glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
+ log.Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
err = encodeDatFile(fi.Size(), err, baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
if err != nil {
return fmt.Errorf("encodeDatFile: %v", err)
@@ -122,7 +122,7 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i
bufferSize := int64(len(buffers[0]))
batchCount := blockSize / bufferSize
if blockSize%bufferSize != 0 {
- glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
+ log.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
}
for b := int64(0); b < batchCount; b++ {
diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go
index 5215d3c4f..f55adf8cb 100644
--- a/weed/storage/idx/walk.go
+++ b/weed/storage/idx/walk.go
@@ -3,7 +3,7 @@ package idx
import (
"io"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -16,7 +16,7 @@ func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offse
if count == 0 && e == io.EOF {
return nil
}
- glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
+ log.Tracef("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
var (
key types.NeedleId
@@ -36,7 +36,7 @@ func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offse
return nil
}
count, e = r.ReadAt(bytes, readerOffset)
- glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
+ log.Tracef("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
}
return e
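
WalkIndexFile batches ReadAt calls and advances readerOffset by the byte count actually returned, tolerating the short final read at io.EOF. A self-contained sketch of the same loop, assuming illustrative 16-byte entries (8-byte key, 4-byte offset, 4-byte size; the real widths come from the types package and vary with the build):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

func walkIndex(r io.ReaderAt, fn func(key uint64, offset uint32, size int32) error) error {
    const entrySize = 16
    buf := make([]byte, entrySize*1024) // up to 1024 entries per batch
    var readerOffset int64
    for {
        count, err := r.ReadAt(buf, readerOffset)
        if count == 0 && err == io.EOF {
            return nil
        }
        if err != nil && err != io.EOF {
            return err
        }
        readerOffset += int64(count)
        for i := 0; i+entrySize <= count; i += entrySize {
            key := binary.BigEndian.Uint64(buf[i : i+8])
            offset := binary.BigEndian.Uint32(buf[i+8 : i+12])
            size := int32(binary.BigEndian.Uint32(buf[i+12 : i+16]))
            if e := fn(key, offset, size); e != nil {
                return e
            }
        }
        if err == io.EOF {
            return nil
        }
    }
}

func main() {
    raw := make([]byte, 32) // two zero-padded fake entries
    binary.BigEndian.PutUint64(raw[0:8], 1)
    binary.BigEndian.PutUint64(raw[16:24], 2)
    _ = walkIndex(bytes.NewReader(raw), func(key uint64, offset uint32, size int32) error {
        fmt.Println("key", key, "offset", offset, "size", size)
        return nil
    })
}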
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
index 4d244046e..3cb62ebb8 100644
--- a/weed/storage/needle/needle_parse_upload.go
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -13,7 +13,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -120,7 +120,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
}()
form, fe := r.MultipartReader()
if fe != nil {
- glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+ log.Infoln("MultipartReader [ERROR]", fe)
e = fe
return
}
@@ -128,7 +128,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
// first multi-part item
part, fe := form.NextPart()
if fe != nil {
- glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+ log.Infoln("Reading Multi part [ERROR]", fe)
e = fe
return
}
@@ -140,7 +140,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
if e != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", e)
+ log.Infoln("Reading Content [ERROR]", e)
return
}
if len(pu.Data) == int(sizeLimit)+1 {
@@ -161,7 +161,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
if fName != "" {
data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
if fe2 != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+ log.Infoln("Reading Content [ERROR]", fe2)
e = fe2
return
}
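
The multipart path bounds memory with a small trick: it reads at most sizeLimit+1 bytes, and if exactly sizeLimit+1 arrive, the part must have exceeded the limit. Extracted into a runnable form:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

func readWithLimit(r io.Reader, sizeLimit int64) ([]byte, error) {
    // Read one byte past the limit: hitting sizeLimit+1 proves the input
    // was too large without ever buffering more than that.
    data, err := ioutil.ReadAll(io.LimitReader(r, sizeLimit+1))
    if err != nil {
        return nil, err
    }
    if int64(len(data)) == sizeLimit+1 {
        return nil, fmt.Errorf("input over the limit of %d bytes", sizeLimit)
    }
    return data, nil
}

func main() {
    _, err := readWithLimit(strings.NewReader("0123456789"), 4)
    fmt.Println(err) // input over the limit of 4 bytes
}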
diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go
index e758a6fee..69b63afa9 100644
--- a/weed/storage/needle/needle_read_write.go
+++ b/weed/storage/needle/needle_read_write.go
@@ -6,7 +6,7 @@ import (
"io"
"math"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -133,7 +133,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
defer func(w backend.BackendStorageFile, off int64) {
if err != nil {
if te := w.Truncate(end); te != nil {
- glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
+ log.Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
}
}
}(w, end)
@@ -172,7 +172,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio
if n.Size != size {
// cookie is not always passed in for this API. Use size to do preliminary checking.
if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) {
- glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
+ log.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
return ErrorSizeMismatch
}
return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
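
Needle.Append records the end of the data file before writing and, in a deferred closure, truncates back to it when the write fails, so a partially written needle never survives in the volume. The shape of that pattern, with a stand-in interface instead of the backend type:

package needle

import "log"

// Truncater stands in for backend.BackendStorageFile in this sketch.
type Truncater interface {
    Truncate(size int64) error
    Name() string
}

// appendWithRollback runs write and, on failure, truncates the file back to
// the remembered end offset so no partial record remains.
func appendWithRollback(w Truncater, end int64, write func() error) (err error) {
    defer func() {
        if err != nil {
            if te := w.Truncate(end); te != nil {
                log.Printf("failed to truncate %s back to %d: %v", w.Name(), end, te)
            }
        }
    }()
    return write()
}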
diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go
index 199cb26b3..9125723eb 100644
--- a/weed/storage/needle_map/compact_map_test.go
+++ b/weed/storage/needle_map/compact_map_test.go
@@ -62,7 +62,7 @@ func TestCompactMap(t *testing.T) {
// for i := uint32(0); i < 100; i++ {
// if v := m.Get(Key(i)); v != nil {
- // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
+ // log.Trace(i, "=", v.Key, v.Offset, v.Size)
// }
// }
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
index b25b5e89a..f88c3ecc4 100644
--- a/weed/storage/needle_map/memdb.go
+++ b/weed/storage/needle_map/memdb.go
@@ -8,7 +8,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -24,7 +24,7 @@ func NewMemDb() *MemDb {
var err error
t := &MemDb{}
if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil {
- glog.V(0).Infof("MemDb fails to open: %v", err)
+ log.Infof("MemDb fails to open: %v", err)
return nil
}
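
MemDb keeps the needle map in a LevelDB opened against goleveldb's in-memory storage, so it behaves like the on-disk store without touching disk. A stand-alone example with a made-up key and value:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
    // NewMemStorage keeps all tables in RAM; nothing is written to disk.
    db, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        fmt.Println("open:", err)
        return
    }
    defer db.Close()

    _ = db.Put([]byte("needle-1"), []byte("offset=4096,size=120"), nil)
    v, _ := db.Get([]byte("needle-1"), nil)
    fmt.Printf("%s\n", v) // offset=4096,size=120
}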
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 415cd14dd..22f0b8262 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -12,7 +12,7 @@ import (
"github.com/syndtr/goleveldb/leveldb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -27,11 +27,11 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
m = &LevelDbNeedleMap{dbFileName: dbFileName}
m.indexFile = indexFile
if !isLevelDbFresh(dbFileName, indexFile) {
- glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name())
+ log.Debugf("Start to Generate %s from %s", dbFileName, indexFile.Name())
generateLevelDbFile(dbFileName, indexFile)
- glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name())
+ log.Debugf("Finished Generating %s from %s", dbFileName, indexFile.Name())
}
- glog.V(1).Infof("Opening %s...", dbFileName)
+ log.Debugf("Opening %s...", dbFileName)
if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil {
if errors.IsCorrupted(err) {
@@ -41,7 +41,7 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
return
}
}
- glog.V(1).Infof("Loading %s...", indexFile.Name())
+ log.Debugf("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
@@ -60,7 +60,7 @@ func isLevelDbFresh(dbFileName string, indexFile *os.File) bool {
dbStat, dbStatErr := dbLogFile.Stat()
indexStat, indexStatErr := indexFile.Stat()
if dbStatErr != nil || indexStatErr != nil {
- glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
+ log.Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
return false
}
@@ -141,14 +141,14 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
func (m *LevelDbNeedleMap) Close() {
indexFileName := m.indexFile.Name()
if err := m.indexFile.Sync(); err != nil {
- glog.Warningf("sync file %s failed: %v", indexFileName, err)
+ log.Warnf("sync file %s failed: %v", indexFileName, err)
}
if err := m.indexFile.Close(); err != nil {
- glog.Warningf("close index file %s failed: %v", indexFileName, err)
+ log.Warnf("close index file %s failed: %v", indexFileName, err)
}
if err := m.db.Close(); err != nil {
- glog.Warningf("close levelDB failed: %v", err)
+ log.Warnf("close levelDB failed: %v", err)
}
}
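
The stat-failure branch above makes isLevelDbFresh return false, forcing a rebuild of the .ldb from the .idx whenever freshness cannot be determined. A sketch of that mod-time comparison; the path handling is assumed, not copied from the repo:

package needlemap

import "os"

// isFresh reports whether the db's log file is at least as new as the index
// file; on any stat failure it returns false so the caller regenerates.
func isFresh(dbLogPath string, indexFile *os.File) bool {
    dbLogFile, err := os.Open(dbLogPath)
    if err != nil {
        return false
    }
    defer dbLogFile.Close()

    dbStat, dbStatErr := dbLogFile.Stat()
    indexStat, indexStatErr := indexFile.Stat()
    if dbStatErr != nil || indexStatErr != nil {
        return false // cannot tell, so rebuild
    }
    return !dbStat.ModTime().Before(indexStat.ModTime())
}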
diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go
index d0891dc98..0ac93e8b1 100644
--- a/weed/storage/needle_map_memory.go
+++ b/weed/storage/needle_map_memory.go
@@ -3,7 +3,7 @@ package storage
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -45,7 +45,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
}
return nil
})
- glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
+ log.Debugf("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
return nm, e
}
@@ -66,7 +66,7 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
func (nm *NeedleMap) Close() {
indexFileName := nm.indexFile.Name()
if err := nm.indexFile.Sync(); err != nil {
- glog.Warningf("sync file %s failed, %v", indexFileName, err)
+ log.Warnf("sync file %s failed, %v", indexFileName, err)
}
_ = nm.indexFile.Close()
}
diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go
index 3618dada9..5029748ce 100644
--- a/weed/storage/needle_map_metric.go
+++ b/weed/storage/needle_map_metric.go
@@ -145,7 +145,7 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key
for remainingCount >= 0 {
_, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount)
- // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
+ // log.Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
if e != nil {
return e
}
diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go
index 362659a11..3d9dbefb2 100644
--- a/weed/storage/needle_map_metric_test.go
+++ b/weed/storage/needle_map_metric_test.go
@@ -5,7 +5,7 @@ import (
"math/rand"
"testing"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -23,9 +23,9 @@ func TestFastLoadingNeedleMapMetrics(t *testing.T) {
mm, _ := newNeedleMapMetricFromIndexFile(idxFile)
- glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
- glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
- glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
- glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
- glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
+ log.Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
+ log.Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
+ log.Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
+ log.Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
+ log.Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
}
diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go
index 1ca113ca9..47a30e5a8 100644
--- a/weed/storage/needle_map_sorted_file.go
+++ b/weed/storage/needle_map_sorted_file.go
@@ -3,7 +3,7 @@ package storage
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -21,18 +21,18 @@ func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m *SortedF
m.indexFile = indexFile
fileName := baseFileName + ".sdx"
if !isSortedFileFresh(fileName, indexFile) {
- glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name())
+ log.Infof("Start to Generate %s from %s", fileName, indexFile.Name())
erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx")
- glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name())
+ log.Infof("Finished Generating %s from %s", fileName, indexFile.Name())
}
- glog.V(1).Infof("Opening %s...", fileName)
+ log.Debugf("Opening %s...", fileName)
if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil {
return
}
dbStat, _ := m.dbFile.Stat()
m.dbFileSize = dbStat.Size()
- glog.V(1).Infof("Loading %s...", indexFile.Name())
+ log.Debugf("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
@@ -51,7 +51,7 @@ func isSortedFileFresh(dbFileName string, indexFile *os.File) bool {
dbStat, dbStatErr := dbFile.Stat()
indexStat, indexStatErr := indexFile.Stat()
if dbStatErr != nil || indexStatErr != nil {
- glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
+ log.Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
return false
}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 7f2415448..c43c3b02f 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -8,7 +8,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
@@ -120,11 +120,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
return fmt.Errorf("Volume Id %d already exists!", vid)
}
if location := s.FindFreeLocation(); location != nil {
- glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
+ log.Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
location.Directory, vid, collection, replicaPlacement, ttl)
if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil {
location.SetVolume(vid, volume)
- glog.V(0).Infof("add volume %d", vid)
+ log.Infof("add volume %d", vid)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(vid),
Collection: collection,
@@ -222,7 +222,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
deleteVids = append(deleteVids, v.Id)
} else {
- glog.V(0).Infoln("volume", v.Id, "is expired.")
+ log.Infoln("volume", v.Id, "is expired.")
}
}
collectionVolumeSize[v.Collection] += volumeMessage.Size
@@ -256,9 +256,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
found, err := location.deleteVolumeById(vid)
if found {
if err == nil {
- glog.V(0).Infof("volume %d is deleted", vid)
+ log.Infof("volume %d is deleted", vid)
} else {
- glog.V(0).Infof("delete volume %d: %v", vid, err)
+ log.Infof("delete volume %d: %v", vid, err)
}
}
}
@@ -305,7 +305,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync boo
_, _, isUnchanged, err = v.writeNeedle2(n, fsync)
return
}
- glog.V(0).Infoln("volume", i, "not found!")
+ log.Infoln("volume", i, "not found!")
err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
return
}
@@ -360,7 +360,7 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
func (s *Store) MountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
if found := location.LoadVolume(i, s.NeedleMapType); found == true {
- glog.V(0).Infof("mount volume %d", i)
+ log.Infof("mount volume %d", i)
v := s.findVolume(i)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(v.Id),
@@ -391,7 +391,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
if err := location.UnloadVolume(i); err == nil {
- glog.V(0).Infof("UnmountVolume %d", i)
+ log.Infof("UnmountVolume %d", i)
s.DeletedVolumesChan <- message
return nil
}
@@ -414,11 +414,11 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
}
for _, location := range s.Locations {
if err := location.DeleteVolume(i); err == nil {
- glog.V(0).Infof("DeleteVolume %d", i)
+ log.Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil
} else {
- glog.Errorf("DeleteVolume %d: %v", i, err)
+ log.Errorf("DeleteVolume %d: %v", i, err)
}
}
@@ -472,7 +472,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
}
diskLocation.MaxVolumeCount = maxVolumeCount
- glog.V(2).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
+ log.Debugf("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
hasChanges = hasChanges || currentMaxVolumeCount != diskLocation.MaxVolumeCount
}
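
MaybeAdjustVolumeMax turns unclaimed disk space into extra volume slots, keeping one slot in reserve. The arithmetic from the hunk with made-up numbers; the surrounding guard is an assumption:

package main

import "fmt"

func main() {
    const volumeSizeLimit = 30 << 30      // 30 GiB per volume slot
    maxVolumeCount := 7                   // currently configured slots
    var unclaimedSpaces int64 = 100 << 30 // free space not yet assigned

    if unclaimedSpaces > int64(volumeSizeLimit) {
        // one slot is held back as headroom
        maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
    }
    fmt.Println(maxVolumeCount) // 7 + 100/30 - 1 = 9
}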
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 853757ce3..0739b7453 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -11,7 +11,7 @@ import (
"github.com/klauspost/reedsolomon"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -50,7 +50,7 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error {
for _, location := range s.Locations {
if err := location.LoadEcShard(collection, vid, shardId); err == nil {
- glog.V(0).Infof("MountEcShards %d.%d", vid, shardId)
+ log.Infof("MountEcShards %d.%d", vid, shardId)
var shardBits erasure_coding.ShardBits
@@ -86,7 +86,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
for _, location := range s.Locations {
if deleted := location.UnloadEcShard(vid, shardId); deleted {
- glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
+ log.Infof("UnmountEcShards %d.%d", vid, shardId)
s.DeletedEcShardsChan <- message
return nil
}
@@ -131,10 +131,10 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, ErrorDeleted
}
- glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
+ log.Tracef("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
if len(intervals) > 1 {
- glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
+ log.Tracef("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
}
bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
if err != nil {
@@ -183,7 +183,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
data = make([]byte, interval.Size)
if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
if _, err = shard.ReadAt(data, actualOffset); err != nil {
- glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err)
+ log.Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err)
return
}
} else {
@@ -197,7 +197,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
if err == nil {
return
}
- glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
+ log.Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
forgetShardId(ecVolume, shardId)
}
@@ -206,7 +206,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
if err == nil {
return
}
- glog.V(0).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err)
+ log.Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err)
}
return
}
@@ -231,7 +231,7 @@ func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume)
return nil
}
- glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId)
+ log.Tracef("lookup and cache ec volume %d locations", ecVolume.VolumeId)
err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupEcVolumeRequest{
@@ -268,12 +268,12 @@ func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId typ
}
for _, sourceDataNode := range sourceDataNodes {
- glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
+ log.Tracef("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset)
if err == nil {
return
}
- glog.V(1).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ log.Debugf("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
}
return
@@ -320,7 +320,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId type
}
func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
- glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+ log.Tracef("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
if err != nil {
@@ -338,7 +338,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
continue
}
if len(locations) == 0 {
- glog.V(3).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations)
+ log.Tracef("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations)
continue
}
@@ -349,7 +349,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
data := make([]byte, len(buf))
nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset)
if readErr != nil {
- glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
+ log.Tracef("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
forgetShardId(ecVolume, shardId)
}
if isDeleted {
@@ -365,10 +365,10 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
wg.Wait()
if err = enc.ReconstructData(bufs); err != nil {
- glog.V(3).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err)
+ log.Tracef("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err)
return 0, false, err
}
- glog.V(4).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+ log.Tracef("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
copy(buf, bufs[shardIdToRecover])
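
recoverOneRemoteEcShardInterval rebuilds a missing shard by running Reed-Solomon ReconstructData over whatever shards it managed to fetch. A stand-alone demonstration with the same klauspost/reedsolomon library and the 10+4 layout referenced by erasure_coding.DataShardsCount and ParityShardsCount; shard contents are tiny and purely illustrative:

package main

import (
    "fmt"

    "github.com/klauspost/reedsolomon"
)

func main() {
    const dataShards, parityShards = 10, 4
    enc, err := reedsolomon.New(dataShards, parityShards)
    if err != nil {
        panic(err)
    }

    // Ten 4-byte data shards, then compute the four parity shards.
    shards := make([][]byte, dataShards+parityShards)
    for i := 0; i < dataShards; i++ {
        shards[i] = []byte{byte(i), byte(i), byte(i), byte(i)}
    }
    for i := dataShards; i < len(shards); i++ {
        shards[i] = make([]byte, 4)
    }
    if err := enc.Encode(shards); err != nil {
        panic(err)
    }

    shards[3] = nil // simulate an unreachable EC shard
    if err := enc.ReconstructData(shards); err != nil {
        panic(err)
    }
    fmt.Println(shards[3]) // [3 3 3 3]
}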
diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go
index 4a75fb20b..73d5fcfde 100644
--- a/weed/storage/store_ec_delete.go
+++ b/weed/storage/store_ec_delete.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -73,12 +73,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.Sh
}
for _, sourceDataNode := range sourceDataNodes {
- glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
+ log.Tracef("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId)
if err != nil {
return err
}
- glog.V(1).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err)
+ log.Debugf("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err)
}
return nil
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index 32666a417..d4ad1b1a9 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -4,13 +4,13 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
if v := s.findVolume(volumeId); v != nil {
- glog.V(3).Infof("volumd %d garbage level: %f", volumeId, v.garbageLevel())
+ log.Tracef("volumd %d garbage level: %f", volumeId, v.garbageLevel())
return v.garbageLevel(), nil
}
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go
index f48cd0bdc..333c4261e 100644
--- a/weed/storage/super_block/super_block.go
+++ b/weed/storage/super_block/super_block.go
@@ -3,7 +3,7 @@ package super_block
import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -48,12 +48,12 @@ func (s *SuperBlock) Bytes() []byte {
if s.Extra != nil {
extraData, err := proto.Marshal(s.Extra)
if err != nil {
- glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
+ log.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
}
extraSize := len(extraData)
if extraSize > 256*256-2 {
// reserve a couple of bits for future extension
- glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
+ log.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
}
s.ExtraSize = uint16(extraSize)
util.Uint16toBytes(header[6:8], s.ExtraSize)
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index a7a963a59..4e6eee9f0 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -15,7 +15,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
type Volume struct {
@@ -97,7 +97,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time)
if e == nil {
return uint64(datFileSize), v.nm.IndexFileSize(), modTime
}
- glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
+ log.Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
return // -1 causes integer overflow and the volume to become unwritable.
}
@@ -189,9 +189,9 @@ func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
return false
}
- glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
+ log.Debugf("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
- glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
+ log.Debugf("ttl:%v lived:%v", v.Ttl, livedMinutes)
if int64(v.Ttl.Minutes()) < livedMinutes {
return true
}
@@ -217,7 +217,7 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
func (v *Volume) CollectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64) {
v.dataFileAccessLock.RLock()
defer v.dataFileAccessLock.RUnlock()
- glog.V(3).Infof("CollectStatus volume %d", v.Id)
+ log.Tracef("CollectStatus volume %d", v.Id)
maxFileKey = v.nm.MaxFileKey()
datFileSize, modTime, _ = v.DataBackend.GetStat()
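
The expired check above is plain arithmetic: minutes lived since the last write compared against the TTL. A worked example without the repository types:

package main

import (
    "fmt"
    "time"
)

// expired mirrors Volume.expired: zero TTL never expires; otherwise the
// volume is expired once it has lived longer than its TTL in minutes.
func expired(ttlMinutes int64, lastModifiedTsSeconds int64) bool {
    if ttlMinutes == 0 {
        return false
    }
    livedMinutes := (time.Now().Unix() - lastModifiedTsSeconds) / 60
    return ttlMinutes < livedMinutes
}

func main() {
    lastWrite := time.Now().Add(-90 * time.Minute).Unix()
    fmt.Println(expired(60, lastWrite)) // true: lived 90 min > 60 min TTL
}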
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 00e04047f..cc91619b6 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -5,7 +5,7 @@ import (
"io"
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -34,10 +34,10 @@ func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAt
}
}
if healthyIndexSize < indexSize {
- glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize)
+ log.Warnf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize)
err = indexFile.Truncate(healthyIndexSize)
if err != nil {
- glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: %v", indexFile.Name(), indexSize, healthyIndexSize, err)
+ log.Warnf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: %v", indexFile.Name(), indexSize, healthyIndexSize, err)
}
}
return
@@ -114,14 +114,14 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
return n.AppendAtNs, nil
}
if fileSize > fileTailOffset {
- glog.Warningf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+ log.Warnf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset)
err = datFile.Truncate(fileTailOffset)
if err == nil {
return n.AppendAtNs, nil
}
return n.AppendAtNs, fmt.Errorf("truncate file %s: %v", datFile.Name(), err)
}
- glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+ log.Warnf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
}
if err = n.ReadData(datFile, offset, size, v); err != nil {
return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 05684cbdb..906cbcdd4 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -6,7 +6,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -31,7 +31,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if v.HasRemoteFile() {
v.noWriteCanDelete = true
v.noWriteOrDelete = false
- glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files)
+ log.Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files)
v.LoadRemoteFile()
alreadyHasSuperBlock = true
} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists {
@@ -43,7 +43,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if canWrite {
dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
} else {
- glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
+ log.Infoln("opening " + fileName + ".dat in READONLY mode")
dataFile, err = os.Open(fileName + ".dat")
v.noWriteOrDelete = true
}
@@ -79,61 +79,61 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if err == nil && alsoLoadIndex {
var indexFile *os.File
if v.noWriteOrDelete {
- glog.V(0).Infoln("open to read file", fileName+".idx")
+ log.Infoln("open to read file", fileName+".idx")
if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil {
return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err)
}
} else {
- glog.V(1).Infoln("open to write file", fileName+".idx")
+ log.Debug("open to write file", fileName+".idx")
if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err)
}
}
if v.lastAppendAtNs, err = CheckAndFixVolumeDataIntegrity(v, indexFile); err != nil {
v.noWriteOrDelete = true
- glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
+ log.Infof("volumeDataIntegrityChecking failed %v", err)
}
if v.noWriteOrDelete || v.noWriteCanDelete {
if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil {
- glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err)
+ log.Infof("loading sorted db %s error: %v", fileName+".sdx", err)
}
} else {
switch needleMapKind {
case NeedleMapInMemory:
- glog.V(0).Infoln("loading index", fileName+".idx", "to memory")
+ log.Infoln("loading index", fileName+".idx", "to memory")
if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
- glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err)
+ log.Infof("loading index %s to memory error: %v", fileName+".idx", err)
}
case NeedleMapLevelDb:
- glog.V(0).Infoln("loading leveldb", fileName+".ldb")
+ log.Infoln("loading leveldb", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
case NeedleMapLevelDbMedium:
- glog.V(0).Infoln("loading leveldb medium", fileName+".ldb")
+ log.Infoln("loading leveldb medium", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
case NeedleMapLevelDbLarge:
- glog.V(0).Infoln("loading leveldb large", fileName+".ldb")
+ log.Infoln("loading leveldb large", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
}
}
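
The three LevelDB-backed needle-map kinds differ only in tuning: medium and large double the block cache and write buffer at each step while keeping the same compaction multiplier. A sketch of that selection; the kind strings are stand-ins for the NeedleMapLevelDb* constants:

package needlemap

import "github.com/syndtr/goleveldb/leveldb/opt"

func levelDbOptions(kind string) *opt.Options {
    switch kind {
    case "medium":
        return &opt.Options{
            BlockCacheCapacity:            4 * 1024 * 1024,
            WriteBuffer:                   2 * 1024 * 1024,
            CompactionTableSizeMultiplier: 10,
        }
    case "large":
        return &opt.Options{
            BlockCacheCapacity:            8 * 1024 * 1024,
            WriteBuffer:                   4 * 1024 * 1024,
            CompactionTableSizeMultiplier: 10,
        }
    default: // the base LevelDb tier
        return &opt.Options{
            BlockCacheCapacity:            2 * 1024 * 1024,
            WriteBuffer:                   1 * 1024 * 1024,
            CompactionTableSizeMultiplier: 10,
        }
    }
}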
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index 869796a3f..d2aed8c58 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -8,7 +8,7 @@ import (
"os"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -31,7 +31,7 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
oldNeedle := new(needle.Needle)
err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
- glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
+ log.Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
return false
}
if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
@@ -76,7 +76,7 @@ func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
}
func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
actualSize := needle.GetActualSize(Size(len(n.Data)), v.Version())
v.dataFileAccessLock.Lock()
@@ -101,7 +101,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
return
}
if existingNeedle.Cookie != n.Cookie {
- glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ log.Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
err = fmt.Errorf("mismatching cookie %x", n.Cookie)
return
}
@@ -118,7 +118,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
// add to needle map
if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
- glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ log.Tracef("failed to save in needle map %d: %v", n.Id, err)
}
}
if v.lastModifiedTsSeconds < n.LastModified {
@@ -128,7 +128,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
}
func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
n.SetHasTtl()
n.Ttl = v.Ttl
@@ -149,7 +149,7 @@ func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size
}
func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if v.isFileUnchanged(n) {
size = Size(n.DataSize)
isUnchanged = true
@@ -165,7 +165,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
return
}
if existingNeedle.Cookie != n.Cookie {
- glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ log.Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
err = fmt.Errorf("mismatching cookie %x", n.Cookie)
return
}
@@ -181,7 +181,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
// add to needle map
if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
- glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ log.Tracef("failed to save in needle map %d: %v", n.Id, err)
}
}
if v.lastModifiedTsSeconds < n.LastModified {
@@ -191,7 +191,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
}
func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
- // glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
actualSize := needle.GetActualSize(0, v.Version())
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
@@ -238,7 +238,7 @@ func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
}
func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
- glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ log.Tracef("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
nv, ok := v.nm.Get(n.Id)
// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
if ok && nv.Size.IsValid() {
@@ -270,7 +270,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
readSize := nv.Size
if readSize.IsDeleted() {
if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
- glog.V(3).Infof("reading deleted %s", n.String())
+ log.Tracef("reading deleted %s", n.String())
readSize = -readSize
} else {
return -1, ErrorDeleted
@@ -361,7 +361,7 @@ func (v *Volume) startWorker() {
if err := v.DataBackend.Sync(); err != nil {
// todo: this may generate dirty data or cause data inconsistent, may be weed need to panic?
if te := v.DataBackend.Truncate(end); te != nil {
- glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
+ log.Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
}
for i := 0; i < len(currentRequests); i++ {
if currentRequests[i].IsSucceed() {
@@ -416,7 +416,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
if volumeFileScanner.ReadNeedleBody() {
// println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest)
if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
- glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
+ log.Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
// err = fmt.Errorf("cannot read needle body: %v", err)
// return
}
@@ -426,18 +426,18 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
return nil
}
if err != nil {
- glog.V(0).Infof("visit needle error: %v", err)
+ log.Infof("visit needle error: %v", err)
return fmt.Errorf("visit needle error: %v", err)
}
offset += NeedleHeaderSize + rest
- glog.V(4).Infof("==> new entry offset %d", offset)
+ log.Tracef("==> new entry offset %d", offset)
if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
if err == io.EOF {
return nil
}
return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
}
- glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
+ log.Tracef("new entry needle size:%d rest:%d", n.Size, rest)
}
return nil
}
diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go
index 20223ac1b..f5e78024c 100644
--- a/weed/storage/volume_super_block.go
+++ b/weed/storage/volume_super_block.go
@@ -4,7 +4,7 @@ import (
"fmt"
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -14,7 +14,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
datSize, _, e := v.DataBackend.GetStat()
if e != nil {
- glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
+ log.Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
return e
}
if datSize == 0 {
diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go
index fd7b08654..3fbcd9785 100644
--- a/weed/storage/volume_tier.go
+++ b/weed/storage/volume_tier.go
@@ -1,7 +1,7 @@
package storage
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
@@ -17,7 +17,7 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) {
v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif")
if v.hasRemoteFile {
- glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id,
+ log.Infof("volume %d is tiered to %s as %s and read only", v.Id,
v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key)
}
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index a3e5800df..81c105635 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -6,7 +6,7 @@ import (
"runtime"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx"
@@ -39,11 +39,11 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(3).Infof("Compacting volume %d ...", v.Id)
+ log.Tracef("Compacting volume %d ...", v.Id)
//no need to lock for copy on write
//v.accessLock.Lock()
//defer v.accessLock.Unlock()
- //glog.V(3).Infof("Got Compaction lock...")
+ //log.Tracef("Got Compaction lock...")
v.isCompacting = true
defer func() {
v.isCompacting = false
@@ -52,12 +52,12 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
filePath := v.FileName()
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
- glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
+ log.Tracef("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
if err := v.DataBackend.Sync(); err != nil {
- glog.V(0).Infof("compact fail to sync volume %d", v.Id)
+ log.Infof("compact fail to sync volume %d", v.Id)
}
if err := v.nm.Sync(); err != nil {
- glog.V(0).Infof("compact fail to sync volume idx %d", v.Id)
+ log.Infof("compact fail to sync volume idx %d", v.Id)
}
return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
}
@@ -68,7 +68,7 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(3).Infof("Compact2 volume %d ...", v.Id)
+ log.Tracef("Compact2 volume %d ...", v.Id)
v.isCompacting = true
defer func() {
@@ -78,12 +78,12 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro
filePath := v.FileName()
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
- glog.V(3).Infof("creating copies for volume %d ...", v.Id)
+ log.Tracef("creating copies for volume %d ...", v.Id)
if err := v.DataBackend.Sync(); err != nil {
- glog.V(0).Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
+ log.Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
}
if err := v.nm.Sync(); err != nil {
- glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
+ log.Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
}
return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond)
}
@@ -92,7 +92,7 @@ func (v *Volume) CommitCompact() error {
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
+ log.Infof("Committing volume %d vacuuming...", v.Id)
v.isCompacting = true
defer func() {
@@ -102,11 +102,11 @@ func (v *Volume) CommitCompact() error {
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
- glog.V(3).Infof("Got volume %d committing lock...", v.Id)
+ log.Tracef("Got volume %d committing lock...", v.Id)
v.nm.Close()
if v.DataBackend != nil {
if err := v.DataBackend.Close(); err != nil {
- glog.V(0).Infof("fail to close volume %d", v.Id)
+ log.Infof("fail to close volume %d", v.Id)
}
}
v.DataBackend = nil
@@ -114,7 +114,7 @@ func (v *Volume) CommitCompact() error {
var e error
if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil {
- glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
+ log.Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
e = os.Remove(v.FileName() + ".cpd")
if e != nil {
return e
@@ -143,12 +143,12 @@ func (v *Volume) CommitCompact() error {
}
}
- //glog.V(3).Infof("Pretending to be vacuuming...")
+ //log.Tracef("Pretending to be vacuuming...")
//time.Sleep(20 * time.Second)
os.RemoveAll(v.FileName() + ".ldb")
- glog.V(3).Infof("Loading volume %d commit file...", v.Id)
+ log.Tracef("Loading volume %d commit file...", v.Id)
if e = v.load(true, false, v.needleMapKind, 0); e != nil {
return e
}
@@ -156,7 +156,7 @@ func (v *Volume) CommitCompact() error {
}
func (v *Volume) cleanupCompact() error {
- glog.V(0).Infof("Cleaning up volume %d vacuuming...", v.Id)
+ log.Infof("Cleaning up volume %d vacuuming...", v.Id)
e1 := os.Remove(v.FileName() + ".cpd")
e2 := os.Remove(v.FileName() + ".cpx")
@@ -217,7 +217,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err)
}
key, offset, size := idx2.IdxFileEntry(IdxEntry)
- glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
+ log.Tracef("key %d offset %d size %d", key, offset, size)
if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
incrementedHasUpdatedIndexEntry[key] = keyField{
offset: offset,
@@ -261,14 +261,14 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
var offset int64
if offset, err = dst.Seek(0, 2); err != nil {
- glog.V(0).Infof("failed to seek the end of file: %v", err)
+ log.Infof("failed to seek the end of file: %v", err)
return
}
//ensure file writing starting from aligned positions
if offset%NeedlePaddingSize != 0 {
offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
if offset, err = dst.Seek(offset, 0); err != nil {
- glog.V(0).Infof("failed to align in datafile %s: %v", dst.Name(), err)
+ log.Infof("failed to align in datafile %s: %v", dst.Name(), err)
return
}
}
@@ -276,7 +276,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
//updated needle
if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() {
//even the needle cache in memory is hit, the need_bytes is correct
- glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
+ log.Tracef("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
var needleBytes []byte
needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version())
if err != nil {
@@ -334,7 +334,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
return nil
}
nv, ok := scanner.v.nm.Get(n.Id)
- glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
+ log.Trace("needle expected offset ", offset, "ok", ok, "nv", nv)
if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size.IsValid() {
if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
@@ -345,7 +345,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
delta := n.DiskSize(scanner.version)
scanner.newOffset += delta
scanner.writeThrottler.MaybeSlowdown(delta)
- glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
+ log.Trace("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
}
return nil
}
@@ -436,7 +436,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
delta := n.DiskSize(version)
newOffset += delta
writeThrottler.MaybeSlowdown(delta)
- glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+ log.Trace("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
return nil
})
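
makeupDiff keeps appended needles aligned by rounding the write offset up to the next multiple of NeedlePaddingSize before seeking. The rounding in runnable form, assuming the common 8-byte padding:

package main

import "fmt"

// alignUp rounds offset up to the next multiple of padding.
func alignUp(offset, padding int64) int64 {
    if offset%padding != 0 {
        offset += padding - offset%padding
    }
    return offset
}

func main() {
    fmt.Println(alignUp(1005, 8)) // 1008
}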
diff --git a/weed/topology/cluster_commands.go b/weed/topology/cluster_commands.go
index 152691ccb..51c89f285 100644
--- a/weed/topology/cluster_commands.go
+++ b/weed/topology/cluster_commands.go
@@ -2,7 +2,7 @@ package topology
import (
"github.com/chrislusf/raft"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -25,7 +25,7 @@ func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
before := topo.GetMaxVolumeId()
topo.UpAdjustMaxVolumeId(c.MaxVolumeId)
- glog.V(1).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())
+ log.Debug("max volume id", before, "==>", topo.GetMaxVolumeId())
return nil, nil
}
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 0a4df63d0..f952e13d3 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -10,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -86,7 +86,7 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
for vid, v := range dn.volumes {
if _, ok := actualVolumeMap[vid]; !ok {
- glog.V(0).Infoln("Deleting volume id:", vid)
+ log.Infoln("Deleting volume id:", vid)
delete(dn.volumes, vid)
deletedVolumes = append(deletedVolumes, v)
dn.UpAdjustVolumeCountDelta(-1)
diff --git a/weed/topology/node.go b/weed/topology/node.go
index 114417edf..edf1eccf5 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -7,7 +7,7 @@ import (
"sync"
"sync/atomic"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -79,7 +79,7 @@ func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(d
}
n.RUnlock()
if len(candidates) < numberOfNodes {
- glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates")
+ log.Infoln(n.Id(), "failed to pick", numberOfNodes, "from", len(candidates), "node candidates")
return nil, nil, errors.New("No enough data node found!")
}
@@ -275,7 +275,7 @@ func (n *NodeImpl) LinkChildNode(node Node) {
n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
node.SetParent(n)
- glog.V(0).Infoln(n, "adds child", node.Id())
+ log.Infoln(n, "adds child", node.Id())
}
}
@@ -291,7 +291,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
- glog.V(0).Infoln(n, "removes", node.Id())
+ log.Infoln(n, "removes", node.Id())
}
}
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index 6b4076913..c0ddea632 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -29,7 +29,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
// this is the initial request
remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode)
if err != nil {
- glog.V(0).Infoln(err)
+ log.Infoln(err)
return
}
}
@@ -44,7 +44,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync)
if err != nil {
err = fmt.Errorf("failed to write to local disk: %v", err)
- glog.V(0).Infoln(err)
+ log.Infoln(err)
return
}
}
@@ -73,7 +73,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
tmpMap := make(map[string]string)
err := json.Unmarshal(n.Pairs, &tmpMap)
if err != nil {
- glog.V(0).Infoln("Unmarshal pairs error:", err)
+ log.Infoln("Unmarshal pairs error:", err)
}
for k, v := range tmpMap {
pairMap[needle.PairNamePrefix+k] = v
@@ -86,7 +86,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
return err
}); err != nil {
err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err)
- glog.V(0).Infoln(err)
+ log.Infoln(err)
}
}
return
@@ -103,14 +103,14 @@ func ReplicatedDelete(masterNode string, store *storage.Store,
if r.FormValue("type") != "replicate" {
remoteLocations, err = getWritableRemoteReplications(store, volumeId, masterNode)
if err != nil {
- glog.V(0).Infoln(err)
+ log.Infoln(err)
return
}
}
size, err = store.DeleteVolumeNeedle(volumeId, n)
if err != nil {
- glog.V(0).Infoln("delete error:", err)
+ log.Infoln("delete error:", err)
return
}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index e217617e9..e4847b86d 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -9,7 +9,7 @@ import (
"github.com/chrislusf/raft"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -180,7 +180,7 @@ func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl).RegisterVolume(&v, dn)
}
func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
- glog.Infof("removing volume info:%+v", v)
+ log.Infof("removing volume info:%+v", v)
volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
volumeLayout.UnRegisterVolume(&v, dn)
if volumeLayout.isEmpty() {
@@ -207,7 +207,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
if vi, err := storage.NewVolumeInfo(v); err == nil {
volumeInfos = append(volumeInfos, vi)
} else {
- glog.V(0).Infof("Fail to convert joined volume information: %v", err)
+ log.Infof("Fail to convert joined volume information: %v", err)
}
}
// find out the delta volumes
@@ -231,7 +231,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume
for _, v := range newVolumes {
vi, err := storage.NewVolumeInfoFromShort(v)
if err != nil {
- glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+ log.Infof("NewVolumeInfoFromShort %v: %v", v, err)
continue
}
newVis = append(newVis, vi)
@@ -239,7 +239,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume
for _, v := range deletedVolumes {
vi, err := storage.NewVolumeInfoFromShort(v)
if err != nil {
- glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+ log.Infof("NewVolumeInfoFromShort %v: %v", v, err)
continue
}
oldVis = append(oldVis, vi)
diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go
index 93b39bb5d..79bc37f61 100644
--- a/weed/topology/topology_ec.go
+++ b/weed/topology/topology_ec.go
@@ -1,7 +1,7 @@
package topology
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -110,7 +110,7 @@ func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, d
}
func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
- glog.Infof("removing ec shard info:%+v", ecShardInfos)
+ log.Infof("removing ec shard info:%+v", ecShardInfos)
t.ecShardMapLock.Lock()
defer t.ecShardMapLock.Unlock()
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 068bd401e..f06c333e4 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -5,7 +5,7 @@ import (
"math/rand"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -54,7 +54,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
}
func (t *Topology) UnRegisterDataNode(dn *DataNode) {
for _, v := range dn.GetVolumes() {
- glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
+ log.Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
vl.SetVolumeUnavailable(dn, v.Id)
}
diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go
index 7bf55d131..66b03f47d 100644
--- a/weed/topology/topology_vacuum.go
+++ b/weed/topology/topology_vacuum.go
@@ -9,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@@ -37,7 +37,7 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi
return nil
})
if err != nil {
- glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
+ log.Infof("Checking vacuuming %d on %s: %v", vid, url, err)
}
}(index, dn.Url(), vid)
}
@@ -67,7 +67,7 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout,
ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list {
go func(index int, url string, vid needle.VolumeId) {
- glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
+ log.Infoln(index, "Start vacuuming", vid, "on", url)
err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
VolumeId: uint32(vid),
@@ -76,10 +76,10 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout,
return err
})
if err != nil {
- glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
+ log.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
ch <- false
} else {
- glog.V(0).Infof("Complete vacuuming %d on %s", vid, url)
+ log.Infof("Complete vacuuming %d on %s", vid, url)
ch <- true
}
}(index, dn.Url(), vid)
@@ -103,7 +103,7 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v
isCommitSuccess := true
isReadOnly := false
for _, dn := range locationlist.list {
- glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
+ log.Infoln("Start Committing vacuum", vid, "on", dn.Url())
err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
VolumeId: uint32(vid),
@@ -114,10 +114,10 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v
return err
})
if err != nil {
- glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
+ log.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
isCommitSuccess = false
} else {
- glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
+ log.Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
}
}
if isCommitSuccess {
@@ -129,7 +129,7 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v
}
func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
for _, dn := range locationlist.list {
- glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
+ log.Infoln("Start cleaning up", vid, "on", dn.Url())
err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
VolumeId: uint32(vid),
@@ -137,9 +137,9 @@ func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout,
return err
})
if err != nil {
- glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
+ log.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
} else {
- glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
+ log.Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
}
}
}
@@ -155,7 +155,7 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float
// now only one vacuum process going on
- glog.V(1).Infof("Start vacuum on demand with threshold: %f", garbageThreshold)
+ log.Debugf("Start vacuum on demand with threshold: %f", garbageThreshold)
for _, col := range t.collectionMap.Items() {
c := col.(*Collection)
for _, vl := range c.storageType2VolumeLayout.Items() {
@@ -187,7 +187,7 @@ func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeL
continue
}
- glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
+ log.Debugf("check vacuum on collection:%s volume:%d", c.Name, vid)
if vacuumLocationList, needVacuum := batchVacuumVolumeCheck(
grpcDialOption, volumeLayout, vid, locationList, garbageThreshold); needVacuum {
if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) {
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 58b5702bf..5298763b3 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -85,7 +85,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe
if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
counter += c
} else {
- glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e)
+ log.Infof("create %d volume, created %d: %v", targetCount, counter, e)
return counter, e
}
}
@@ -221,9 +221,9 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid
}
server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(vi, server)
- glog.V(0).Infoln("Created Volume", vid, "on", server.NodeImpl.String())
+ log.Infoln("Created Volume", vid, "on", server.NodeImpl.String())
} else {
- glog.V(0).Infoln("Failed to assign volume", vid, "to", servers, "error", err)
+ log.Infoln("Failed to assign volume", vid, "to", servers, "error", err)
return fmt.Errorf("Failed to assign %d: %v", vid, err)
}
}
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index ffe36e95b..5d2be671d 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -146,11 +146,11 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
vl.vid2location[v.Id] = NewVolumeLocationList()
}
vl.vid2location[v.Id].Set(dn)
- // glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
+ // log.Tracef("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
for _, dn := range vl.vid2location[v.Id].list {
if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
if vInfo.ReadOnly {
- glog.V(1).Infof("vid %d removed from writable", v.Id)
+ log.Debugf("vid %d removed from writable", v.Id)
vl.removeFromWritable(v.Id)
vl.readonlyVolumes.Add(v.Id, dn)
return
@@ -158,7 +158,7 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
vl.readonlyVolumes.Remove(v.Id, dn)
}
} else {
- glog.V(1).Infof("vid %d removed from writable", v.Id)
+ log.Debugf("vid %d removed from writable", v.Id)
vl.removeFromWritable(v.Id)
vl.readonlyVolumes.Remove(v.Id, dn)
return
@@ -269,7 +269,7 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*n
lenWriters := len(vl.writables)
if lenWriters <= 0 {
- glog.V(0).Infoln("No more writable volumes!")
+ log.Infoln("No more writable volumes!")
return nil, 0, nil, errors.New("No more writable volumes!")
}
if option.DataCenter == "" {
@@ -336,7 +336,7 @@ func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
}
}
if toDeleteIndex >= 0 {
- glog.V(0).Infoln("Volume", vid, "becomes unwritable")
+ log.Infoln("Volume", vid, "becomes unwritable")
vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
return true
}
@@ -348,7 +348,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
return false
}
}
- glog.V(0).Infoln("Volume", vid, "becomes writable")
+ log.Infoln("Volume", vid, "becomes writable")
vl.writables = append(vl.writables, vid)
return true
}
@@ -362,7 +362,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId)
vl.readonlyVolumes.Remove(vid, dn)
vl.oversizedVolumes.Remove(vid, dn)
if location.Length() < vl.rp.GetCopyCount() {
- glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
+ log.Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
return vl.removeFromWritable(vid)
}
}
@@ -400,7 +400,7 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
- // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
+ // log.Infoln("Volume", vid, "reaches full capacity.")
return vl.removeFromWritable(vid)
}
diff --git a/weed/util/bounded_tree/bounded_tree.go b/weed/util/bounded_tree/bounded_tree.go
index 0e8af2520..d40b82a7f 100644
--- a/weed/util/bounded_tree/bounded_tree.go
+++ b/weed/util/bounded_tree/bounded_tree.go
@@ -3,7 +3,7 @@ package bounded_tree
import (
"sync"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -77,7 +77,7 @@ func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, componen
children, err := visitFn(filerPath)
if err != nil {
- glog.V(0).Infof("failed to visit %s: %v", currentPath, err)
+ log.Infof("failed to visit %s: %v", currentPath, err)
return false, err
}
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 3615aee0e..887f39c3f 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -3,7 +3,7 @@ package chunk_cache
import (
"sync"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -60,7 +60,7 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
fid, err := needle.ParseFileIdFromString(fileId)
if err != nil {
- glog.Errorf("failed to parse file id %s", fileId)
+ log.Errorf("failed to parse file id %s", fileId)
return nil
}
@@ -94,7 +94,7 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
c.Lock()
defer c.Unlock()
- glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data))
+ log.Tracef("SetChunk %s size %d\n", fileId, len(data))
c.doSetChunk(fileId, data)
}
@@ -107,7 +107,7 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
fid, err := needle.ParseFileIdFromString(fileId)
if err != nil {
- glog.Errorf("failed to parse file id %s", fileId)
+ log.Errorf("failed to parse file id %s", fileId)
return
}
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
index 356dfe188..1ca096031 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -7,7 +7,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -63,7 +63,7 @@ func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCac
return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
}
- glog.V(1).Infoln("loading leveldb", v.fileName+".ldb")
+ log.Debug("loading leveldb", v.fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go
index eebd89798..33f877d3a 100644
--- a/weed/util/chunk_cache/on_disk_cache_layer.go
+++ b/weed/util/chunk_cache/on_disk_cache_layer.go
@@ -5,7 +5,7 @@ import (
"path"
"sort"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -26,7 +26,7 @@ func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount in
fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize)
if err != nil {
- glog.Errorf("failed to add cache %s : %v", fileName, err)
+ log.Errorf("failed to add cache %s : %v", fileName, err)
} else {
c.diskCaches = append(c.diskCaches, diskCache)
}
@@ -45,7 +45,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
if resetErr != nil {
- glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
+ log.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
return
}
for i := len(c.diskCaches) - 1; i > 0; i-- {
@@ -55,7 +55,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
}
if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil {
- glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err)
+ log.Infof("cache write %v size %d: %v", needleId, len(data), err)
}
}
@@ -70,7 +70,7 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
continue
}
if err != nil {
- glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+ log.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
continue
}
if len(data) != 0 {
diff --git a/weed/util/cipher.go b/weed/util/cipher.go
index f044c2ca3..6e9153015 100644
--- a/weed/util/cipher.go
+++ b/weed/util/cipher.go
@@ -7,7 +7,7 @@ import (
"errors"
"io"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
type CipherKey []byte
@@ -15,7 +15,7 @@ type CipherKey []byte
func GenCipherKey() CipherKey {
key := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, key); err != nil {
- glog.Fatalf("random key gen: %v", err)
+ log.Fatalf("random key gen: %v", err)
}
return CipherKey(key)
}
diff --git a/weed/util/compression.go b/weed/util/compression.go
index cf3ac7c57..c5ef84484 100644
--- a/weed/util/compression.go
+++ b/weed/util/compression.go
@@ -8,7 +8,7 @@ import (
"io/ioutil"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/klauspost/compress/zstd"
)
@@ -34,7 +34,7 @@ func MaybeDecompressData(input []byte) []byte {
uncompressed, err := DecompressData(input)
if err != nil {
if err != UnsupportedCompression {
- glog.Errorf("decompressed data: %v", err)
+ log.Errorf("decompressed data: %v", err)
}
return input
}
@@ -45,11 +45,11 @@ func GzipData(input []byte) ([]byte, error) {
buf := new(bytes.Buffer)
w, _ := gzip.NewWriterLevel(buf, flate.BestSpeed)
if _, err := w.Write(input); err != nil {
- glog.V(2).Infof("error gzip data: %v", err)
+ log.Debugf("error gzip data: %v", err)
return nil, err
}
if err := w.Close(); err != nil {
- glog.V(2).Infof("error closing gzipped data: %v", err)
+ log.Debugf("error closing gzipped data: %v", err)
return nil, err
}
return buf.Bytes(), nil
@@ -77,7 +77,7 @@ func ungzipData(input []byte) ([]byte, error) {
defer r.Close()
output, err := ioutil.ReadAll(r)
if err != nil {
- glog.V(2).Infof("error ungzip data: %v", err)
+ log.Debugf("error ungzip data: %v", err)
}
return output, err
}
diff --git a/weed/util/config.go b/weed/util/config.go
index 6acf21c12..9d7552064 100644
--- a/weed/util/config.go
+++ b/weed/util/config.go
@@ -5,7 +5,7 @@ import (
"github.com/spf13/viper"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
type Configuration interface {
@@ -24,16 +24,16 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
- glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
+ log.Debugf("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file
- logLevel := glog.Level(0)
if strings.Contains(err.Error(), "Not Found") {
- logLevel = 1
+ log.Warnf("Reading %s: %v", viper.ConfigFileUsed(), err)
+ } else {
+ log.Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
}
- glog.V(logLevel).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
if required {
- glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
+ log.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
"\n\nPlease use this command to generate the default %s.toml file\n"+
" weed scaffold -config=%s -output=.\n\n\n",
configFileName, configFileName, configFileName)
diff --git a/weed/util/file_util.go b/weed/util/file_util.go
index 70135180d..bdc66afa4 100644
--- a/weed/util/file_util.go
+++ b/weed/util/file_util.go
@@ -8,7 +8,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func TestFolderWritable(folder string) (err error) {
@@ -20,7 +20,7 @@ func TestFolderWritable(folder string) (err error) {
return errors.New("Not a valid folder!")
}
perm := fileInfo.Mode().Perm()
- glog.V(0).Infoln("Folder", folder, "Permission:", perm)
+ log.Infoln("Folder", folder, "Permission:", perm)
if 0200&perm != 0 {
return nil
}
@@ -53,7 +53,7 @@ func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti
return
}
if err != nil {
- glog.Errorf("check %s: %v", filename, err)
+ log.Errorf("check %s: %v", filename, err)
return
}
if fi.Mode()&0400 != 0 {
diff --git a/weed/util/grace/pprof.go b/weed/util/grace/pprof.go
index 14686bfc8..68c3920af 100644
--- a/weed/util/grace/pprof.go
+++ b/weed/util/grace/pprof.go
@@ -5,14 +5,14 @@ import (
"runtime"
"runtime/pprof"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func SetupProfiling(cpuProfile, memProfile string) {
if cpuProfile != "" {
f, err := os.Create(cpuProfile)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
}
pprof.StartCPUProfile(f)
OnInterrupt(func() {
@@ -23,7 +23,7 @@ func SetupProfiling(cpuProfile, memProfile string) {
runtime.MemProfileRate = 1
f, err := os.Create(memProfile)
if err != nil {
- glog.Fatal(err)
+ log.Fatal(err)
}
OnInterrupt(func() {
pprof.WriteHeapProfile(f)
diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index 7851d8293..9887fd312 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -11,7 +11,7 @@ import (
"net/url"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var (
@@ -251,7 +251,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
// drains the response body to avoid memory leak
data, _ := ioutil.ReadAll(reader)
if len(data) != 0 {
- glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data))
+ log.Debugf("%s reader has remaining %d bytes", contentEncoding, len(data))
}
return n, err
}
@@ -323,7 +323,7 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool
if isContentCompressed {
decryptedData, err = DecompressData(decryptedData)
if err != nil {
- glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err)
+ log.Infof("unzip decrypt %s: %v", fileUrl, err)
}
}
if len(decryptedData) < int(offset)+size {
diff --git a/weed/util/log/logger.go b/weed/util/log/logger.go
new file mode 100644
index 000000000..54c4f35ff
--- /dev/null
+++ b/weed/util/log/logger.go
@@ -0,0 +1,100 @@
+package log
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/sirupsen/logrus"
+)
+
+const (
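+ // LogLevel is the name of the environment variable that selects the log level, e.g. LOG=debug.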
+ LogLevel = "LOG"
+)
+
+var (
+ logger = logrus.New()
+)
+
+func init() {
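+ // Read the desired level once from the LOG environment variable, defaulting to "info".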
+ envLevel, _ := os.LookupEnv(LogLevel)
+ if envLevel == "" {
+ envLevel = "info"
+ }
+ level := logLevel(envLevel)
+ logger.SetLevel(level)
+ formatter := &logrus.TextFormatter{
+ FullTimestamp: false,
+ DisableLevelTruncation: true,
+ }
+ logger.SetFormatter(formatter)
+
+}
+
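+// logLevel maps a LOG environment value to a logrus.Level; an unrecognized value panics at startup.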
+func logLevel(lvl string) logrus.Level {
+
+ switch lvl {
+ case "trace":
+ return logrus.TraceLevel
+ case "debug":
+ return logrus.DebugLevel
+ case "info":
+ return logrus.InfoLevel
+ case "warn":
+ return logrus.WarnLevel
+ case "error":
+ return logrus.ErrorLevel
+ case "fatal":
+ return logrus.FatalLevel
+ default:
+ panic(fmt.Sprintf("the specified %s log level is not supported. Use [trace|debug|info|warn|error|fatal]", lvl))
+ }
+}
+
+func Info(values ...interface{}) {
+ logger.Info(values...)
+}
+func Infof(format string, values ...interface{}) {
+ logger.Infof(format, values...)
+}
+func Infoln(values ...interface{}) {
+ logger.Infoln(values...)
+}
+func Debugf(format string, values ...interface{}) {
+ logger.Debugf(format, values...)
+}
+func Debug(values ...interface{}) {
+ logger.Debug(values...)
+}
+func Debugln(values ...interface{}) {
+ logger.Debugln(values...)
+}
+func Tracef(format string, values ...interface{}) {
+ logger.Tracef(format, values...)
+}
+func Trace(values ...interface{}) {
+ logger.Trace(values...)
+}
+func Traceln(values ...interface{}) {
+ logger.Traceln(values...)
+}
+func Warnf(format string, values ...interface{}) {
+ logger.Warnf(format, values...)
+}
+func Fatalf(format string, values ...interface{}) {
+ logger.Fatalf(format, values...)
+}
+func Errorf(format string, values ...interface{}) {
+ logger.Errorf(format, values...)
+}
+func Error(values ...interface{}) {
+ logger.Error(values...)
+}
+func Fatal(values ...interface{}) {
+ logger.Fatal(values...)
+}
+func IsTrace() bool {
+ return logger.IsLevelEnabled(logrus.TraceLevel)
+}
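Since the level is chosen once in the package's init, it has to be set in the environment before the process starts. A minimal usage sketch (the program name and messages are illustrative):

    package main

    import (
    	"os"

    	"github.com/chrislusf/seaweedfs/weed/util/log"
    )

    func main() {
    	// Run as:  LOG=trace ./example
    	// Calling os.Setenv("LOG", ...) here would have no effect, because
    	// the level was already fixed when the log package's init() ran.
    	log.Infof("pid %d starting", os.Getpid())
    	if log.IsTrace() {
    		log.Trace("trace-level diagnostics enabled")
    	}
    }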
diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go
index e4310b5c5..a122799a1 100644
--- a/weed/util/log_buffer/log_buffer.go
+++ b/weed/util/log_buffer/log_buffer.go
@@ -7,7 +7,7 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -189,7 +189,7 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
return nil
}
if lastReadTime.After(m.stopTime) {
- // glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime)
+ // log.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime)
return nil
}
if lastReadTime.Before(m.startTime) {
@@ -279,7 +279,7 @@ func readTs(buf []byte, pos int) (size int, ts int64) {
err := proto.Unmarshal(entryData, logEntry)
if err != nil {
- glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ log.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
}
return size, logEntry.TsNs
diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go
index 57f4b0115..47c76d570 100644
--- a/weed/util/log_buffer/log_read.go
+++ b/weed/util/log_buffer/log_read.go
@@ -7,7 +7,7 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -55,14 +55,14 @@ func (logBuffer *LogBuffer) LoopProcessLogData(
size := util.BytesToUint32(buf[pos : pos+4])
if pos+4+int(size) > len(buf) {
err = ResumeError
- glog.Errorf("LoopProcessLogData: read buffer %v read %d [%d,%d) from [0,%d)", lastReadTime, batchSize, pos, pos+int(size)+4, len(buf))
+ log.Errorf("LoopProcessLogData: read buffer %v read %d [%d,%d) from [0,%d)", lastReadTime, batchSize, pos, pos+int(size)+4, len(buf))
return
}
entryData := buf[pos+4 : pos+4+int(size)]
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
- glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ log.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
pos += 4 + int(size)
continue
}
diff --git a/weed/util/network.go b/weed/util/network.go
index 7108cfea6..c1b7a08ad 100644
--- a/weed/util/network.go
+++ b/weed/util/network.go
@@ -3,13 +3,13 @@ package util
import (
"net"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func DetectedHostAddress() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
- glog.V(0).Infof("failed to detect ip address: %v", err)
+ log.Infof("failed to detect ip address: %v", err)
return ""
}
diff --git a/weed/util/retry.go b/weed/util/retry.go
index 85c4d150d..c1063685b 100644
--- a/weed/util/retry.go
+++ b/weed/util/retry.go
@@ -4,7 +4,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var RetryWaitTime = 6 * time.Second
@@ -16,13 +16,13 @@ func Retry(name string, job func() error) (err error) {
err = job()
if err == nil {
if hasErr {
- glog.V(0).Infof("retry %s successfully", name)
+ log.Infof("retry %s successfully", name)
}
break
}
if strings.Contains(err.Error(), "transport") {
hasErr = true
- glog.V(0).Infof("retry %s", name)
+ log.Infof("retry %s", name)
time.Sleep(waitTime)
waitTime += waitTime / 2
}
diff --git a/weed/util/throttler.go b/weed/util/throttler.go
index 873161e37..bac899658 100644
--- a/weed/util/throttler.go
+++ b/weed/util/throttler.go
@@ -25,7 +25,7 @@ func (wt *WriteThrottler) MaybeSlowdown(delta int64) {
if overLimitBytes > 0 {
overRatio := float64(overLimitBytes) / float64(wt.compactionBytePerSecond)
sleepTime := time.Duration(overRatio*1000) * time.Millisecond
- // glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
+ // log.Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
time.Sleep(sleepTime)
}
wt.lastSizeCounter, wt.lastSizeCheckTime = 0, time.Now()
diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go
index d477a6b2d..b10168ea5 100644
--- a/weed/wdclient/exclusive_locks/exclusive_locker.go
+++ b/weed/wdclient/exclusive_locks/exclusive_locker.go
@@ -5,7 +5,7 @@ import (
"sync/atomic"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@@ -91,7 +91,7 @@ func (l *ExclusiveLocker) RequestLock() {
}
return err
}); err != nil {
- glog.Errorf("failed to renew lock: %v", err)
+ log.Errorf("failed to renew lock: %v", err)
return
} else {
time.Sleep(RenewInteval)
diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go
index e39b9dfdf..8c863391a 100644
--- a/weed/wdclient/masterclient.go
+++ b/weed/wdclient/masterclient.go
@@ -8,7 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
@@ -46,7 +46,7 @@ func (mc *MasterClient) WaitUntilConnected() {
}
func (mc *MasterClient) KeepConnectedToMaster() {
- glog.V(1).Infof("%s masterClient bootstraps with masters %v", mc.clientType, mc.masters)
+ log.Debugf("%s masterClient bootstraps with masters %v", mc.clientType, mc.masters)
for {
mc.tryAllMasters()
time.Sleep(time.Second)
@@ -68,14 +68,14 @@ func (mc *MasterClient) FindLeaderFromOtherPeers(myMasterAddress string) (leader
leader = resp.Leader
return nil
}); grpcErr != nil {
- glog.V(0).Infof("connect to %s: %v", master, grpcErr)
+ log.Infof("connect to %s: %v", master, grpcErr)
}
if leader != "" {
- glog.V(0).Infof("existing leader is %s", leader)
+ log.Infof("existing leader is %s", leader)
return
}
}
- glog.V(0).Infof("No existing leader found!")
+ log.Infof("No existing leader found!")
return
}
@@ -94,7 +94,7 @@ func (mc *MasterClient) tryAllMasters() {
}
func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) {
- glog.V(1).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
+ log.Debugf("%s masterClient Connecting to master %v", mc.clientType, master)
gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
ctx, cancel := context.WithCancel(context.Background())
@@ -102,28 +102,28 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
stream, err := client.KeepConnected(ctx)
if err != nil {
- glog.V(1).Infof("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err)
+ log.Debugf("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err)
return err
}
if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, GrpcPort: mc.grpcPort}); err != nil {
- glog.V(0).Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err)
+ log.Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err)
return err
}
- glog.V(1).Infof("%s masterClient Connected to %v", mc.clientType, master)
+ log.Debugf("%s masterClient Connected to %v", mc.clientType, master)
mc.currentMaster = master
for {
volumeLocation, err := stream.Recv()
if err != nil {
- glog.V(0).Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err)
+ log.Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err)
return err
}
// maybe the leader is changed
if volumeLocation.Leader != "" {
- glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader)
+ log.Infof("redirected to leader %v", volumeLocation.Leader)
nextHintedLeader = volumeLocation.Leader
return nil
}
@@ -135,18 +135,18 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
DataCenter: volumeLocation.DataCenter,
}
for _, newVid := range volumeLocation.NewVids {
- glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
+ log.Debugf("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
mc.addLocation(newVid, loc)
}
for _, deletedVid := range volumeLocation.DeletedVids {
- glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
+ log.Debugf("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
mc.deleteLocation(deletedVid, loc)
}
}
})
if gprcErr != nil {
- glog.V(1).Infof("%s masterClient failed to connect with master %v: %v", mc.clientType, master, gprcErr)
+ log.Debugf("%s masterClient failed to connect with master %v: %v", mc.clientType, master, gprcErr)
}
return
}
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index 773da0191..8f9baa62d 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -8,7 +8,7 @@ import (
"sync"
"sync/atomic"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
const (
@@ -49,7 +49,7 @@ func (vc *vidMap) getLocationIndex(length int) (int, error) {
func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err error) {
id, err := strconv.Atoi(vid)
if err != nil {
- glog.V(1).Infof("Unknown volume id %s", vid)
+ log.Debugf("Unknown volume id %s", vid)
return nil, err
}
@@ -85,7 +85,7 @@ func (vc *vidMap) LookupFileId(fileId string) (fullUrls []string, err error) {
func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error) {
id, err := strconv.Atoi(vid)
if err != nil {
- glog.V(1).Infof("Unknown volume id %s", vid)
+ log.Debugf("Unknown volume id %s", vid)
return nil, fmt.Errorf("Unknown volume id %s", vid)
}
foundLocations, found := vc.GetLocations(uint32(id))
diff --git a/weed/weed.go b/weed/weed.go
index ecb0ba2a4..b1b19cecc 100644
--- a/weed/weed.go
+++ b/weed/weed.go
@@ -17,7 +17,7 @@ import (
"unicode/utf8"
"github.com/chrislusf/seaweedfs/weed/command"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
var IsDebug *bool
@@ -36,7 +36,7 @@ func setExitStatus(n int) {
}
func main() {
- glog.MaxSize = 1024 * 1024 * 32
+
rand.Seed(time.Now().UnixNano())
flag.Usage = usage
flag.Parse()
@@ -123,7 +123,7 @@ func printUsage(w io.Writer) {
func usage() {
printUsage(os.Stderr)
- fmt.Fprintf(os.Stderr, "For Logging, use \"weed [logging_options] [command]\". The logging options are:\n")
+ fmt.Fprintf(os.Stderr, "For Logging, use \"LOG=[trace|debug|info|warn|error|fatal] weed [command]\".\n")
flag.PrintDefaults()
os.Exit(2)
}
@@ -168,5 +168,5 @@ func exit() {
}
func debug(params ...interface{}) {
- glog.V(4).Infoln(params...)
+ log.Traceln(params...)
}
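With glog's verbosity flags gone, the level is selected per process through the environment: a run that previously used something like "weed -v=4 server" for maximum verbosity would now be started as "LOG=trace weed server".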