Diffstat (limited to 'weed/util')
| -rw-r--r-- | weed/util/chunk_cache/chunk_cache.go | 18 |
| -rw-r--r-- | weed/util/chunk_cache/chunk_cache_on_disk.go | 8 |
| -rw-r--r-- | weed/util/chunk_cache/on_disk_cache_layer.go | 14 |
| -rw-r--r-- | weed/util/cipher.go | 4 |
| -rw-r--r-- | weed/util/compression.go | 4 |
| -rw-r--r-- | weed/util/config.go | 10 |
| -rw-r--r-- | weed/util/file_util.go | 6 |
| -rw-r--r-- | weed/util/grace/pprof.go | 6 |
| -rw-r--r-- | weed/util/grace/signal_handling.go | 4 |
| -rw-r--r-- | weed/util/http/http_global_client_init.go | 4 |
| -rw-r--r-- | weed/util/http/http_global_client_util.go | 14 |
| -rw-r--r-- | weed/util/lock_table.go | 8 |
| -rw-r--r-- | weed/util/log/README.md | 98 |
| -rw-r--r-- | weed/util/log/log.go | 239 |
| -rw-r--r-- | weed/util/log_buffer/log_buffer.go | 24 |
| -rw-r--r-- | weed/util/log_buffer/log_read.go | 14 |
| -rw-r--r-- | weed/util/minfreespace.go | 4 |
| -rw-r--r-- | weed/util/net_timeout.go | 4 |
| -rw-r--r-- | weed/util/network.go | 6 |
| -rw-r--r-- | weed/util/retry.go | 12 |
| -rw-r--r-- | weed/util/skiplist/name_batch.go | 4 |
| -rw-r--r-- | weed/util/skiplist/name_list_serde.go | 6 |
| -rw-r--r-- | weed/util/throttler.go | 2 |
23 files changed, 425 insertions, 88 deletions
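The hunks below replace glog call sites with the new `weed/util/log` package, and the verbosity translation is applied consistently across files. A minimal sketch of the resulting call style follows, with the level mapping inferred from the hunks in this diff (glog.V(4) → log.V(-1), glog.V(1) → log.V(2), glog.V(0) → log.V(3); Errorf/Warningf/Fatalf map one-to-one) and the file id value chosen purely for illustration:

```go
package main

import (
	"github.com/seaweedfs/seaweedfs/weed/util/log"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Init takes a level plus an optional file-rotation config (nil keeps stdout),
	// per the log.go added later in this diff.
	log.Init(zapcore.InfoLevel, nil)

	fileId := "3,01637037d6" // illustrative value

	// glog.V(4).Infof(...) -> log.V(-1).Infof(...): suppressed unless the level is lowered to Debug.
	log.V(-1).Infof("fileId %s is in memcache", fileId)

	// glog.V(0).Infof(...) -> log.V(3).Infof(...): effectively always emitted at the default Info level.
	log.V(3).Infof("retry %s: err: %v", "volume.read", nil)

	// glog.Errorf(...) -> log.Errorf(...): direct one-to-one replacement.
	log.Errorf("failed to parse file id %s", fileId)
}
```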
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go index 7eee41b9b..b0affbcd4 100644 --- a/weed/util/chunk_cache/chunk_cache.go +++ b/weed/util/chunk_cache/chunk_cache.go @@ -4,7 +4,7 @@ import ( "errors" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) @@ -66,13 +66,13 @@ func (c *TieredChunkCache) IsInCache(fileId string, lockNeeded bool) (answer boo item := c.memCache.cache.Get(fileId) if item != nil { - glog.V(4).Infof("fileId %s is in memcache", fileId) + log.V(-1).Infof("fileId %s is in memcache", fileId) return true } fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.V(4).Infof("failed to parse file id %s", fileId) + log.V(-1).Infof("failed to parse file id %s", fileId) return false } @@ -80,7 +80,7 @@ func (c *TieredChunkCache) IsInCache(fileId string, lockNeeded bool) (answer boo for k, v := range diskCacheLayer.diskCaches { _, ok := v.nm.Get(fid.Key) if ok { - glog.V(4).Infof("fileId %s is in diskCaches[%d].volume[%d]", fileId, i, k) + log.V(-1).Infof("fileId %s is in diskCaches[%d].volume[%d]", fileId, i, k) return true } } @@ -100,7 +100,7 @@ func (c *TieredChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64 if minSize <= c.onDiskCacheSizeLimit0 { n, err = c.memCache.readChunkAt(data, fileId, offset) if err != nil { - glog.Errorf("failed to read from memcache: %s", err) + log.Errorf("failed to read from memcache: %s", err) } if n == int(len(data)) { return n, nil @@ -109,7 +109,7 @@ func (c *TieredChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64 fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.Errorf("failed to parse file id %s", fileId) + log.Errorf("failed to parse file id %s", fileId) return 0, nil } @@ -143,9 +143,9 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) { c.Lock() defer c.Unlock() - glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data)) + log.V(-1).Infof("SetChunk %s size %d\n", fileId, len(data)) if c.IsInCache(fileId, false) { - glog.V(4).Infof("fileId %s is already in cache", fileId) + log.V(-1).Infof("fileId %s is already in cache", fileId) return } @@ -160,7 +160,7 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) { fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.Errorf("failed to parse file id %s", fileId) + log.Errorf("failed to parse file id %s", fileId) return } diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go index 87f05d399..c67e51f27 100644 --- a/weed/util/chunk_cache/chunk_cache_on_disk.go +++ b/weed/util/chunk_cache/chunk_cache_on_disk.go @@ -7,7 +7,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -63,7 +63,7 @@ func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCac return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err) } - glog.V(1).Infoln("loading leveldb", v.fileName+".ldb") + log.V(2).Infoln("loading leveldb", v.fileName+".ldb") opts := &opt.Options{ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB @@ -92,9 +92,9 @@ func (v *ChunkCacheVolume) 
doReset() { v.Shutdown() os.Truncate(v.fileName+".dat", 0) os.Truncate(v.fileName+".idx", 0) - glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb") + log.V(-1).Infof("cache removeAll %s ...", v.fileName+".ldb") os.RemoveAll(v.fileName + ".ldb") - glog.V(4).Infof("cache removed %s", v.fileName+".ldb") + log.V(-1).Infof("cache removed %s", v.fileName+".ldb") } func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) { diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go index fdbaef7c2..af1d79b42 100644 --- a/weed/util/chunk_cache/on_disk_cache_layer.go +++ b/weed/util/chunk_cache/on_disk_cache_layer.go @@ -2,7 +2,7 @@ package chunk_cache import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/types" "path" @@ -25,7 +25,7 @@ func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount in fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i)) diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize) if err != nil { - glog.Errorf("failed to add cache %s : %v", fileName, err) + log.Errorf("failed to add cache %s : %v", fileName, err) } else { c.diskCaches = append(c.diskCaches, diskCache) } @@ -47,7 +47,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit { t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset() if resetErr != nil { - glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) + log.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) return } for i := len(c.diskCaches) - 1; i > 0; i-- { @@ -57,7 +57,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { } if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil { - glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err) + log.V(3).Infof("cache write %v size %d: %v", needleId, len(data), err) } } @@ -72,7 +72,7 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) { continue } if err != nil { - glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) + log.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) continue } if len(data) != 0 { @@ -94,7 +94,7 @@ func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length continue } if err != nil { - glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) + log.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) continue } if len(data) != 0 { @@ -114,7 +114,7 @@ func (c *OnDiskCacheLayer) readChunkAt(buffer []byte, needleId types.NeedleId, o continue } if err != nil { - glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) + log.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) continue } if n > 0 { diff --git a/weed/util/cipher.go b/weed/util/cipher.go index f625f885e..b3f228fb3 100644 --- a/weed/util/cipher.go +++ b/weed/util/cipher.go @@ -7,7 +7,7 @@ import ( "errors" "io" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type CipherKey []byte @@ -15,7 +15,7 @@ type CipherKey []byte func GenCipherKey() CipherKey { key := make([]byte, 32) if _, err := 
io.ReadFull(rand.Reader, key); err != nil { - glog.Fatalf("random key gen: %v", err) + log.Fatalf("random key gen: %v", err) } return CipherKey(key) } diff --git a/weed/util/compression.go b/weed/util/compression.go index d62ba9088..9bd3d6fbb 100644 --- a/weed/util/compression.go +++ b/weed/util/compression.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" // "github.com/klauspost/compress/zstd" ) @@ -31,7 +31,7 @@ func MaybeDecompressData(input []byte) []byte { uncompressed, err := DecompressData(input) if err != nil { if err != UnsupportedCompression { - glog.Errorf("decompressed data: %v", err) + log.Errorf("decompressed data: %v", err) } return input } diff --git a/weed/util/config.go b/weed/util/config.go index e5b32d512..0f56203fc 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/viper" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var ( @@ -50,12 +50,12 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file if strings.Contains(err.Error(), "Not Found") { - glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) + log.V(2).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) } else { - glog.Fatalf("Reading %s: %v", viper.ConfigFileUsed(), err) + log.Fatalf("Reading %s: %v", viper.ConfigFileUsed(), err) } if required { - glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ + log.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) @@ -63,7 +63,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { return false } } - glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) + log.V(2).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) return true } diff --git a/weed/util/file_util.go b/weed/util/file_util.go index 430b6bc86..f3818725d 100644 --- a/weed/util/file_util.go +++ b/weed/util/file_util.go @@ -5,7 +5,7 @@ import ( "crypto/sha256" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "os" "os/user" "path/filepath" @@ -24,7 +24,7 @@ func TestFolderWritable(folder string) (err error) { return errors.New("Not a valid folder!") } perm := fileInfo.Mode().Perm() - glog.V(0).Infoln("Folder", folder, "Permission:", perm) + log.V(3).Infoln("Folder", folder, "Permission:", perm) if 0200&perm != 0 { return nil } @@ -67,7 +67,7 @@ func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti return } if err != nil { - glog.Errorf("check %s: %v", filename, err) + log.Errorf("check %s: %v", filename, err) return } if fi.Mode()&0400 != 0 { diff --git a/weed/util/grace/pprof.go b/weed/util/grace/pprof.go index 620184c9b..6dd2bb5d0 100644 --- a/weed/util/grace/pprof.go +++ b/weed/util/grace/pprof.go @@ -5,14 +5,14 @@ import ( "runtime" "runtime/pprof" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func SetupProfiling(cpuProfile, memProfile string) { if cpuProfile != "" { f, err := os.Create(cpuProfile) if err != nil { - glog.Fatal(err) 
+ log.Fatal(err) } runtime.SetBlockProfileRate(1) runtime.SetMutexProfileFraction(1) @@ -44,7 +44,7 @@ func SetupProfiling(cpuProfile, memProfile string) { runtime.MemProfileRate = 1 f, err := os.Create(memProfile) if err != nil { - glog.Fatal(err) + log.Fatal(err) } OnInterrupt(func() { pprof.WriteHeapProfile(f) diff --git a/weed/util/grace/signal_handling.go b/weed/util/grace/signal_handling.go index 0fc0f43e1..4e6ef456f 100644 --- a/weed/util/grace/signal_handling.go +++ b/weed/util/grace/signal_handling.go @@ -4,7 +4,7 @@ package grace import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "os" "os/signal" "reflect" @@ -45,7 +45,7 @@ func init() { } else { interruptHookLock.RLock() for _, hook := range interruptHooks { - glog.V(4).Infof("exec interrupt hook func name:%s", GetFunctionName(hook)) + log.V(-1).Infof("exec interrupt hook func name:%s", GetFunctionName(hook)) hook() } interruptHookLock.RUnlock() diff --git a/weed/util/http/http_global_client_init.go b/weed/util/http/http_global_client_init.go index 0dcb05cfd..d4f4d752c 100644 --- a/weed/util/http/http_global_client_init.go +++ b/weed/util/http/http_global_client_init.go @@ -1,7 +1,7 @@ package http import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client" ) @@ -22,6 +22,6 @@ func InitGlobalHttpClient() { globalHttpClient, err = NewGlobalHttpClient() if err != nil { - glog.Fatalf("error init global http client: %v", err) + log.Fatalf("error init global http client: %v", err) } } diff --git a/weed/util/http/http_global_client_util.go b/weed/util/http/http_global_client_util.go index 33d978d9e..e2e6dfbba 100644 --- a/weed/util/http/http_global_client_util.go +++ b/weed/util/http/http_global_client_util.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var ErrNotFound = fmt.Errorf("not found") @@ -281,7 +281,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC // drains the response body to avoid memory leak data, _ := io.ReadAll(reader) if len(data) != 0 { - glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) + log.V(2).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) } return n, err } @@ -363,7 +363,7 @@ func readEncryptedUrl(fileUrl, jwt string, cipherKey []byte, isContentCompressed if isContentCompressed { decryptedData, err = util.DecompressData(decryptedData) if err != nil { - glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err) + log.V(3).Infof("unzip decrypt %s: %v", fileUrl, err) } } if len(decryptedData) < int(offset)+size { @@ -423,7 +423,7 @@ func CloseResponse(resp *http.Response) { io.Copy(io.Discard, reader) resp.Body.Close() if reader.BytesRead > 0 { - glog.V(1).Infof("response leftover %d bytes", reader.BytesRead) + log.V(2).Infof("response leftover %d bytes", reader.BytesRead) } } @@ -432,7 +432,7 @@ func CloseRequest(req *http.Request) { io.Copy(io.Discard, reader) req.Body.Close() if reader.BytesRead > 0 { - glog.V(1).Infof("request leftover %d bytes", reader.BytesRead) + log.V(2).Infof("request leftover %d bytes", reader.BytesRead) } } @@ -467,13 +467,13 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte, break } if err != nil { - glog.V(0).Infof("read %s failed, err: %v", urlString, err) + log.V(3).Infof("read %s failed, err: %v", urlString, 
err) } else { break } } if err != nil && shouldRetry { - glog.V(0).Infof("retry reading in %v", waitTime) + log.V(3).Infof("retry reading in %v", waitTime) time.Sleep(waitTime) } else { break diff --git a/weed/util/lock_table.go b/weed/util/lock_table.go index a932ae5b1..5c0e11368 100644 --- a/weed/util/lock_table.go +++ b/weed/util/lock_table.go @@ -5,7 +5,7 @@ import ( "sync" "sync/atomic" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) // LockTable is a table of locks that can be acquired. @@ -70,7 +70,7 @@ func (lt *LockTable[T]) AcquireLock(intention string, key T, lockType LockType) // If the lock is held exclusively, wait entry.mu.Lock() if len(entry.waiters) > 0 || lockType == ExclusiveLock || entry.activeExclusiveLockOwnerCount > 0 { - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s wait for %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { @@ -97,7 +97,7 @@ func (lt *LockTable[T]) AcquireLock(intention string, key T, lockType LockType) } // Otherwise, grant the lock - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s locked %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { @@ -150,7 +150,7 @@ func (lt *LockTable[T]) ReleaseLock(key T, lock *ActiveLock) { delete(lt.locksInFlight, key) } - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s unlocked %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lock.lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { diff --git a/weed/util/log/README.md b/weed/util/log/README.md new file mode 100644 index 000000000..b6a603bf3 --- /dev/null +++ b/weed/util/log/README.md @@ -0,0 +1,98 @@ +# SeaweedFS Logging Package + +This package provides a logging interface for SeaweedFS using [zap](https://github.com/uber-go/zap) as the underlying logging library. It provides a similar interface to glog while offering the performance and features of zap. 
+ +## Features + +- High-performance structured logging +- JSON output format +- Dynamic log level changes +- Support for both structured and unstructured logging +- Compatible with existing glog-style code +- Thread-safe + +## Usage + +### Basic Setup + +```go +import "github.com/seaweedfs/seaweedfs/weed/util/log" +import "go.uber.org/zap/zapcore" + +// Initialize the logger with info level +log.Init(zapcore.InfoLevel) +``` + +### Basic Logging + +```go +// Basic logging +log.Info("This is an info message") +log.Infof("This is a formatted info message: %s", "hello") +log.Warning("This is a warning message") +log.Warningf("This is a formatted warning message: %s", "hello") +log.Error("This is an error message") +log.Errorf("This is a formatted error message: %s", "hello") +``` + +### Verbose Logging + +```go +// Using V for verbose logging +if log.V(1) { + log.Info("This is a verbose message") +} +``` + +### Structured Logging + +```go +// Using structured logging +logger := log.With( + zap.String("service", "example"), + zap.Int("version", 1), +) +logger.Info("This is a structured log message") + +// Using sugared logger with fields +sugar := log.WithSugar("service", "example", "version", 1) +sugar.Infof("This is a sugared log message with fields: %s", "hello") +``` + +### Fatal Logging + +```go +// Fatal logging (will exit the program) +log.Fatal("This is a fatal message") +log.Fatalf("This is a formatted fatal message: %s", "hello") +``` + +## Log Levels + +The package supports the following log levels: + +- Debug (-1) +- Info (0) +- Warning (1) +- Error (2) +- Fatal (3) + +## Migration from glog + +To migrate from glog to this package: + +1. Replace `import "github.com/golang/glog"` with `import "github.com/seaweedfs/seaweedfs/weed/util/log"` +2. Replace glog function calls with their log package equivalents: + - `glog.Info` -> `log.Info` + - `glog.Infof` -> `log.Infof` + - `glog.Warning` -> `log.Warning` + - `glog.Warningf` -> `log.Warningf` + - `glog.Error` -> `log.Error` + - `glog.Errorf` -> `log.Errorf` + - `glog.Fatal` -> `log.Fatal` + - `glog.Fatalf` -> `log.Fatalf` + - `glog.V(level)` -> `log.V(level)` + +## Example + +See the `example` directory for a complete example of how to use the logging package.
\ No newline at end of file diff --git a/weed/util/log/log.go b/weed/util/log/log.go new file mode 100644 index 000000000..0a6af15c9 --- /dev/null +++ b/weed/util/log/log.go @@ -0,0 +1,239 @@ +package log + +import ( + "os" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +// Level is an alias for zapcore.Level +type Level = zapcore.Level + +// LogConfig holds the configuration for logging +type LogConfig struct { + // LogFile is the path to the log file. If empty, logs will be written to stdout + LogFile string + // MaxSize is the maximum size in megabytes of the log file before it gets rotated + MaxSize int + // MaxBackups is the maximum number of old log files to retain + MaxBackups int + // MaxAge is the maximum number of days to retain old log files + MaxAge int + // Compress determines if the rotated log files should be compressed + Compress bool +} + +var ( + // Logger is the global logger instance + Logger *zap.Logger + // Sugar is the global sugared logger instance + Sugar *zap.SugaredLogger + // atom is the atomic level for dynamic log level changes + atom zap.AtomicLevel + // once ensures initialization happens only once + once sync.Once + // defaultLevel is the default logging level if not specified + defaultLevel = zapcore.InfoLevel +) + +// VerboseLogger wraps a sugared logger with verbosity level +type VerboseLogger struct { + level Level +} + +// Verbose returns a VerboseLogger for the given verbosity level +func Verbose(level Level) *VerboseLogger { + return &VerboseLogger{level: level} +} + +// Infof logs a formatted message at info level if the verbosity level is enabled +func (v *VerboseLogger) Infof(format string, args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Infof(format, args...) + } +} + +// Info logs a message at info level if the verbosity level is enabled +func (v *VerboseLogger) Info(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Info(args...) + } +} + +// Infoln logs a message at info level with a newline if the verbosity level is enabled +func (v *VerboseLogger) Infoln(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Infoln(args...) + } +} + +// Warning logs a message at warn level if the verbosity level is enabled +func (v *VerboseLogger) Warning(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Warn(args...) + } +} + +// Warningf logs a formatted message at warn level if the verbosity level is enabled +func (v *VerboseLogger) Warningf(format string, args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Warnf(format, args...) 
+ } +} + +// Init initializes the logger with the given level and configuration +func Init(level Level, config *LogConfig) { + once.Do(func() { + // Initialize with default level if not specified + if level == 0 { + level = defaultLevel + } + + atom = zap.NewAtomicLevel() + atom.SetLevel(level) + + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + var writeSyncer zapcore.WriteSyncer + if config != nil && config.LogFile != "" { + // Create a lumberjack logger for log rotation + rotator := &lumberjack.Logger{ + Filename: config.LogFile, + MaxSize: config.MaxSize, // megabytes + MaxBackups: config.MaxBackups, + MaxAge: config.MaxAge, // days + Compress: config.Compress, + } + writeSyncer = zapcore.AddSync(rotator) + } else { + writeSyncer = zapcore.AddSync(os.Stdout) + } + + core := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + writeSyncer, + atom, + ) + + Logger = zap.New(core) + Sugar = Logger.Sugar() + }) +} + +// SetLevel changes the logging level dynamically +func SetLevel(level Level) { + if atom == (zap.AtomicLevel{}) { + Init(level, nil) + return + } + atom.SetLevel(level) +} + +// V returns a VerboseLogger for the given verbosity level +func V(level Level) *VerboseLogger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Verbose(level) +} + +// Info logs a message at info level +func Info(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Info(args...) +} + +// Infof logs a formatted message at info level +func Infof(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Infof(format, args...) +} + +// Warning logs a message at warn level +func Warning(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Warn(args...) +} + +// Warningf logs a formatted message at warn level +func Warningf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Warnf(format, args...) +} + +// Error logs a message at error level +func Error(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Error(args...) +} + +// Errorf logs a formatted message at error level +func Errorf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Errorf(format, args...) +} + +// Fatal logs a message at fatal level and then calls os.Exit(1) +func Fatal(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatal(args...) +} + +// Fatalf logs a formatted message at fatal level and then calls os.Exit(1) +func Fatalf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatalf(format, args...) +} + +// Exitf logs a formatted message at fatal level and then calls os.Exit(1) +func Exitf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatalf(format, args...) + os.Exit(1) +} + +// With returns a logger with the given fields +func With(fields ...zap.Field) *zap.Logger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Logger.With(fields...) +} + +// WithSugar returns a sugared logger with the given fields +func WithSugar(args ...interface{}) *zap.SugaredLogger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Sugar.With(args...) 
+} + +// Printf logs a formatted message at info level +func Printf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Infof(format, args...) +} diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go index fb1f8dc2f..4fb6b2722 100644 --- a/weed/util/log_buffer/log_buffer.go +++ b/weed/util/log_buffer/log_buffer.go @@ -8,7 +8,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -117,7 +117,7 @@ func (logBuffer *LogBuffer) AddDataToBuffer(partitionKey, data []byte, processin } if logBuffer.startTime.Add(logBuffer.flushInterval).Before(ts) || len(logBuffer.buf)-logBuffer.pos < size+4 { - // glog.V(0).Infof("%s copyToFlush1 batch:%d count:%d start time %v, ts %v, remaining %d bytes", logBuffer.name, logBuffer.batchIndex, len(logBuffer.idx), logBuffer.startTime, ts, len(logBuffer.buf)-logBuffer.pos) + // log.V(3).Infof("%s copyToFlush1 batch:%d count:%d start time %v, ts %v, remaining %d bytes", logBuffer.name, logBuffer.batchIndex, len(logBuffer.idx), logBuffer.startTime, ts, len(logBuffer.buf)-logBuffer.pos) toFlush = logBuffer.copyToFlush() logBuffer.startTime = ts if len(logBuffer.buf) < size+4 { @@ -159,7 +159,7 @@ func (logBuffer *LogBuffer) IsAllFlushed() bool { func (logBuffer *LogBuffer) loopFlush() { for d := range logBuffer.flushChan { if d != nil { - // glog.V(4).Infof("%s flush [%v, %v] size %d", m.name, d.startTime, d.stopTime, len(d.data.Bytes())) + // log.V(-1).Infof("%s flush [%v, %v] size %d", m.name, d.startTime, d.stopTime, len(d.data.Bytes())) logBuffer.flushFn(logBuffer, d.startTime, d.stopTime, d.data.Bytes()) d.releaseMemory() // local logbuffer is different from aggregate logbuffer here @@ -179,10 +179,10 @@ func (logBuffer *LogBuffer) loopInterval() { toFlush := logBuffer.copyToFlush() logBuffer.Unlock() if toFlush != nil { - glog.V(4).Infof("%s flush [%v, %v] size %d", logBuffer.name, toFlush.startTime, toFlush.stopTime, len(toFlush.data.Bytes())) + log.V(-1).Infof("%s flush [%v, %v] size %d", logBuffer.name, toFlush.startTime, toFlush.stopTime, len(toFlush.data.Bytes())) logBuffer.flushChan <- toFlush } else { - // glog.V(0).Infof("%s no flush", m.name) + // log.V(3).Infof("%s no flush", m.name) } } } @@ -198,9 +198,9 @@ func (logBuffer *LogBuffer) copyToFlush() *dataToFlush { stopTime: logBuffer.stopTime, data: copiedBytes(logBuffer.buf[:logBuffer.pos]), } - // glog.V(4).Infof("%s flushing [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime) + // log.V(-1).Infof("%s flushing [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime) } else { - // glog.V(4).Infof("%s removed from memory [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime) + // log.V(-1).Infof("%s removed from memory [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime) logBuffer.lastFlushDataTime = logBuffer.stopTime } logBuffer.buf = logBuffer.prevBuffers.SealBuffer(logBuffer.startTime, logBuffer.stopTime, logBuffer.buf, logBuffer.pos, logBuffer.batchIndex) @@ -259,7 +259,7 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu return nil, -2, nil } else if lastReadPosition.Before(tsMemory) && lastReadPosition.BatchIndex+1 < 
tsBatchIndex { // case 2.3 if !logBuffer.lastFlushDataTime.IsZero() { - glog.V(0).Infof("resume with last flush time: %v", logBuffer.lastFlushDataTime) + log.V(3).Infof("resume with last flush time: %v", logBuffer.lastFlushDataTime) return nil, -2, ResumeFromDiskError } } @@ -270,14 +270,14 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu return nil, logBuffer.batchIndex, nil } if lastReadPosition.After(logBuffer.stopTime) { - // glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadPosition, m.stopTime) + // log.Fatalf("unexpected last read time %v, older than latest %v", lastReadPosition, m.stopTime) return nil, logBuffer.batchIndex, nil } if lastReadPosition.Before(logBuffer.startTime) { // println("checking ", lastReadPosition.UnixNano()) for _, buf := range logBuffer.prevBuffers.buffers { if buf.startTime.After(lastReadPosition.Time) { - // glog.V(4).Infof("%s return the %d sealed buffer %v", m.name, i, buf.startTime) + // log.V(-1).Infof("%s return the %d sealed buffer %v", m.name, i, buf.startTime) // println("return the", i, "th in memory", buf.startTime.UnixNano()) return copiedBytes(buf.buf[:buf.size]), buf.batchIndex, nil } @@ -287,7 +287,7 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu return copiedBytes(buf.buf[pos:buf.size]), buf.batchIndex, nil } } - // glog.V(4).Infof("%s return the current buf %v", m.name, lastReadPosition) + // log.V(-1).Infof("%s return the current buf %v", m.name, lastReadPosition) return copiedBytes(logBuffer.buf[:logBuffer.pos]), logBuffer.batchIndex, nil } @@ -358,7 +358,7 @@ func readTs(buf []byte, pos int) (size int, ts int64) { err := proto.Unmarshal(entryData, logEntry) if err != nil { - glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err) + log.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err) } return size, logEntry.TsNs diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go index cf83de1e5..ac7ffec53 100644 --- a/weed/util/log_buffer/log_read.go +++ b/weed/util/log_buffer/log_read.go @@ -7,7 +7,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -57,7 +57,7 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition if bytesBuf != nil { readSize = bytesBuf.Len() } - glog.V(4).Infof("%s ReadFromBuffer at %v batch %d. Read bytes %v batch %d", readerName, lastReadPosition, lastReadPosition.BatchIndex, readSize, batchIndex) + log.V(-1).Infof("%s ReadFromBuffer at %v batch %d. 
Read bytes %v batch %d", readerName, lastReadPosition, lastReadPosition.BatchIndex, readSize, batchIndex) if bytesBuf == nil { if batchIndex >= 0 { lastReadPosition = NewMessagePosition(lastReadPosition.UnixNano(), batchIndex) @@ -93,14 +93,14 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition size := util.BytesToUint32(buf[pos : pos+4]) if pos+4+int(size) > len(buf) { err = ResumeError - glog.Errorf("LoopProcessLogData: %s read buffer %v read %d entries [%d,%d) from [0,%d)", readerName, lastReadPosition, batchSize, pos, pos+int(size)+4, len(buf)) + log.Errorf("LoopProcessLogData: %s read buffer %v read %d entries [%d,%d) from [0,%d)", readerName, lastReadPosition, batchSize, pos, pos+int(size)+4, len(buf)) return } entryData := buf[pos+4 : pos+4+int(size)] logEntry := &filer_pb.LogEntry{} if err = proto.Unmarshal(entryData, logEntry); err != nil { - glog.Errorf("unexpected unmarshal mq_pb.Message: %v", err) + log.Errorf("unexpected unmarshal mq_pb.Message: %v", err) pos += 4 + int(size) continue } @@ -112,11 +112,11 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition lastReadPosition = NewMessagePosition(logEntry.TsNs, batchIndex) if isDone, err = eachLogDataFn(logEntry); err != nil { - glog.Errorf("LoopProcessLogData: %s process log entry %d %v: %v", readerName, batchSize+1, logEntry, err) + log.Errorf("LoopProcessLogData: %s process log entry %d %v: %v", readerName, batchSize+1, logEntry, err) return } if isDone { - glog.V(0).Infof("LoopProcessLogData2: %s process log entry %d", readerName, batchSize+1) + log.V(3).Infof("LoopProcessLogData2: %s process log entry %d", readerName, batchSize+1) return } @@ -126,7 +126,7 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition } - glog.V(4).Infof("%s sent messages ts[%+v,%+v] size %d\n", readerName, startPosition, lastReadPosition, batchSize) + log.V(-1).Infof("%s sent messages ts[%+v,%+v] size %d\n", readerName, startPosition, lastReadPosition, batchSize) } } diff --git a/weed/util/minfreespace.go b/weed/util/minfreespace.go index 0c4461ff1..6743fbd6a 100644 --- a/weed/util/minfreespace.go +++ b/weed/util/minfreespace.go @@ -3,7 +3,7 @@ package util import ( "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "strconv" "strings" ) @@ -61,7 +61,7 @@ func MustParseMinFreeSpace(minFreeSpace string, minFreeSpacePercent string) (spa if vv, e := ParseMinFreeSpace(freeString); e == nil { spaces = append(spaces, *vv) } else { - glog.Fatalf("The value specified in -minFreeSpace not a valid value %s", freeString) + log.Fatalf("The value specified in -minFreeSpace not a valid value %s", freeString) } } diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index f235a77b3..1069acc01 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -1,7 +1,7 @@ package util import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "net" "time" @@ -113,7 +113,7 @@ func NewIpAndLocalListeners(host string, port int, timeout time.Duration) (ipLis if host != "localhost" && host != "" && host != "0.0.0.0" && host != "127.0.0.1" && host != "[::]" && host != "[::1]" { listener, err = net.Listen("tcp", JoinHostPort("localhost", port)) if err != nil { - glog.V(0).Infof("skip starting on %s:%d: %v", host, port, err) + log.V(3).Infof("skip starting on %s:%d: %v", host, port, err) return ipListener, nil, nil } diff --git a/weed/util/network.go 
b/weed/util/network.go index 69559b5f0..1e1132266 100644 --- a/weed/util/network.go +++ b/weed/util/network.go @@ -5,13 +5,13 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func DetectedHostAddress() string { netInterfaces, err := net.Interfaces() if err != nil { - glog.V(0).Infof("failed to detect net interfaces: %v", err) + log.V(3).Infof("failed to detect net interfaces: %v", err) return "" } @@ -33,7 +33,7 @@ func selectIpV4(netInterfaces []net.Interface, isIpV4 bool) string { } addrs, err := netInterface.Addrs() if err != nil { - glog.V(0).Infof("get interface addresses: %v", err) + log.V(3).Infof("get interface addresses: %v", err) } for _, a := range addrs { diff --git a/weed/util/retry.go b/weed/util/retry.go index 006cda466..705729995 100644 --- a/weed/util/retry.go +++ b/weed/util/retry.go @@ -4,7 +4,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var RetryWaitTime = 6 * time.Second @@ -16,14 +16,14 @@ func Retry(name string, job func() error) (err error) { err = job() if err == nil { if hasErr { - glog.V(0).Infof("retry %s successfully", name) + log.V(3).Infof("retry %s successfully", name) } waitTime = time.Second break } if strings.Contains(err.Error(), "transport") { hasErr = true - glog.V(0).Infof("retry %s: err: %v", name, err) + log.V(3).Infof("retry %s: err: %v", name, err) } else { break } @@ -40,14 +40,14 @@ func MultiRetry(name string, errList []string, job func() error) (err error) { err = job() if err == nil { if hasErr { - glog.V(0).Infof("retry %s successfully", name) + log.V(3).Infof("retry %s successfully", name) } waitTime = time.Second break } if containErr(err.Error(), errList) { hasErr = true - glog.V(0).Infof("retry %s: err: %v", name, err) + log.V(3).Infof("retry %s: err: %v", name, err) } else { break } @@ -68,7 +68,7 @@ func RetryUntil(name string, job func() error, onErrFn func(err error) (shouldCo } if onErrFn(err) { if strings.Contains(err.Error(), "transport") { - glog.V(0).Infof("retry %s: err: %v", name, err) + log.V(3).Infof("retry %s: err: %v", name, err) } time.Sleep(waitTime) if waitTime < RetryWaitTime { diff --git a/weed/util/skiplist/name_batch.go b/weed/util/skiplist/name_batch.go index 1ab2a6b1f..55b831bdf 100644 --- a/weed/util/skiplist/name_batch.go +++ b/weed/util/skiplist/name_batch.go @@ -1,7 +1,7 @@ package skiplist import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "google.golang.org/protobuf/proto" "slices" "strings" @@ -63,7 +63,7 @@ func LoadNameBatch(data []byte) *NameBatch { if len(data) > 0 { err := proto.Unmarshal(data, t) if err != nil { - glog.Errorf("unmarshal into NameBatchData{} : %v", err) + log.Errorf("unmarshal into NameBatchData{} : %v", err) return nil } } diff --git a/weed/util/skiplist/name_list_serde.go b/weed/util/skiplist/name_list_serde.go index 364c0f87a..4ec0da5e9 100644 --- a/weed/util/skiplist/name_list_serde.go +++ b/weed/util/skiplist/name_list_serde.go @@ -1,7 +1,7 @@ package skiplist import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "google.golang.org/protobuf/proto" ) @@ -18,7 +18,7 @@ func LoadNameList(data []byte, store ListStore, batchSize int) *NameList { message := &SkipListProto{} if err := proto.Unmarshal(data, message); err != nil { - glog.Errorf("loading skiplist: %v", err) + log.Errorf("loading skiplist: %v", err) } 
nl.skipList.MaxNewLevel = int(message.MaxNewLevel) nl.skipList.MaxLevel = int(message.MaxLevel) @@ -65,7 +65,7 @@ func (nl *NameList) ToBytes() []byte { } data, err := proto.Marshal(message) if err != nil { - glog.Errorf("marshal skiplist: %v", err) + log.Errorf("marshal skiplist: %v", err) } return data } diff --git a/weed/util/throttler.go b/weed/util/throttler.go index 873161e37..506b80a51 100644 --- a/weed/util/throttler.go +++ b/weed/util/throttler.go @@ -25,7 +25,7 @@ func (wt *WriteThrottler) MaybeSlowdown(delta int64) { if overLimitBytes > 0 { overRatio := float64(overLimitBytes) / float64(wt.compactionBytePerSecond) sleepTime := time.Duration(overRatio*1000) * time.Millisecond - // glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio) + // log.V(3).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio) time.Sleep(sleepTime) } wt.lastSizeCounter, wt.lastSizeCheckTime = 0, time.Now() |
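For reference, the rotation and structured-logging features introduced by the new package can be exercised as below; this is a sketch against the API in `weed/util/log/log.go` above, with the file path and field values chosen for illustration only:

```go
package main

import (
	"github.com/seaweedfs/seaweedfs/weed/util/log"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// JSON logs to a rotating file via lumberjack (all LogConfig values here are illustrative).
	log.Init(zapcore.DebugLevel, &log.LogConfig{
		LogFile:    "/tmp/weed.log",
		MaxSize:    100, // megabytes before rotation
		MaxBackups: 3,
		MaxAge:     7, // days
		Compress:   true,
	})

	log.Infof("volume server listening on %s:%d", "localhost", 8080)
	log.V(-1).Info("debug-level detail, emitted because the level is Debug")

	// Structured fields through the embedded zap logger.
	logger := log.With(zap.String("component", "chunk_cache"), zap.Int("tier", 0))
	logger.Info("cache layer initialized")

	// Raise the level at runtime to quiet verbose output.
	log.SetLevel(zapcore.InfoLevel)
}
```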
