author    Chris Lu <chris.lu@gmail.com>    2020-11-16 22:26:58 -0800
committer Chris Lu <chris.lu@gmail.com>    2020-11-16 22:26:58 -0800
commit    6c9156b25f8b1c28fb0cc909310a20aeeec0e087 (patch)
tree      343e30d98e46a081aa57adfc334b807d0b3255dc /weed/storage
parent    9add554feb53706d1d878cc9636d234e622b8a80 (diff)
switch to logrus (origin/logrus)

Losing filename and line number, which are critical for debugging.
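
The change below is a mechanical substitution across weed/storage: every glog call is rewritten onto a new weed/util/log package, with glog's numeric verbosity collapsed into named levels (V(0) becomes Infof, V(1)/V(2) become Debugf, V(3)/V(4) become Tracef, Warningf becomes Warnf; Errorf and Fatalf keep their names). As a minimal sketch of what such a logrus-backed shim could look like (the actual weed/util/log in this commit may differ), the package boils down to thin wrappers:

    // Hypothetical sketch of weed/util/log as a thin logrus shim.
    // Only the call sites that appear in this diff are shown.
    package log

    import "github.com/sirupsen/logrus"

    func init() {
        // logrus can re-attach a file:line via SetReportCaller, but with
        // a wrapper package like this it reports the shim's own frame
        // unless extra frame-skipping is added; hence the note above
        // about losing filename and line number.
        logrus.SetReportCaller(true)
    }

    func Infof(format string, args ...interface{})  { logrus.Infof(format, args...) }
    func Infoln(args ...interface{})                { logrus.Infoln(args...) }
    func Debug(args ...interface{})                 { logrus.Debug(args...) }
    func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...) }
    func Trace(args ...interface{})                 { logrus.Trace(args...) }
    func Tracef(format string, args ...interface{}) { logrus.Tracef(format, args...) }
    func Warnf(format string, args ...interface{})  { logrus.Warnf(format, args...) }
    func Errorf(format string, args ...interface{}) { logrus.Errorf(format, args...) }
    func Fatalf(format string, args ...interface{}) { logrus.Fatalf(format, args...) }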
Diffstat (limited to 'weed/storage')
-rw-r--r--  weed/storage/backend/backend.go                    10
-rw-r--r--  weed/storage/backend/s3_backend/s3_backend.go      16
-rw-r--r--  weed/storage/backend/s3_backend/s3_download.go      4
-rw-r--r--  weed/storage/backend/s3_backend/s3_upload.go        4
-rw-r--r--  weed/storage/backend/volume_create.go               4
-rw-r--r--  weed/storage/backend/volume_create_linux.go         4
-rw-r--r--  weed/storage/backend/volume_create_windows.go       4
-rw-r--r--  weed/storage/disk_location.go                      20
-rw-r--r--  weed/storage/erasure_coding/ec_encoder.go           6
-rw-r--r--  weed/storage/idx/walk.go                            6
-rw-r--r--  weed/storage/needle/needle_parse_upload.go         10
-rw-r--r--  weed/storage/needle/needle_read_write.go            6
-rw-r--r--  weed/storage/needle_map/compact_map_test.go         2
-rw-r--r--  weed/storage/needle_map/memdb.go                    4
-rw-r--r--  weed/storage/needle_map_leveldb.go                 18
-rw-r--r--  weed/storage/needle_map_memory.go                   6
-rw-r--r--  weed/storage/needle_map_metric.go                   2
-rw-r--r--  weed/storage/needle_map_metric_test.go             12
-rw-r--r--  weed/storage/needle_map_sorted_file.go             12
-rw-r--r--  weed/storage/store.go                              24
-rw-r--r--  weed/storage/store_ec.go                           32
-rw-r--r--  weed/storage/store_ec_delete.go                     6
-rw-r--r--  weed/storage/store_vacuum.go                        4
-rw-r--r--  weed/storage/super_block/super_block.go             6
-rw-r--r--  weed/storage/volume.go                             10
-rw-r--r--  weed/storage/volume_checking.go                    10
-rw-r--r--  weed/storage/volume_loading.go                     30
-rw-r--r--  weed/storage/volume_read_write.go                  34
-rw-r--r--  weed/storage/volume_super_block.go                  4
-rw-r--r--  weed/storage/volume_tier.go                         4
-rw-r--r--  weed/storage/volume_vacuum.go                      48
31 files changed, 181 insertions(+), 181 deletions(-)
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go
index daab29621..e32cadf08 100644
--- a/weed/storage/backend/backend.go
+++ b/weed/storage/backend/backend.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/spf13/viper"
@@ -52,7 +52,7 @@ func LoadConfiguration(config *viper.Viper) {
for backendTypeName := range config.GetStringMap(StorageBackendPrefix) {
backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)]
if !found {
- glog.Fatalf("backend storage type %s not found", backendTypeName)
+ log.Fatalf("backend storage type %s not found", backendTypeName)
}
for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) {
if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") {
@@ -61,7 +61,7 @@ func LoadConfiguration(config *viper.Viper) {
backendStorage, buildErr := backendStorageFactory.BuildStorage(config,
StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId)
if buildErr != nil {
- glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId)
+ log.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId)
}
BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage
if backendStorageId == "default" {
@@ -78,12 +78,12 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) {
for _, storageBackend := range storageBackends {
backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)]
if !found {
- glog.Warningf("storage type %s not found", storageBackend.Type)
+ log.Warnf("storage type %s not found", storageBackend.Type)
continue
}
backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id)
if buildErr != nil {
- glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id)
+ log.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id)
}
BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage
if storageBackend.Id == "default" {
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go
index 4706c9334..94c71fef6 100644
--- a/weed/storage/backend/s3_backend/s3_backend.go
+++ b/weed/storage/backend/s3_backend/s3_backend.go
@@ -11,7 +11,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/google/uuid"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
)
@@ -51,7 +51,7 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
- glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
+ log.Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
return
}
@@ -83,7 +83,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn
randomUuid, _ := uuid.NewRandom()
key = randomUuid.String()
- glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
+ log.Debugf("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
@@ -92,7 +92,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
- glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
+ log.Debugf("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)
@@ -101,7 +101,7 @@ func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(pro
func (s *S3BackendStorage) DeleteFile(key string) (err error) {
- glog.V(1).Infof("delete dat file %s from remote", key)
+ log.Debugf("delete dat file %s from remote", key)
err = deleteFromS3(s.conn, s.bucket, key)
@@ -118,7 +118,7 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n
bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)
- // glog.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
+ // log.Infof("read %s %s", s3backendStorageFile.key, bytesRange)
getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
Bucket: &s3backendStorageFile.backendStorage.bucket,
@@ -131,8 +131,8 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n
}
defer getObjectOutput.Body.Close()
- glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
- glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
+ log.Tracef("read %s %s", s3backendStorageFile.key, bytesRange)
+ log.Tracef("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
for {
if n, err = getObjectOutput.Body.Read(p); err == nil && n < len(p) {
diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go
index dbc28446a..fd2c56939 100644
--- a/weed/storage/backend/s3_backend/s3_download.go
+++ b/weed/storage/backend/s3_backend/s3_download.go
@@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string,
@@ -50,7 +50,7 @@ func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string
return fileSize, fmt.Errorf("failed to download file %s: %v", destFileName, err)
}
- glog.V(1).Infof("downloaded file %s\n", destFileName)
+ log.Debugf("downloaded file %s\n", destFileName)
return
}
diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go
index 500a85590..c7b23d005 100644
--- a/weed/storage/backend/s3_backend/s3_upload.go
+++ b/weed/storage/backend/s3_backend/s3_upload.go
@@ -9,7 +9,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
@@ -73,7 +73,7 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
if err != nil {
return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
}
- glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location)
+ log.Debugf("file %s uploaded to %s\n", filename, result.Location)
return
}
diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go
index d4bd8e40f..d408581f3 100644
--- a/weed/storage/backend/volume_create.go
+++ b/weed/storage/backend/volume_create.go
@@ -5,7 +5,7 @@ package backend
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
@@ -14,7 +14,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
return nil, e
}
if preallocate > 0 {
- glog.V(2).Infof("Preallocated disk space for %s is not supported", fileName)
+ log.Debugf("Preallocated disk space for %s is not supported", fileName)
}
return NewDiskFile(file), nil
}
diff --git a/weed/storage/backend/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go
index 260c2c2a3..ecb5f6378 100644
--- a/weed/storage/backend/volume_create_linux.go
+++ b/weed/storage/backend/volume_create_linux.go
@@ -6,7 +6,7 @@ import (
"os"
"syscall"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
@@ -16,7 +16,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
}
if preallocate != 0 {
syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
- glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
+ log.Debugf("Preallocated %d bytes disk space for %s", preallocate, fileName)
}
return NewDiskFile(file), nil
}
diff --git a/weed/storage/backend/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go
index 7d40ec0d7..1ffb2c9d8 100644
--- a/weed/storage/backend/volume_create_windows.go
+++ b/weed/storage/backend/volume_create_windows.go
@@ -6,13 +6,13 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
"golang.org/x/sys/windows"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
)
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
if preallocate > 0 {
- glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
+ log.Infof("Preallocated disk space for %s is not supported", fileName)
}
if memoryMapSizeMB > 0 {
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index 775ebf092..41f6e9adf 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -9,7 +9,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -66,13 +66,13 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
noteFile := l.Directory + "/" + name + ".note"
if util.FileExists(noteFile) {
note, _ := ioutil.ReadFile(noteFile)
- glog.Warningf("volume %s was not completed: %s", name, string(note))
+ log.Warnf("volume %s was not completed: %s", name, string(note))
removeVolumeFiles(l.Directory + "/" + name)
return false
}
vid, collection, err := l.volumeIdFromPath(fileInfo)
if err != nil {
- glog.Warningf("get volume id failed, %s, err : %s", name, err)
+ log.Warnf("get volume id failed, %s, err : %s", name, err)
return false
}
@@ -81,20 +81,20 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
- glog.V(1).Infof("loaded volume, %v", vid)
+ log.Debugf("loaded volume, %v", vid)
return true
}
v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0)
if e != nil {
- glog.V(0).Infof("new volume %s error %s", name, e)
+ log.Infof("new volume %s error %s", name, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
- glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
+ log.Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
l.Directory+"/"+name+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
@@ -130,10 +130,10 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
l.concurrentLoadingVolumes(needleMapKind, 10)
- glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
+ log.Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
- glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
+ log.Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
@@ -322,9 +322,9 @@ func (l *DiskLocation) CheckDiskSpace() {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
if l.isDiskSpaceLow {
- glog.V(0).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ log.Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
} else {
- glog.V(4).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ log.Tracef("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
}
}
time.Sleep(time.Minute)
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
index 34b639407..8111f24f7 100644
--- a/weed/storage/erasure_coding/ec_encoder.go
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -7,7 +7,7 @@ import (
"github.com/klauspost/reedsolomon"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -78,7 +78,7 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64,
return fmt.Errorf("failed to stat dat file: %v", err)
}
- glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
+ log.Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
err = encodeDatFile(fi.Size(), err, baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
if err != nil {
return fmt.Errorf("encodeDatFile: %v", err)
@@ -122,7 +122,7 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i
bufferSize := int64(len(buffers[0]))
batchCount := blockSize / bufferSize
if blockSize%bufferSize != 0 {
- glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
+ log.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
}
for b := int64(0); b < batchCount; b++ {
diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go
index 5215d3c4f..f55adf8cb 100644
--- a/weed/storage/idx/walk.go
+++ b/weed/storage/idx/walk.go
@@ -3,7 +3,7 @@ package idx
import (
"io"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -16,7 +16,7 @@ func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offse
if count == 0 && e == io.EOF {
return nil
}
- glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
+ log.Tracef("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
var (
key types.NeedleId
@@ -36,7 +36,7 @@ func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offse
return nil
}
count, e = r.ReadAt(bytes, readerOffset)
- glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
+ log.Tracef("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
}
return e
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
index 4d244046e..3cb62ebb8 100644
--- a/weed/storage/needle/needle_parse_upload.go
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -13,7 +13,7 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -120,7 +120,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
}()
form, fe := r.MultipartReader()
if fe != nil {
- glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+ log.Infoln("MultipartReader [ERROR]", fe)
e = fe
return
}
@@ -128,7 +128,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
// first multi-part item
part, fe := form.NextPart()
if fe != nil {
- glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+ log.Infoln("Reading Multi part [ERROR]", fe)
e = fe
return
}
@@ -140,7 +140,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
if e != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", e)
+ log.Infoln("Reading Content [ERROR]", e)
return
}
if len(pu.Data) == int(sizeLimit)+1 {
@@ -161,7 +161,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
if fName != "" {
data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
if fe2 != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+ log.Infoln("Reading Content [ERROR]", fe2)
e = fe2
return
}
diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go
index e758a6fee..69b63afa9 100644
--- a/weed/storage/needle/needle_read_write.go
+++ b/weed/storage/needle/needle_read_write.go
@@ -6,7 +6,7 @@ import (
"io"
"math"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -133,7 +133,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
defer func(w backend.BackendStorageFile, off int64) {
if err != nil {
if te := w.Truncate(end); te != nil {
- glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
+ log.Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
}
}
}(w, end)
@@ -172,7 +172,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio
if n.Size != size {
// cookie is not always passed in for this API. Use size to do preliminary checking.
if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) {
- glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
+ log.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
return ErrorSizeMismatch
}
return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go
index 199cb26b3..9125723eb 100644
--- a/weed/storage/needle_map/compact_map_test.go
+++ b/weed/storage/needle_map/compact_map_test.go
@@ -62,7 +62,7 @@ func TestCompactMap(t *testing.T) {
// for i := uint32(0); i < 100; i++ {
// if v := m.Get(Key(i)); v != nil {
- // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
+ // log.Trace(i, "=", v.Key, v.Offset, v.Size)
// }
// }
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
index b25b5e89a..f88c3ecc4 100644
--- a/weed/storage/needle_map/memdb.go
+++ b/weed/storage/needle_map/memdb.go
@@ -8,7 +8,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -24,7 +24,7 @@ func NewMemDb() *MemDb {
var err error
t := &MemDb{}
if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil {
- glog.V(0).Infof("MemDb fails to open: %v", err)
+ log.Infof("MemDb fails to open: %v", err)
return nil
}
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 415cd14dd..22f0b8262 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -12,7 +12,7 @@ import (
"github.com/syndtr/goleveldb/leveldb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -27,11 +27,11 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
m = &LevelDbNeedleMap{dbFileName: dbFileName}
m.indexFile = indexFile
if !isLevelDbFresh(dbFileName, indexFile) {
- glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name())
+ log.Debugf("Start to Generate %s from %s", dbFileName, indexFile.Name())
generateLevelDbFile(dbFileName, indexFile)
- glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name())
+ log.Debugf("Finished Generating %s from %s", dbFileName, indexFile.Name())
}
- glog.V(1).Infof("Opening %s...", dbFileName)
+ log.Debugf("Opening %s...", dbFileName)
if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil {
if errors.IsCorrupted(err) {
@@ -41,7 +41,7 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
return
}
}
- glog.V(1).Infof("Loading %s...", indexFile.Name())
+ log.Debugf("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
@@ -60,7 +60,7 @@ func isLevelDbFresh(dbFileName string, indexFile *os.File) bool {
dbStat, dbStatErr := dbLogFile.Stat()
indexStat, indexStatErr := indexFile.Stat()
if dbStatErr != nil || indexStatErr != nil {
- glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
+ log.Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
return false
}
@@ -141,14 +141,14 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
func (m *LevelDbNeedleMap) Close() {
indexFileName := m.indexFile.Name()
if err := m.indexFile.Sync(); err != nil {
- glog.Warningf("sync file %s failed: %v", indexFileName, err)
+ log.Warnf("sync file %s failed: %v", indexFileName, err)
}
if err := m.indexFile.Close(); err != nil {
- glog.Warningf("close index file %s failed: %v", indexFileName, err)
+ log.Warnf("close index file %s failed: %v", indexFileName, err)
}
if err := m.db.Close(); err != nil {
- glog.Warningf("close levelDB failed: %v", err)
+ log.Warnf("close levelDB failed: %v", err)
}
}
diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go
index d0891dc98..0ac93e8b1 100644
--- a/weed/storage/needle_map_memory.go
+++ b/weed/storage/needle_map_memory.go
@@ -3,7 +3,7 @@ package storage
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -45,7 +45,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
}
return nil
})
- glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
+ log.Debugf("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
return nm, e
}
@@ -66,7 +66,7 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
func (nm *NeedleMap) Close() {
indexFileName := nm.indexFile.Name()
if err := nm.indexFile.Sync(); err != nil {
- glog.Warningf("sync file %s failed, %v", indexFileName, err)
+ log.Warnf("sync file %s failed, %v", indexFileName, err)
}
_ = nm.indexFile.Close()
}
diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go
index 3618dada9..5029748ce 100644
--- a/weed/storage/needle_map_metric.go
+++ b/weed/storage/needle_map_metric.go
@@ -145,7 +145,7 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key
for remainingCount >= 0 {
_, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount)
- // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
+ // log.Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
if e != nil {
return e
}
diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go
index 362659a11..3d9dbefb2 100644
--- a/weed/storage/needle_map_metric_test.go
+++ b/weed/storage/needle_map_metric_test.go
@@ -5,7 +5,7 @@ import (
"math/rand"
"testing"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -23,9 +23,9 @@ func TestFastLoadingNeedleMapMetrics(t *testing.T) {
mm, _ := newNeedleMapMetricFromIndexFile(idxFile)
- glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
- glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
- glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
- glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
- glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
+ log.Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
+ log.Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
+ log.Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
+ log.Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
+ log.Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
}
diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go
index 1ca113ca9..47a30e5a8 100644
--- a/weed/storage/needle_map_sorted_file.go
+++ b/weed/storage/needle_map_sorted_file.go
@@ -3,7 +3,7 @@ package storage
import (
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -21,18 +21,18 @@ func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m *SortedF
m.indexFile = indexFile
fileName := baseFileName + ".sdx"
if !isSortedFileFresh(fileName, indexFile) {
- glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name())
+ log.Infof("Start to Generate %s from %s", fileName, indexFile.Name())
erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx")
- glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name())
+ log.Infof("Finished Generating %s from %s", fileName, indexFile.Name())
}
- glog.V(1).Infof("Opening %s...", fileName)
+ log.Debugf("Opening %s...", fileName)
if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil {
return
}
dbStat, _ := m.dbFile.Stat()
m.dbFileSize = dbStat.Size()
- glog.V(1).Infof("Loading %s...", indexFile.Name())
+ log.Debugf("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
@@ -51,7 +51,7 @@ func isSortedFileFresh(dbFileName string, indexFile *os.File) bool {
dbStat, dbStatErr := dbFile.Stat()
indexStat, indexStatErr := indexFile.Stat()
if dbStatErr != nil || indexStatErr != nil {
- glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
+ log.Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
return false
}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 7f2415448..c43c3b02f 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -8,7 +8,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
@@ -120,11 +120,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
return fmt.Errorf("Volume Id %d already exists!", vid)
}
if location := s.FindFreeLocation(); location != nil {
- glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
+ log.Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
location.Directory, vid, collection, replicaPlacement, ttl)
if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil {
location.SetVolume(vid, volume)
- glog.V(0).Infof("add volume %d", vid)
+ log.Infof("add volume %d", vid)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(vid),
Collection: collection,
@@ -222,7 +222,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
deleteVids = append(deleteVids, v.Id)
} else {
- glog.V(0).Infoln("volume", v.Id, "is expired.")
+ log.Infoln("volume", v.Id, "is expired.")
}
}
collectionVolumeSize[v.Collection] += volumeMessage.Size
@@ -256,9 +256,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
found, err := location.deleteVolumeById(vid)
if found {
if err == nil {
- glog.V(0).Infof("volume %d is deleted", vid)
+ log.Infof("volume %d is deleted", vid)
} else {
- glog.V(0).Infof("delete volume %d: %v", vid, err)
+ log.Infof("delete volume %d: %v", vid, err)
}
}
}
@@ -305,7 +305,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync boo
_, _, isUnchanged, err = v.writeNeedle2(n, fsync)
return
}
- glog.V(0).Infoln("volume", i, "not found!")
+ log.Infoln("volume", i, "not found!")
err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
return
}
@@ -360,7 +360,7 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
func (s *Store) MountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
if found := location.LoadVolume(i, s.NeedleMapType); found == true {
- glog.V(0).Infof("mount volume %d", i)
+ log.Infof("mount volume %d", i)
v := s.findVolume(i)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
Id: uint32(v.Id),
@@ -391,7 +391,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
if err := location.UnloadVolume(i); err == nil {
- glog.V(0).Infof("UnmountVolume %d", i)
+ log.Infof("UnmountVolume %d", i)
s.DeletedVolumesChan <- message
return nil
}
@@ -414,11 +414,11 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
}
for _, location := range s.Locations {
if err := location.DeleteVolume(i); err == nil {
- glog.V(0).Infof("DeleteVolume %d", i)
+ log.Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil
} else {
- glog.Errorf("DeleteVolume %d: %v", i, err)
+ log.Errorf("DeleteVolume %d: %v", i, err)
}
}
@@ -472,7 +472,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
}
diskLocation.MaxVolumeCount = maxVolumeCount
- glog.V(2).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
+ log.Debugf("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
hasChanges = hasChanges || currentMaxVolumeCount != diskLocation.MaxVolumeCount
}
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 853757ce3..0739b7453 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -11,7 +11,7 @@ import (
"github.com/klauspost/reedsolomon"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -50,7 +50,7 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error {
for _, location := range s.Locations {
if err := location.LoadEcShard(collection, vid, shardId); err == nil {
- glog.V(0).Infof("MountEcShards %d.%d", vid, shardId)
+ log.Infof("MountEcShards %d.%d", vid, shardId)
var shardBits erasure_coding.ShardBits
@@ -86,7 +86,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
for _, location := range s.Locations {
if deleted := location.UnloadEcShard(vid, shardId); deleted {
- glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
+ log.Infof("UnmountEcShards %d.%d", vid, shardId)
s.DeletedEcShardsChan <- message
return nil
}
@@ -131,10 +131,10 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, ErrorDeleted
}
- glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
+ log.Tracef("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
if len(intervals) > 1 {
- glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
+ log.Tracef("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
}
bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
if err != nil {
@@ -183,7 +183,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
data = make([]byte, interval.Size)
if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
if _, err = shard.ReadAt(data, actualOffset); err != nil {
- glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err)
+ log.Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err)
return
}
} else {
@@ -197,7 +197,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
if err == nil {
return
}
- glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
+ log.Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
forgetShardId(ecVolume, shardId)
}
@@ -206,7 +206,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
if err == nil {
return
}
- glog.V(0).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err)
+ log.Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err)
}
return
}
@@ -231,7 +231,7 @@ func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume)
return nil
}
- glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId)
+ log.Tracef("lookup and cache ec volume %d locations", ecVolume.VolumeId)
err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupEcVolumeRequest{
@@ -268,12 +268,12 @@ func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId typ
}
for _, sourceDataNode := range sourceDataNodes {
- glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
+ log.Tracef("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset)
if err == nil {
return
}
- glog.V(1).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ log.Debugf("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
}
return
@@ -320,7 +320,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId type
}
func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
- glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+ log.Tracef("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
if err != nil {
@@ -338,7 +338,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
continue
}
if len(locations) == 0 {
- glog.V(3).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations)
+ log.Tracef("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations)
continue
}
@@ -349,7 +349,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
data := make([]byte, len(buf))
nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset)
if readErr != nil {
- glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
+ log.Tracef("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
forgetShardId(ecVolume, shardId)
}
if isDeleted {
@@ -365,10 +365,10 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
wg.Wait()
if err = enc.ReconstructData(bufs); err != nil {
- glog.V(3).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err)
+ log.Tracef("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err)
return 0, false, err
}
- glog.V(4).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+ log.Tracef("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
copy(buf, bufs[shardIdToRecover])
diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go
index 4a75fb20b..73d5fcfde 100644
--- a/weed/storage/store_ec_delete.go
+++ b/weed/storage/store_ec_delete.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -73,12 +73,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.Sh
}
for _, sourceDataNode := range sourceDataNodes {
- glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
+ log.Tracef("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId)
if err != nil {
return err
}
- glog.V(1).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err)
+ log.Debugf("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err)
}
return nil
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index 32666a417..d4ad1b1a9 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -4,13 +4,13 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
if v := s.findVolume(volumeId); v != nil {
- glog.V(3).Infof("volumd %d garbage level: %f", volumeId, v.garbageLevel())
+ log.Tracef("volumd %d garbage level: %f", volumeId, v.garbageLevel())
return v.garbageLevel(), nil
}
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go
index f48cd0bdc..333c4261e 100644
--- a/weed/storage/super_block/super_block.go
+++ b/weed/storage/super_block/super_block.go
@@ -3,7 +3,7 @@ package super_block
import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -48,12 +48,12 @@ func (s *SuperBlock) Bytes() []byte {
if s.Extra != nil {
extraData, err := proto.Marshal(s.Extra)
if err != nil {
- glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
+ log.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
}
extraSize := len(extraData)
if extraSize > 256*256-2 {
// reserve a couple of bits for future extension
- glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
+ log.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
}
s.ExtraSize = uint16(extraSize)
util.Uint16toBytes(header[6:8], s.ExtraSize)
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index a7a963a59..4e6eee9f0 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -15,7 +15,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
)
type Volume struct {
@@ -97,7 +97,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time)
if e == nil {
return uint64(datFileSize), v.nm.IndexFileSize(), modTime
}
- glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
+ log.Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
return // -1 causes integer overflow and the volume to become unwritable.
}
@@ -189,9 +189,9 @@ func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
return false
}
- glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
+ log.Debugf("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
- glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
+ log.Debugf("ttl:%v lived:%v", v.Ttl, livedMinutes)
if int64(v.Ttl.Minutes()) < livedMinutes {
return true
}
@@ -217,7 +217,7 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
func (v *Volume) CollectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64) {
v.dataFileAccessLock.RLock()
defer v.dataFileAccessLock.RUnlock()
- glog.V(3).Infof("CollectStatus volume %d", v.Id)
+ log.Tracef("CollectStatus volume %d", v.Id)
maxFileKey = v.nm.MaxFileKey()
datFileSize, modTime, _ = v.DataBackend.GetStat()
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 00e04047f..cc91619b6 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -5,7 +5,7 @@ import (
"io"
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -34,10 +34,10 @@ func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAt
}
}
if healthyIndexSize < indexSize {
- glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize)
+ log.Warnf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize)
err = indexFile.Truncate(healthyIndexSize)
if err != nil {
- glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: %v", indexFile.Name(), indexSize, healthyIndexSize, err)
+ log.Warnf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: %v", indexFile.Name(), indexSize, healthyIndexSize, err)
}
}
return
@@ -114,14 +114,14 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
return n.AppendAtNs, nil
}
if fileSize > fileTailOffset {
- glog.Warningf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+ log.Warnf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset)
err = datFile.Truncate(fileTailOffset)
if err == nil {
return n.AppendAtNs, nil
}
return n.AppendAtNs, fmt.Errorf("truncate file %s: %v", datFile.Name(), err)
}
- glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+ log.Warnf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
}
if err = n.ReadData(datFile, offset, size, v); err != nil {
return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 05684cbdb..906cbcdd4 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -6,7 +6,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -31,7 +31,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if v.HasRemoteFile() {
v.noWriteCanDelete = true
v.noWriteOrDelete = false
- glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files)
+ log.Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files)
v.LoadRemoteFile()
alreadyHasSuperBlock = true
} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists {
@@ -43,7 +43,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if canWrite {
dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
} else {
- glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
+ log.Infoln("opening " + fileName + ".dat in READONLY mode")
dataFile, err = os.Open(fileName + ".dat")
v.noWriteOrDelete = true
}
@@ -79,61 +79,61 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if err == nil && alsoLoadIndex {
var indexFile *os.File
if v.noWriteOrDelete {
- glog.V(0).Infoln("open to read file", fileName+".idx")
+ log.Infoln("open to read file", fileName+".idx")
if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil {
return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err)
}
} else {
- glog.V(1).Infoln("open to write file", fileName+".idx")
+ log.Debug("open to write file", fileName+".idx")
if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err)
}
}
if v.lastAppendAtNs, err = CheckAndFixVolumeDataIntegrity(v, indexFile); err != nil {
v.noWriteOrDelete = true
- glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
+ log.Infof("volumeDataIntegrityChecking failed %v", err)
}
if v.noWriteOrDelete || v.noWriteCanDelete {
if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil {
- glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err)
+ log.Infof("loading sorted db %s error: %v", fileName+".sdx", err)
}
} else {
switch needleMapKind {
case NeedleMapInMemory:
- glog.V(0).Infoln("loading index", fileName+".idx", "to memory")
+ log.Infoln("loading index", fileName+".idx", "to memory")
if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
- glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err)
+ log.Infof("loading index %s to memory error: %v", fileName+".idx", err)
}
case NeedleMapLevelDb:
- glog.V(0).Infoln("loading leveldb", fileName+".ldb")
+ log.Infoln("loading leveldb", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
case NeedleMapLevelDbMedium:
- glog.V(0).Infoln("loading leveldb medium", fileName+".ldb")
+ log.Infoln("loading leveldb medium", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
case NeedleMapLevelDbLarge:
- glog.V(0).Infoln("loading leveldb large", fileName+".ldb")
+ log.Infoln("loading leveldb large", fileName+".ldb")
opts := &opt.Options{
BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10, // default value is 1
}
if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ log.Infof("loading leveldb %s error: %v", fileName+".ldb", err)
}
}
}
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index 869796a3f..d2aed8c58 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -8,7 +8,7 @@ import (
"os"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -31,7 +31,7 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
oldNeedle := new(needle.Needle)
err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
- glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
+ log.Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
return false
}
if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
@@ -76,7 +76,7 @@ func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
}
func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
actualSize := needle.GetActualSize(Size(len(n.Data)), v.Version())
v.dataFileAccessLock.Lock()
@@ -101,7 +101,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
return
}
if existingNeedle.Cookie != n.Cookie {
- glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ log.Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
err = fmt.Errorf("mismatching cookie %x", n.Cookie)
return
}
@@ -118,7 +118,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
// add to needle map
if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
- glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ log.Tracef("failed to save in needle map %d: %v", n.Id, err)
}
}
if v.lastModifiedTsSeconds < n.LastModified {
@@ -128,7 +128,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
}
func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
n.SetHasTtl()
n.Ttl = v.Ttl
@@ -149,7 +149,7 @@ func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size
}
func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
- // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if v.isFileUnchanged(n) {
size = Size(n.DataSize)
isUnchanged = true
@@ -165,7 +165,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
return
}
if existingNeedle.Cookie != n.Cookie {
- glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ log.Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
err = fmt.Errorf("mismatching cookie %x", n.Cookie)
return
}
@@ -181,7 +181,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
// add to needle map
if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
- glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ log.Tracef("failed to save in needle map %d: %v", n.Id, err)
}
}
if v.lastModifiedTsSeconds < n.LastModified {
@@ -191,7 +191,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
}
func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
- // glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ // log.Tracef("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
actualSize := needle.GetActualSize(0, v.Version())
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
@@ -238,7 +238,7 @@ func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
}
func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
- glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ log.Tracef("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
nv, ok := v.nm.Get(n.Id)
// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
if ok && nv.Size.IsValid() {
@@ -270,7 +270,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
readSize := nv.Size
if readSize.IsDeleted() {
if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
- glog.V(3).Infof("reading deleted %s", n.String())
+ log.Tracef("reading deleted %s", n.String())
readSize = -readSize
} else {
return -1, ErrorDeleted
@@ -361,7 +361,7 @@ func (v *Volume) startWorker() {
if err := v.DataBackend.Sync(); err != nil {
// todo: this may generate dirty data or cause data inconsistency; maybe weed needs to panic?
if te := v.DataBackend.Truncate(end); te != nil {
- glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
+ log.Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
}
for i := 0; i < len(currentRequests); i++ {
if currentRequests[i].IsSucceed() {
@@ -416,7 +416,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
if volumeFileScanner.ReadNeedleBody() {
// println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest)
if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
- glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
+ log.Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
// err = fmt.Errorf("cannot read needle body: %v", err)
// return
}
@@ -426,18 +426,18 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
return nil
}
if err != nil {
- glog.V(0).Infof("visit needle error: %v", err)
+ log.Infof("visit needle error: %v", err)
return fmt.Errorf("visit needle error: %v", err)
}
offset += NeedleHeaderSize + rest
- glog.V(4).Infof("==> new entry offset %d", offset)
+ log.Tracef("==> new entry offset %d", offset)
if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
if err == io.EOF {
return nil
}
return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
}
- glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
+ log.Tracef("new entry needle size:%d rest:%d", n.Size, rest)
}
return nil
}
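
The mechanical substitutions above assume a logrus-backed wrapper at weed/util/log that keeps the printf-style helpers the old glog call sites used. Below is a minimal sketch of such a wrapper; the level mapping is inferred purely from the call sites in this diff (glog.V(0).Infof to log.Infof, glog.V(3)/V(4).Infof to log.Tracef, glog.Fatalf to log.Fatalf), and the actual package may differ:

package log

import "github.com/sirupsen/logrus"

func init() {
	// glog prefixes every message with the caller's file:line; logrus
	// only does so when asked, so a wrapper would likely opt in here.
	logrus.SetReportCaller(true)
}

// Infof stands in for the old glog.V(0).Infof call sites.
func Infof(format string, args ...interface{}) { logrus.Infof(format, args...) }

// Tracef and Trace stand in for the old glog.V(3)/glog.V(4) call sites.
func Tracef(format string, args ...interface{}) { logrus.Tracef(format, args...) }
func Trace(args ...interface{})                 { logrus.Trace(args...) }

// Fatalf stands in for glog.Fatalf.
func Fatalf(format string, args ...interface{}) { logrus.Fatalf(format, args...) }

Note that with a wrapper this thin, logrus's caller reporting points at the wrapper functions themselves rather than the real call sites; working around that needs extra frame-skipping logic, which is beyond this sketch.
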
diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go
index 20223ac1b..f5e78024c 100644
--- a/weed/storage/volume_super_block.go
+++ b/weed/storage/volume_super_block.go
@@ -4,7 +4,7 @@ import (
"fmt"
"os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -14,7 +14,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
datSize, _, e := v.DataBackend.GetStat()
if e != nil {
- glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
+ log.Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
return e
}
if datSize == 0 {
diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go
index fd7b08654..3fbcd9785 100644
--- a/weed/storage/volume_tier.go
+++ b/weed/storage/volume_tier.go
@@ -1,7 +1,7 @@
package storage
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
@@ -17,7 +17,7 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) {
v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif")
if v.hasRemoteFile {
- glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id,
+ log.Infof("volume %d is tiered to %s as %s and read only", v.Id,
v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key)
}
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index a3e5800df..81c105635 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -6,7 +6,7 @@ import (
"runtime"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx"
@@ -39,11 +39,11 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(3).Infof("Compacting volume %d ...", v.Id)
+ log.Tracef("Compacting volume %d ...", v.Id)
//no need to lock for copy on write
//v.accessLock.Lock()
//defer v.accessLock.Unlock()
- //glog.V(3).Infof("Got Compaction lock...")
+ //log.Tracef("Got Compaction lock...")
v.isCompacting = true
defer func() {
v.isCompacting = false
@@ -52,12 +52,12 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
filePath := v.FileName()
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
- glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
+ log.Tracef("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
if err := v.DataBackend.Sync(); err != nil {
- glog.V(0).Infof("compact fail to sync volume %d", v.Id)
+ log.Infof("compact fail to sync volume %d", v.Id)
}
if err := v.nm.Sync(); err != nil {
- glog.V(0).Infof("compact fail to sync volume idx %d", v.Id)
+ log.Infof("compact fail to sync volume idx %d", v.Id)
}
return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
}
@@ -68,7 +68,7 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(3).Infof("Compact2 volume %d ...", v.Id)
+ log.Tracef("Compact2 volume %d ...", v.Id)
v.isCompacting = true
defer func() {
@@ -78,12 +78,12 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro
filePath := v.FileName()
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
- glog.V(3).Infof("creating copies for volume %d ...", v.Id)
+ log.Tracef("creating copies for volume %d ...", v.Id)
if err := v.DataBackend.Sync(); err != nil {
- glog.V(0).Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
+ log.Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
}
if err := v.nm.Sync(); err != nil {
- glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
+ log.Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
}
return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond)
}
@@ -92,7 +92,7 @@ func (v *Volume) CommitCompact() error {
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
}
- glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
+ log.Infof("Committing volume %d vacuuming...", v.Id)
v.isCompacting = true
defer func() {
@@ -102,11 +102,11 @@ func (v *Volume) CommitCompact() error {
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
- glog.V(3).Infof("Got volume %d committing lock...", v.Id)
+ log.Tracef("Got volume %d committing lock...", v.Id)
v.nm.Close()
if v.DataBackend != nil {
if err := v.DataBackend.Close(); err != nil {
- glog.V(0).Infof("fail to close volume %d", v.Id)
+ log.Infof("fail to close volume %d", v.Id)
}
}
v.DataBackend = nil
@@ -114,7 +114,7 @@ func (v *Volume) CommitCompact() error {
var e error
if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil {
- glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
+ log.Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
e = os.Remove(v.FileName() + ".cpd")
if e != nil {
return e
@@ -143,12 +143,12 @@ func (v *Volume) CommitCompact() error {
}
}
- //glog.V(3).Infof("Pretending to be vacuuming...")
+ //log.Tracef("Pretending to be vacuuming...")
//time.Sleep(20 * time.Second)
os.RemoveAll(v.FileName() + ".ldb")
- glog.V(3).Infof("Loading volume %d commit file...", v.Id)
+ log.Tracef("Loading volume %d commit file...", v.Id)
if e = v.load(true, false, v.needleMapKind, 0); e != nil {
return e
}
@@ -156,7 +156,7 @@ func (v *Volume) CommitCompact() error {
}
func (v *Volume) cleanupCompact() error {
- glog.V(0).Infof("Cleaning up volume %d vacuuming...", v.Id)
+ log.Infof("Cleaning up volume %d vacuuming...", v.Id)
e1 := os.Remove(v.FileName() + ".cpd")
e2 := os.Remove(v.FileName() + ".cpx")
@@ -217,7 +217,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err)
}
key, offset, size := idx2.IdxFileEntry(IdxEntry)
- glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
+ log.Tracef("key %d offset %d size %d", key, offset, size)
if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
incrementedHasUpdatedIndexEntry[key] = keyField{
offset: offset,
@@ -261,14 +261,14 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
var offset int64
if offset, err = dst.Seek(0, 2); err != nil {
- glog.V(0).Infof("failed to seek the end of file: %v", err)
+ log.Infof("failed to seek the end of file: %v", err)
return
}
//ensure file writes start from an aligned position
if offset%NeedlePaddingSize != 0 {
offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
if offset, err = dst.Seek(offset, 0); err != nil {
- glog.V(0).Infof("failed to align in datafile %s: %v", dst.Name(), err)
+ log.Infof("failed to align in datafile %s: %v", dst.Name(), err)
return
}
}
@@ -276,7 +276,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
//updated needle
if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() {
//even if the needle cache in memory is hit, the needleBytes read from the old data file are correct
- glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
+ log.Tracef("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
var needleBytes []byte
needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version())
if err != nil {
@@ -334,7 +334,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
return nil
}
nv, ok := scanner.v.nm.Get(n.Id)
- glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
+ log.Trace("needle expected offset ", offset, "ok", ok, "nv", nv)
if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size.IsValid() {
if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
@@ -345,7 +345,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
delta := n.DiskSize(scanner.version)
scanner.newOffset += delta
scanner.writeThrottler.MaybeSlowdown(delta)
- glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
+ log.Trace("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
}
return nil
}
@@ -436,7 +436,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
delta := n.DiskSize(version)
newOffset += delta
writeThrottler.MaybeSlowdown(delta)
- glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+ log.Trace("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
return nil
})
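
The Trace-level call sites introduced throughout this change only produce output once the logger is configured for trace verbosity; at logrus's default InfoLevel they stay silent, which roughly matches running the old glog code without -v=3 or -v=4. A hedged configuration sketch (the startup hook shown here is assumed, not part of this diff):

package main

import "github.com/sirupsen/logrus"

func main() {
	logrus.SetLevel(logrus.TraceLevel) // surface log.Trace/log.Tracef output, like glog -v=4
	logrus.SetReportCaller(true)       // attach file:line to every entry
	logrus.Tracef("trace logging enabled for volume %d", 7)
}
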