Diffstat (limited to 'weed/storage')
38 files changed, 258 insertions, 258 deletions
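Every file below changes in the same two ways: the import github.com/seaweedfs/seaweedfs/weed/glog becomes github.com/seaweedfs/seaweedfs/weed/util/log, and every literal verbosity level v passed to V() is renumbered to 3 - v (glog.V(0) becomes log.V(3), glog.V(4) becomes log.V(-1)), which inverts the scale so that a higher level now appears to mean a more important message. The new weed/util/log package itself is not part of this diff, so the sketch below is only a minimal illustration, inferred from the rewritten call sites, of the API shape they rely on (V(level).Infof/Infoln, Warningf, Errorf, Fatal/Fatalf, and a Level type usable as a value); the threshold variable, its default, and the message prefixes are assumptions, not the actual implementation.

package log

import (
	stdlog "log"
)

// Level is the verbosity of a message. Judging by the renumbering in this
// commit, larger values appear to be more important: 3 replaces glog.V(0)
// and -1 replaces glog.V(4). (Assumed semantics, not taken from the real package.)
type Level int

// threshold is the minimum level that gets printed; a default of 3 would
// mirror glog's default of showing only V(0). (Assumption.)
var threshold Level = 3

// Verbose gates the Infof/Infoln calls chained after V(...).
type Verbose bool

// V reports whether a message at level l should be logged.
func V(l Level) Verbose { return Verbose(l >= threshold) }

func (v Verbose) Infof(format string, args ...interface{}) {
	if v {
		stdlog.Printf(format, args...)
	}
}

func (v Verbose) Infoln(args ...interface{}) {
	if v {
		stdlog.Println(args...)
	}
}

func Warningf(format string, args ...interface{}) { stdlog.Printf("WARNING: "+format, args...) }
func Errorf(format string, args ...interface{})   { stdlog.Printf("ERROR: "+format, args...) }
func Fatal(args ...interface{})                   { stdlog.Fatal(args...) }
func Fatalf(format string, args ...interface{})   { stdlog.Fatalf(format, args...) }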
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index c17bec822..4a2553c0a 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" ) @@ -52,7 +52,7 @@ func LoadConfiguration(config *util.ViperProxy) { for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] if !found { - glog.Fatalf("backend storage type %s not found", backendTypeName) + log.Fatalf("backend storage type %s not found", backendTypeName) } for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") { @@ -64,7 +64,7 @@ func LoadConfiguration(config *util.ViperProxy) { backendStorage, buildErr := backendStorageFactory.BuildStorage(config, StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) if buildErr != nil { - glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) + log.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) } BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage if backendStorageId == "default" { @@ -81,7 +81,7 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { for _, storageBackend := range storageBackends { backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)] if !found { - glog.Warningf("storage type %s not found", storageBackend.Type) + log.Warningf("storage type %s not found", storageBackend.Type) continue } if _, found := BackendStorages[storageBackend.Type+"."+storageBackend.Id]; found { @@ -89,7 +89,7 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { } backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) if buildErr != nil { - glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) + log.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) } BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage if storageBackend.Id == "default" { diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index 070f79865..d1ac5704c 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -1,7 +1,7 @@ package backend import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" "io" "os" @@ -25,7 +25,7 @@ type DiskFile struct { func NewDiskFile(f *os.File) *DiskFile { stat, err := f.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", f.Name(), err) + log.Fatalf("stat file %s: %v", f.Name(), err) } offset := stat.Size() if offset%NeedlePaddingSize != 0 { diff --git a/weed/storage/backend/rclone_backend/rclone_backend.go b/weed/storage/backend/rclone_backend/rclone_backend.go index e47c2f908..705a761ba 100644 --- a/weed/storage/backend/rclone_backend/rclone_backend.go +++ b/weed/storage/backend/rclone_backend/rclone_backend.go @@ -21,7 +21,7 @@ import ( "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/object" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" ) @@ -66,11 +66,11 @@ func newRcloneBackendStorage(configuration backend.StringProperties, configPrefi fsPath := fmt.Sprintf("%s:", s.remoteName) s.fs, err = fs.NewFs(ctx, fsPath) if err != nil { - glog.Errorf("failed to instantiate Rclone filesystem: %s", err) + log.Errorf("failed to instantiate Rclone filesystem: %s", err) return } - glog.V(0).Infof("created backend storage rclone.%s for remote name %s", s.id, s.remoteName) + log.V(3).Infof("created backend storage rclone.%s for remote name %s", s.id, s.remoteName) return } @@ -118,7 +118,7 @@ func (s *RcloneBackendStorage) CopyFile(f *os.File, fn func(progressed int64, pe return key, 0, err } - glog.V(1).Infof("copy dat file of %s to remote rclone.%s as %s", f.Name(), s.id, key) + log.V(2).Infof("copy dat file of %s to remote rclone.%s as %s", f.Name(), s.id, key) util.Retry("upload via Rclone", func() error { size, err = uploadViaRclone(s.fs, f.Name(), key, fn) @@ -164,7 +164,7 @@ func uploadViaRclone(rfs fs.Fs, filename string, key string, fn func(progressed } func (s *RcloneBackendStorage) DownloadFile(filename string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { - glog.V(1).Infof("download dat file of %s from remote rclone.%s as %s", filename, s.id, key) + log.V(2).Infof("download dat file of %s from remote rclone.%s as %s", filename, s.id, key) util.Retry("download via Rclone", func() error { size, err = downloadViaRclone(s.fs, filename, key, fn) @@ -216,7 +216,7 @@ func downloadViaRclone(fs fs.Fs, filename string, key string, fn func(progressed } func (s *RcloneBackendStorage) DeleteFile(key string) (err error) { - glog.V(1).Infof("delete dat file %s from remote", key) + log.V(2).Infof("delete dat file %s from remote", key) util.Retry("delete via Rclone", func() error { err = deleteViaRclone(s.fs, key) diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 139073fe4..33e876bcf 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" ) @@ -60,7 +60,7 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint, s.forcePathStyle) - 
glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) + log.V(3).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) return } @@ -94,7 +94,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen randomUuid, _ := uuid.NewRandom() key = randomUuid.String() - glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) + log.V(2).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) util.Retry("upload to S3", func() error { size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn) @@ -106,7 +106,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { - glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) + log.V(2).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn) @@ -115,7 +115,7 @@ func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(pro func (s *S3BackendStorage) DeleteFile(key string) (err error) { - glog.V(1).Infof("delete dat file %s from remote", key) + log.V(2).Infof("delete dat file %s from remote", key) err = deleteFromS3(s.conn, s.bucket, key) @@ -143,8 +143,8 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n } defer getObjectOutput.Body.Close() - // glog.V(3).Infof("read %s %s", s3backendStorageFile.key, bytesRange) - // glog.V(3).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) + // log.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + // log.V(0).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) var readCount int for { diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go index b0d30fbdb..5caca47c7 100644 --- a/weed/storage/backend/s3_backend/s3_download.go +++ b/weed/storage/backend/s3_backend/s3_download.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string, @@ -50,7 +50,7 @@ func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string return fileSize, fmt.Errorf("failed to download /buckets/%s%s to %s: %v", sourceBucket, sourceKey, destFileName, err) } - glog.V(1).Infof("downloaded file %s\n", destFileName) + log.V(2).Infof("downloaded file %s\n", destFileName) return } diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go index 537e6bd1d..26c88db29 100644 --- a/weed/storage/backend/s3_backend/s3_upload.go +++ b/weed/storage/backend/s3_backend/s3_upload.go @@ -8,7 +8,7 @@ import ( "os" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, storageClass string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) { @@ -58,7 +58,7 @@ func uploadToS3(sess s3iface.S3API, 
filename string, destBucket string, destKey if err != nil { return 0, fmt.Errorf("failed to upload file %s: %v", filename, err) } - glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location) + log.V(2).Infof("file %s uploaded to %s\n", filename, result.Location) return } diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go index def376822..ad1665071 100644 --- a/weed/storage/backend/volume_create.go +++ b/weed/storage/backend/volume_create.go @@ -6,7 +6,7 @@ package backend import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { @@ -15,7 +15,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32 return nil, e } if preallocate > 0 { - glog.V(2).Infof("Preallocated disk space for %s is not supported", fileName) + log.V(1).Infof("Preallocated disk space for %s is not supported", fileName) } return NewDiskFile(file), nil } diff --git a/weed/storage/backend/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go index 2e52cce1b..2319c8c5b 100644 --- a/weed/storage/backend/volume_create_linux.go +++ b/weed/storage/backend/volume_create_linux.go @@ -7,7 +7,7 @@ import ( "os" "syscall" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { @@ -17,7 +17,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32 } if preallocate != 0 { syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) - glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) + log.V(2).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) } return NewDiskFile(file), nil } diff --git a/weed/storage/backend/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go index b2e0ea92b..fbd1840e9 100644 --- a/weed/storage/backend/volume_create_windows.go +++ b/weed/storage/backend/volume_create_windows.go @@ -7,13 +7,13 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/backend/memory_map" "golang.org/x/sys/windows" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend/memory_map/os_overloads" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { if preallocate > 0 { - glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) + log.V(3).Infof("Preallocated disk space for %s is not supported", fileName) } if memoryMapSizeMB > 0 { diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index cc89c4ca1..459708e49 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -11,7 +11,7 @@ import ( "time" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -39,7 +39,7 @@ type DiskLocation struct { } func GenerateDirUuid(dir string) (dirUuidString string, err error) { - glog.V(1).Infof("Getting uuid of volume directory:%s", dir) + log.V(2).Infof("Getting uuid of volume directory:%s", dir) fileName := dir + 
"/vol_dir.uuid" if !util.FileExists(fileName) { dirUuidString, err = writeNewUuid(fileName) @@ -67,7 +67,7 @@ func writeNewUuid(fileName string) (string, error) { } func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation { - glog.V(4).Infof("Added new Disk %s: maxVolumes=%d", dir, maxVolumeCount) + log.V(-1).Infof("Added new Disk %s: maxVolumes=%d", dir, maxVolumeCount) dir = util.ResolvePath(dir) if idxDir == "" { idxDir = dir @@ -76,7 +76,7 @@ func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFree } dirUuid, err := GenerateDirUuid(dir) if err != nil { - glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err) + log.Fatalf("cannot generate uuid of dir %s: %v", dir, err) } location := &DiskLocation{ Directory: dir, @@ -155,7 +155,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne noteFile := l.Directory + "/" + volumeName + ".note" if util.FileExists(noteFile) { note, _ := os.ReadFile(noteFile) - glog.Warningf("volume %s was not completed: %s", volumeName, string(note)) + log.Warningf("volume %s was not completed: %s", volumeName, string(note)) removeVolumeFiles(l.Directory + "/" + volumeName) removeVolumeFiles(l.IdxDirectory + "/" + volumeName) return false @@ -164,7 +164,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne // parse out collection, volume id vid, collection, err := volumeIdFromFileName(basename) if err != nil { - glog.Warningf("get volume id failed, %s, err : %s", volumeName, err) + log.Warningf("get volume id failed, %s, err : %s", volumeName, err) return false } @@ -173,21 +173,21 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne _, found := l.volumes[vid] l.volumesLock.RUnlock() if found { - glog.V(1).Infof("loaded volume, %v", vid) + log.V(2).Infof("loaded volume, %v", vid) return true } // load the volume v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout) if e != nil { - glog.V(0).Infof("new volume %s error %s", volumeName, e) + log.V(3).Infof("new volume %s error %s", volumeName, e) return false } l.SetVolume(vid, v) size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", + log.V(3).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) return true } @@ -234,7 +234,7 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeo num, err := strconv.Atoi(val) if err != nil || num < 1 { num = 10 - glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10") + log.Warningf("failed to set worker number from GOMAXPROCS , set to default:10") } workerNum = num } else { @@ -243,10 +243,10 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeo } } l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout) - glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount) + log.V(3).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount) l.loadAllEcShards() - glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes)) + log.V(3).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes)) } @@ -434,7 +434,7 @@ func (l *DiskLocation) 
UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) } datSize, idxSize, _ := vol.FileStat() unUsedSpaceVolume := int64(volumeSizeLimit) - int64(datSize+idxSize) - glog.V(4).Infof("Volume stats for %d: volumeSizeLimit=%d, datSize=%d idxSize=%d unused=%d", vol.Id, volumeSizeLimit, datSize, idxSize, unUsedSpaceVolume) + log.V(-1).Infof("Volume stats for %d: volumeSizeLimit=%d, datSize=%d idxSize=%d unused=%d", vol.Id, volumeSizeLimit, datSize, idxSize, unUsedSpaceVolume) if unUsedSpaceVolume >= 0 { unUsedSpace += uint64(unUsedSpaceVolume) } @@ -455,11 +455,11 @@ func (l *DiskLocation) CheckDiskSpace() { l.isDiskSpaceLow = !l.isDiskSpaceLow } - logLevel := glog.Level(4) + logLevel := log.Level(4) if l.isDiskSpaceLow { - logLevel = glog.Level(0) + logLevel = log.Level(0) } - glog.V(logLevel).Infof("dir %s %s", dir, desc) + log.V(logLevel).Infof("dir %s %s", dir, desc) } } diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index a46643f57..6836382b0 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -7,7 +7,7 @@ import ( "github.com/klauspost/reedsolomon" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -78,7 +78,7 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, return fmt.Errorf("failed to stat dat file: %v", err) } - glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) + log.V(3).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize) if err != nil { return fmt.Errorf("encodeDatFile: %v", err) @@ -121,12 +121,12 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i bufferSize := int64(len(buffers[0])) if bufferSize == 0 { - glog.Fatal("unexpected zero buffer size") + log.Fatal("unexpected zero buffer size") } batchCount := blockSize / bufferSize if blockSize%bufferSize != 0 { - glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize) + log.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize) } for b := int64(0); b < batchCount; b++ { diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index b3744807a..d14166502 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" @@ -74,7 +74,7 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection ev.datFileSize = volumeInfo.DatFileSize ev.ExpireAtSec = volumeInfo.ExpireAtSec } else { - glog.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName) + log.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName) volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) } diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go index e31c44a46..aa3aa01bf 100644 --- a/weed/storage/idx/walk.go +++ b/weed/storage/idx/walk.go @@ -3,7 
+3,7 @@ package idx import ( "io" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -16,7 +16,7 @@ func WalkIndexFile(r io.ReaderAt, startFrom uint64, fn func(key types.NeedleId, if count == 0 && e == io.EOF { return nil } - glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) + log.V(0).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) readerOffset += int64(count) var ( key types.NeedleId @@ -36,7 +36,7 @@ func WalkIndexFile(r io.ReaderAt, startFrom uint64, fn func(key types.NeedleId, return nil } count, e = r.ReadAt(bytes, readerOffset) - glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) + log.V(0).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) readerOffset += int64(count) } return e diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go index 89708303d..1bc00e7d0 100644 --- a/weed/storage/needle/needle_parse_upload.go +++ b/weed/storage/needle/needle_parse_upload.go @@ -13,7 +13,7 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -113,7 +113,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { form, fe := r.MultipartReader() if fe != nil { - glog.V(0).Infoln("MultipartReader [ERROR]", fe) + log.V(3).Infoln("MultipartReader [ERROR]", fe) e = fe return } @@ -121,7 +121,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { // first multi-part item part, fe := form.NextPart() if fe != nil { - glog.V(0).Infoln("Reading Multi part [ERROR]", fe) + log.V(3).Infoln("Reading Multi part [ERROR]", fe) e = fe return } @@ -133,7 +133,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { dataSize, e = pu.bytesBuffer.ReadFrom(io.LimitReader(part, sizeLimit+1)) if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) + log.V(3).Infoln("Reading Content [ERROR]", e) return } if dataSize == sizeLimit+1 { @@ -158,7 +158,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { pu.bytesBuffer.Reset() dataSize2, fe2 := pu.bytesBuffer.ReadFrom(io.LimitReader(part2, sizeLimit+1)) if fe2 != nil { - glog.V(0).Infoln("Reading Content [ERROR]", fe2) + log.V(3).Infoln("Reading Content [ERROR]", fe2) e = fe2 return } @@ -215,7 +215,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { dataSize, e = pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1)) if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) + log.V(3).Infoln("Reading Content [ERROR]", e) return } if dataSize == sizeLimit+1 { diff --git a/weed/storage/needle/needle_read.go b/weed/storage/needle/needle_read.go index 1907efad3..a84f1a5af 100644 --- a/weed/storage/needle/needle_read.go +++ b/weed/storage/needle/needle_read.go @@ -3,7 +3,7 @@ package needle import ( "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -42,7 +42,7 @@ func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, versi } if err != nil { fileSize, _, _ := r.GetStat() - glog.Errorf("%s read %d dataSize %d offset %d fileSize %d: %v", r.Name(), n, dataSize, offset, fileSize, err) + log.Errorf("%s read %d dataSize %d offset %d fileSize %d: %v", r.Name(), n, dataSize, offset, fileSize, err) } return dataSlice, err @@ -55,7 +55,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio // cookie is not always passed in for this API. Use size to do preliminary checking. if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) { stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorSizeMismatchOffsetSize).Inc() - glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) + log.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) return ErrorSizeMismatch } stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorSizeMismatch).Inc() @@ -238,7 +238,7 @@ func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, o err = nil } if err != nil { - glog.Errorf("%s read %d bodyLength %d offset %d: %v", r.Name(), readCount, bodyLength, offset, err) + log.Errorf("%s read %d bodyLength %d offset %d: %v", r.Name(), readCount, bodyLength, offset, err) return } diff --git a/weed/storage/needle/needle_read_page.go b/weed/storage/needle/needle_read_page.go index 4e1032de8..c8857223a 100644 --- a/weed/storage/needle/needle_read_page.go +++ b/weed/storage/needle/needle_read_page.go @@ -2,7 +2,7 @@ package needle import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . "github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util" @@ -26,7 +26,7 @@ func (n *Needle) ReadNeedleData(r backend.BackendStorageFile, volumeOffset int64 } if err != nil { fileSize, _, _ := r.GetStat() - glog.Errorf("%s read %d %d size %d at offset %d fileSize %d: %v", r.Name(), n.Id, needleOffset, sizeToRead, volumeOffset, fileSize, err) + log.Errorf("%s read %d %d size %d at offset %d fileSize %d: %v", r.Name(), n.Id, needleOffset, sizeToRead, volumeOffset, fileSize, err) } return diff --git a/weed/storage/needle/needle_write.go b/weed/storage/needle/needle_write.go index 95854bc27..de6071046 100644 --- a/weed/storage/needle/needle_write.go +++ b/weed/storage/needle/needle_write.go @@ -3,7 +3,7 @@ package needle import ( "bytes" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util" @@ -112,7 +112,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u defer func(w backend.BackendStorageFile, off int64) { if err != nil { if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + log.V(3).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } } }(w, end) @@ -147,7 +147,7 @@ func WriteNeedleBlob(w backend.BackendStorageFile, dataSlice []byte, size Size, defer func(w backend.BackendStorageFile, off int64) { if err != nil { if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + log.V(3).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } } }(w, end) diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go index 58d2a6e3a..8fc0f1a74 100644 --- a/weed/storage/needle_map/compact_map_test.go +++ b/weed/storage/needle_map/compact_map_test.go @@ -79,7 +79,7 @@ func TestCompactMap(t *testing.T) { // for i := uint32(0); i < 100; i++ { // if v := m.Get(Key(i)); v != nil { - // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size) + // log.V(-1).Infoln(i, "=", v.Key, v.Offset, v.Size) // } // } diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index d3d47b605..a3cbd88ed 100644 --- a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -10,7 +10,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" . "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -26,7 +26,7 @@ func NewMemDb() *MemDb { var err error t := &MemDb{} if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil { - glog.V(0).Infof("MemDb fails to open: %v", err) + log.V(3).Infof("MemDb fails to open: %v", err) return nil } diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index a5a543ba2..3448e7407 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -16,7 +16,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -43,16 +43,16 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option m = &LevelDbNeedleMap{dbFileName: dbFileName} m.indexFile = indexFile if !isLevelDbFresh(dbFileName, indexFile) { - glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) + log.V(2).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) generateLevelDbFile(dbFileName, indexFile) - glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) + log.V(2).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) } if stat, err := indexFile.Stat(); err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) } else { m.indexFileOffset = stat.Size() } - glog.V(1).Infof("Opening %s...", dbFileName) + log.V(2).Infof("Opening %s...", dbFileName) if m.ldbTimeout == 0 { if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil { @@ -63,12 +63,12 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option return } } - glog.V(0).Infof("Loading %s... , watermark: %d", dbFileName, getWatermark(m.db)) + log.V(3).Infof("Loading %s... , watermark: %d", dbFileName, getWatermark(m.db)) m.recordCount = uint64(m.indexFileOffset / NeedleMapEntrySize) watermark := (m.recordCount / watermarkBatchSize) * watermarkBatchSize err = setWatermark(m.db, watermark) if err != nil { - glog.Fatalf("set watermark for %s error: %s\n", dbFileName, err) + log.Fatalf("set watermark for %s error: %s\n", dbFileName, err) return } } @@ -97,7 +97,7 @@ func isLevelDbFresh(dbFileName string, indexFile *os.File) bool { dbStat, dbStatErr := dbLogFile.Stat() indexStat, indexStatErr := indexFile.Stat() if dbStatErr != nil || indexStatErr != nil { - glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + log.V(3).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) return false } @@ -113,13 +113,13 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { watermark := getWatermark(db) if stat, err := indexFile.Stat(); err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) return err } else { if watermark*NeedleMapEntrySize > uint64(stat.Size()) { - glog.Warningf("wrong watermark %d for filesize %d", watermark, stat.Size()) + log.Warningf("wrong watermark %d for filesize %d", watermark, stat.Size()) } - glog.V(0).Infof("generateLevelDbFile %s, watermark %d, num of entries:%d", dbFileName, watermark, (uint64(stat.Size())-watermark*NeedleMapEntrySize)/NeedleMapEntrySize) + log.V(3).Infof("generateLevelDbFile %s, watermark %d, num of entries:%d", dbFileName, watermark, (uint64(stat.Size())-watermark*NeedleMapEntrySize)/NeedleMapEntrySize) } return idx.WalkIndexFile(indexFile, watermark, func(key NeedleId, offset Offset, size Size) error { if !offset.IsZero() && size.IsValid() { @@ -175,7 +175,7 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { watermark = 0 } else { watermark = (m.recordCount / watermarkBatchSize) * watermarkBatchSize - glog.V(1).Infof("put cnt:%d for %s,watermark: %d", m.recordCount, m.dbFileName, watermark) + log.V(2).Infof("put cnt:%d for %s,watermark: %d", m.recordCount, m.dbFileName, watermark) } return levelDbWrite(m.db, key, offset, size, watermark == 0, watermark) } @@ -183,14 +183,14 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { 
func getWatermark(db *leveldb.DB) uint64 { data, err := db.Get(watermarkKey, nil) if err != nil || len(data) != 8 { - glog.V(1).Infof("read previous watermark from db: %v, %d", err, len(data)) + log.V(2).Infof("read previous watermark from db: %v, %d", err, len(data)) return 0 } return util.BytesToUint64(data) } func setWatermark(db *leveldb.DB, watermark uint64) error { - glog.V(3).Infof("set watermark %d", watermark) + log.V(0).Infof("set watermark %d", watermark) var wmBytes = make([]byte, 8) util.Uint64toBytes(wmBytes, watermark) if err := db.Put(watermarkKey, wmBytes, nil); err != nil { @@ -252,16 +252,16 @@ func (m *LevelDbNeedleMap) Close() { if m.indexFile != nil { indexFileName := m.indexFile.Name() if err := m.indexFile.Sync(); err != nil { - glog.Warningf("sync file %s failed: %v", indexFileName, err) + log.Warningf("sync file %s failed: %v", indexFileName, err) } if err := m.indexFile.Close(); err != nil { - glog.Warningf("close index file %s failed: %v", indexFileName, err) + log.Warningf("close index file %s failed: %v", indexFileName, err) } } if m.db != nil { if err := m.db.Close(); err != nil { - glog.Warningf("close levelDB failed: %v", err) + log.Warningf("close levelDB failed: %v", err) } } if m.ldbTimeout > 0 { @@ -309,7 +309,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * stat, e := indexFile.Stat() if e != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), e) + log.Fatalf("stat file %s: %v", indexFile.Name(), e) return e } m.indexFileOffset = stat.Size() @@ -319,7 +319,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * watermark := (m.recordCount / watermarkBatchSize) * watermarkBatchSize err = setWatermark(db, uint64(watermark)) if err != nil { - glog.Fatalf("setting watermark failed %s: %v", indexFile.Name(), err) + log.Fatalf("setting watermark failed %s: %v", indexFile.Name(), err) return err } v.nm = m @@ -335,7 +335,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * } func (m *LevelDbNeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) (err error) { - glog.V(0).Infof("loading idx to leveldb from offset %d for file: %s", startFrom, indexFile.Name()) + log.V(3).Infof("loading idx to leveldb from offset %d for file: %s", startFrom, indexFile.Name()) dbFileName := v.FileName(".cpldb") db, dbErr := leveldb.OpenFile(dbFileName, nil) defer func() { @@ -404,14 +404,14 @@ func reloadLdb(m *LevelDbNeedleMap) (err error) { if m.db != nil { return nil } - glog.V(1).Infof("reloading leveldb %s", m.dbFileName) + log.V(2).Infof("reloading leveldb %s", m.dbFileName) m.accessFlag = 1 if m.db, err = leveldb.OpenFile(m.dbFileName, m.ldbOpts); err != nil { if errors.IsCorrupted(err) { m.db, err = leveldb.RecoverFile(m.dbFileName, m.ldbOpts) } if err != nil { - glog.Fatalf("RecoverFile %s failed:%v", m.dbFileName, err) + log.Fatalf("RecoverFile %s failed:%v", m.dbFileName, err) return err } } @@ -422,7 +422,7 @@ func unloadLdb(m *LevelDbNeedleMap) (err error) { m.ldbAccessLock.Lock() defer m.ldbAccessLock.Unlock() if m.db != nil { - glog.V(1).Infof("reached max idle count, unload leveldb, %s", m.dbFileName) + log.V(2).Infof("reached max idle count, unload leveldb, %s", m.dbFileName) m.db.Close() m.db = nil } @@ -430,26 +430,26 @@ func unloadLdb(m *LevelDbNeedleMap) (err error) { } func lazyLoadingRoutine(m *LevelDbNeedleMap) (err error) { - glog.V(1).Infof("lazyLoadingRoutine %s", m.dbFileName) + log.V(2).Infof("lazyLoadingRoutine %s", 
m.dbFileName) var accessRecord int64 accessRecord = 1 for { select { case exit := <-m.exitChan: if exit { - glog.V(1).Infof("exit from lazyLoadingRoutine") + log.V(2).Infof("exit from lazyLoadingRoutine") return nil } case <-time.After(time.Hour * 1): - glog.V(1).Infof("timeout %s", m.dbFileName) + log.V(2).Infof("timeout %s", m.dbFileName) if m.accessFlag == 0 { accessRecord++ - glog.V(1).Infof("accessRecord++") + log.V(2).Infof("accessRecord++") if accessRecord >= m.ldbTimeout { unloadLdb(m) } } else { - glog.V(1).Infof("reset accessRecord %s", m.dbFileName) + log.V(2).Infof("reset accessRecord %s", m.dbFileName) // reset accessRecord accessRecord = 0 } diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index c75514a31..c65104e37 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -3,7 +3,7 @@ package storage import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -22,7 +22,7 @@ func NewCompactNeedleMap(file *os.File) *NeedleMap { nm.indexFile = file stat, err := file.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", file.Name(), err) + log.Fatalf("stat file %s: %v", file.Name(), err) } nm.indexFileOffset = stat.Size() return nm @@ -51,7 +51,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { } return nil }) - glog.V(1).Infof("max file key: %v count: %d deleted: %d for file: %s", nm.MaxFileKey(), nm.FileCount(), nm.DeletedCount(), file.Name()) + log.V(2).Infof("max file key: %v count: %d deleted: %d for file: %s", nm.MaxFileKey(), nm.FileCount(), nm.DeletedCount(), file.Name()) return nm, e } @@ -75,7 +75,7 @@ func (nm *NeedleMap) Close() { } indexFileName := nm.indexFile.Name() if err := nm.indexFile.Sync(); err != nil { - glog.Warningf("sync file %s failed, %v", indexFileName, err) + log.Warningf("sync file %s failed, %v", indexFileName, err) } _ = nm.indexFile.Close() } @@ -98,7 +98,7 @@ func (nm *NeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Op nm.indexFile = indexFile stat, err := indexFile.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) return err } nm.indexFileOffset = stat.Size() @@ -108,7 +108,7 @@ func (nm *NeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Op } func (nm *NeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) error { - glog.V(0).Infof("loading idx from offset %d for file: %s", startFrom, indexFile.Name()) + log.V(3).Infof("loading idx from offset %d for file: %s", startFrom, indexFile.Name()) e := idx.WalkIndexFile(indexFile, startFrom, func(key NeedleId, offset Offset, size Size) error { nm.MaybeSetMaxFileKey(key) nm.FileCounter++ diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go index d6d0a8730..58055f788 100644 --- a/weed/storage/needle_map_metric.go +++ b/weed/storage/needle_map_metric.go @@ -154,7 +154,7 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key for remainingCount >= 0 { n, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount) - // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e) + // log.V(3).Infoln("file", r.Name(), "readerOffset", 
NeedleMapEntrySize*remainingCount, "count", count, "e", e) if e == io.EOF && n == int(NeedleMapEntrySize*nextBatchSize) { e = nil } diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index 96919d103..0873aac65 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" . "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -23,9 +23,9 @@ func TestFastLoadingNeedleMapMetrics(t *testing.T) { mm, _ := newNeedleMapMetricFromIndexFile(idxFile) - glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount()) - glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize()) - glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize()) - glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount()) - glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey()) + log.V(3).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount()) + log.V(3).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize()) + log.V(3).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize()) + log.V(3).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount()) + log.V(3).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey()) } diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go index 5bd67ea86..cf4885cdc 100644 --- a/weed/storage/needle_map_sorted_file.go +++ b/weed/storage/needle_map_sorted_file.go @@ -3,7 +3,7 @@ package storage import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -21,18 +21,18 @@ func NewSortedFileNeedleMap(indexBaseFileName string, indexFile *os.File) (m *So m.indexFile = indexFile fileName := indexBaseFileName + ".sdx" if !isSortedFileFresh(fileName, indexFile) { - glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) + log.V(3).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) erasure_coding.WriteSortedFileFromIdx(indexBaseFileName, ".sdx") - glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) + log.V(3).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) } - glog.V(1).Infof("Opening %s...", fileName) + log.V(2).Infof("Opening %s...", fileName) if m.dbFile, err = os.OpenFile(indexBaseFileName+".sdx", os.O_RDWR, 0); err != nil { return } dbStat, _ := m.dbFile.Stat() m.dbFileSize = dbStat.Size() - glog.V(1).Infof("Loading %s...", indexFile.Name()) + log.V(2).Infof("Loading %s...", indexFile.Name()) mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) if indexLoadError != nil { _ = m.dbFile.Close() @@ -52,7 +52,7 @@ func isSortedFileFresh(dbFileName string, indexFile *os.File) bool { dbStat, dbStatErr := dbFile.Stat() indexStat, indexStatErr := indexFile.Stat() if dbStatErr != nil || indexStatErr != nil { - glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + log.V(3).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) return false } diff --git a/weed/storage/store.go b/weed/storage/store.go index 3b2869a2e..b849cbf36 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -14,7 +14,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" @@ -166,11 +166,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind if location := s.FindFreeLocation(func(location *DiskLocation) bool { return location.DiskType == diskType }); location != nil { - glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", + log.V(3).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb, ldbTimeout); err == nil { location.SetVolume(vid, volume) - glog.V(0).Infof("add volume %d", vid) + log.V(3).Infof("add volume %d", vid) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ Id: uint32(vid), Collection: collection, @@ -276,12 +276,12 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { deleteVids = append(deleteVids, v.Id) shouldDeleteVolume = true } else { - glog.V(0).Infof("volume %d is expired", v.Id) + log.V(3).Infof("volume %d is expired", v.Id) } if v.lastIoError != nil { deleteVids = append(deleteVids, v.Id) shouldDeleteVolume = true - glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) + log.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) } } @@ -329,10 +329,10 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { found, err := location.deleteVolumeById(vid, false) if err == nil { if found { - glog.V(0).Infof("volume %d is deleted", vid) + log.V(3).Infof("volume %d is deleted", vid) } } else { - glog.Warningf("delete 
volume %d: %v", vid, err) + log.Warningf("delete volume %d: %v", vid, err) } } location.volumesLock.Unlock() @@ -401,7 +401,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE err := location.deleteEcVolumeById(ev.VolumeId) if err != nil { ecShards = append(ecShards, messages...) - glog.Errorf("delete EcVolume err %d: %v", ev.VolumeId, err) + log.Errorf("delete EcVolume err %d: %v", ev.VolumeId, err) continue } // No need for additional lock here since we only need the messages @@ -440,7 +440,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, checkCook _, _, isUnchanged, err = v.writeNeedle2(n, checkCookie, fsync || s.isStopping) return } - glog.V(0).Infoln("volume", i, "not found!") + log.V(3).Infoln("volume", i, "not found!") err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port) return } @@ -513,7 +513,7 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error { func (s *Store) MountVolume(i needle.VolumeId) error { for _, location := range s.Locations { if found := location.LoadVolume(i, s.NeedleMapKind); found == true { - glog.V(0).Infof("mount volume %d", i) + log.V(3).Infof("mount volume %d", i) v := s.findVolume(i) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ Id: uint32(v.Id), @@ -547,7 +547,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error { for _, location := range s.Locations { err := location.UnloadVolume(i) if err == nil { - glog.V(0).Infof("UnmountVolume %d", i) + log.V(3).Infof("UnmountVolume %d", i) s.DeletedVolumesChan <- message return nil } else if err == ErrVolumeNotFound { @@ -574,7 +574,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error { for _, location := range s.Locations { err := location.DeleteVolume(i, onlyEmpty) if err == nil { - glog.V(0).Infof("DeleteVolume %d", i) + log.V(3).Infof("DeleteVolume %d", i) s.DeletedVolumesChan <- message return nil } else if err == ErrVolumeNotFound { @@ -582,7 +582,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error { } else if err == ErrVolumeNotEmpty { return fmt.Errorf("DeleteVolume %d: %v", i, err) } else { - glog.Errorf("DeleteVolume %d: %v", i, err) + log.Errorf("DeleteVolume %d: %v", i, err) } } @@ -654,7 +654,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) { } newMaxVolumeCount = newMaxVolumeCount + maxVolumeCount atomic.StoreInt32(&diskLocation.MaxVolumeCount, maxVolumeCount) - glog.V(4).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB", + log.V(-1).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB", diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024) hasChanges = hasChanges || currentMaxVolumeCount != atomic.LoadInt32(&diskLocation.MaxVolumeCount) } else { diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 38cf41550..f2ad46463 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -11,7 +11,7 @@ import ( "github.com/klauspost/reedsolomon" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" @@ -51,7 +51,7 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat { func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error { for _, location := range s.Locations { if 
ecVolume, err := location.LoadEcShard(collection, vid, shardId); err == nil { - glog.V(0).Infof("MountEcShards %d.%d", vid, shardId) + log.V(3).Infof("MountEcShards %d.%d", vid, shardId) var shardBits erasure_coding.ShardBits @@ -90,7 +90,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar for _, location := range s.Locations { if deleted := location.UnloadEcShard(vid, shardId); deleted { - glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId) + log.V(3).Infof("UnmountEcShards %d.%d", vid, shardId) s.DeletedEcShardsChan <- message return nil } @@ -150,10 +150,10 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS onReadSizeFn(size) } - glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) + log.V(0).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) if len(intervals) > 1 { - glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) + log.V(0).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) } bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals) if err != nil { @@ -204,7 +204,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur var readSize int if readSize, err = shard.ReadAt(data, actualOffset); err != nil { if readSize != int(interval.Size) { - glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err) + log.V(3).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err) return } } @@ -219,7 +219,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur if err == nil { return } - glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) + log.V(3).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) } // try reading by recovering from other shards @@ -227,7 +227,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur if err == nil { return } - glog.V(0).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err) + log.V(3).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err) } return } @@ -252,7 +252,7 @@ func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) return nil } - glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) + log.V(0).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) err = operation.WithMasterServerClient(false, s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupEcVolumeRequest{ @@ -289,12 +289,12 @@ func (s *Store) readRemoteEcShardInterval(sourceDataNodes []pb.ServerAddress, ne } for _, sourceDataNode := range sourceDataNodes { - glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) + log.V(0).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return } - glog.V(1).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) + log.V(2).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) } return @@ -341,7 +341,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode pb.ServerAddress, nee } 
func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { - glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) + log.V(0).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) if err != nil { @@ -359,7 +359,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum continue } if len(locations) == 0 { - glog.V(3).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations) + log.V(0).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations) continue } @@ -370,7 +370,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum data := make([]byte, len(buf)) nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset) if readErr != nil { - glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) + log.V(0).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) forgetShardId(ecVolume, shardId) } if isDeleted { @@ -386,10 +386,10 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum wg.Wait() if err = enc.ReconstructData(bufs); err != nil { - glog.V(3).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err) + log.V(0).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err) return 0, false, err } - glog.V(4).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) + log.V(-1).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) copy(buf, bufs[shardIdToRecover]) diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index a3e028bbb..708c2a636 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" @@ -76,12 +76,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.Sh } for _, sourceDataNode := range sourceDataNodes { - glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) + log.V(-1).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) if err != nil { return err } - glog.V(1).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err) + log.V(2).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err) } return nil diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index 531d859b8..6cf8eb002 100644 --- a/weed/storage/store_vacuum.go +++ 
b/weed/storage/store_vacuum.go @@ -5,13 +5,13 @@ import ( "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { if v := s.findVolume(volumeId); v != nil { - glog.V(3).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel()) + log.V(0).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel()) return v.garbageLevel(), nil } return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId) diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go index d2ef09e6a..f92f92fc2 100644 --- a/weed/storage/super_block/super_block.go +++ b/weed/storage/super_block/super_block.go @@ -3,7 +3,7 @@ package super_block import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/util" @@ -48,12 +48,12 @@ func (s *SuperBlock) Bytes() []byte { if s.Extra != nil { extraData, err := proto.Marshal(s.Extra) if err != nil { - glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) + log.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) } extraSize := len(extraData) if extraSize > 256*256-2 { // reserve a couple of bits for future extension - glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) + log.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) } s.ExtraSize = uint16(extraSize) util.Uint16toBytes(header[6:8], s.ExtraSize) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index e55564652..46cafe098 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -15,7 +15,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/super_block" "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type Volume struct { @@ -121,7 +121,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) if e == nil { return uint64(datFileSize), v.nm.IndexFileSize(), modTime } - glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) + log.V(3).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) return // -1 causes integer overflow and the volume to become unwritable. 
diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go
index d2ef09e6a..f92f92fc2 100644
--- a/weed/storage/super_block/super_block.go
+++ b/weed/storage/super_block/super_block.go
@@ -3,7 +3,7 @@ package super_block
 import (
 	"google.golang.org/protobuf/proto"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -48,12 +48,12 @@ func (s *SuperBlock) Bytes() []byte {
 	if s.Extra != nil {
 		extraData, err := proto.Marshal(s.Extra)
 		if err != nil {
-			glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
+			log.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
 		}
 		extraSize := len(extraData)
 		if extraSize > 256*256-2 { // reserve a couple of bits for future extension
-			glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
+			log.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
 		}
 		s.ExtraSize = uint16(extraSize)
 		util.Uint16toBytes(header[6:8], s.ExtraSize)
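Why the 256*256-2 cap in the hunk above: ExtraSize is serialized into two bytes of the superblock header, so values above 65534 cannot be stored once a little headroom is reserved. A quick sketch of the arithmetic; weed/util's Uint16toBytes is assumed here to be a big-endian write, so encoding/binary stands in for it.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := make([]byte, 8)
	extraSize := uint16(256*256 - 2) // the cap enforced by the Fatalf above: 65534
	binary.BigEndian.PutUint16(header[6:8], extraSize)
	fmt.Printf("header[6:8]=%x max_uint16=%d\n", header[6:8], ^uint16(0))
}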
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index e55564652..46cafe098 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -15,7 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 type Volume struct {
@@ -121,7 +121,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time)
 		if e == nil {
 			return uint64(datFileSize), v.nm.IndexFileSize(), modTime
 		}
-		glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
+		log.V(3).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
 		return // -1 causes integer overflow and the volume to become unwritable.
 	}
@@ -141,7 +141,7 @@ func (v *Volume) doIsEmpty() (bool, error) {
 	} else {
 		datFileSize, _, e := v.DataBackend.GetStat()
 		if e != nil {
-			glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
+			log.V(3).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
 			return false, fmt.Errorf("v.DataBackend.GetStat(): %v", e)
 		}
 		if datFileSize > super_block.SuperBlockSize {
@@ -211,12 +211,12 @@ func (v *Volume) SyncToDisk() {
 	defer v.dataFileAccessLock.Unlock()
 	if v.nm != nil {
 		if err := v.nm.Sync(); err != nil {
-			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
+			log.Warningf("Volume Close fail to sync volume idx %d", v.Id)
 		}
 	}
 	if v.DataBackend != nil {
 		if err := v.DataBackend.Sync(); err != nil {
-			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
+			log.Warningf("Volume Close fail to sync volume %d", v.Id)
 		}
 	}
 }
@@ -232,19 +232,19 @@ func (v *Volume) Close() {
 func (v *Volume) doClose() {
 	for v.isCommitCompacting {
 		time.Sleep(521 * time.Millisecond)
-		glog.Warningf("Volume Close wait for compaction %d", v.Id)
+		log.Warningf("Volume Close wait for compaction %d", v.Id)
 	}
 	if v.nm != nil {
 		if err := v.nm.Sync(); err != nil {
-			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
+			log.Warningf("Volume Close fail to sync volume idx %d", v.Id)
 		}
 		v.nm.Close()
 		v.nm = nil
 	}
 	if v.DataBackend != nil {
 		if err := v.DataBackend.Close(); err != nil {
-			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
+			log.Warningf("Volume Close fail to sync volume %d", v.Id)
 		}
 		v.DataBackend = nil
 		stats.VolumeServerVolumeGauge.WithLabelValues(v.Collection, "volume").Dec()
@@ -270,9 +270,9 @@ func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
 	if v.Ttl == nil || v.Ttl.Minutes() == 0 {
 		return false
 	}
-	glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
+	log.V(1).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
 	livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
-	glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
+	log.V(1).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
 	if int64(v.Ttl.Minutes()) < livedMinutes {
 		return true
 	}
@@ -298,7 +298,7 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
 func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) {
 	v.dataFileAccessLock.RLock()
 	defer v.dataFileAccessLock.RUnlock()
-	glog.V(4).Infof("collectStatus volume %d", v.Id)
+	log.V(-1).Infof("collectStatus volume %d", v.Id)
 
 	if v.nm == nil || v.DataBackend == nil {
 		return
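A minimal sketch of the expiry rule in Volume.expired above: a volume is expired once it has lived longer than its TTL, measured in whole minutes since the last modification. The function signature is illustrative, not the real method.

package main

import (
	"fmt"
	"time"
)

func expired(ttlMinutes int64, lastModifiedUnix int64) bool {
	if ttlMinutes == 0 {
		return false // no TTL configured
	}
	livedMinutes := (time.Now().Unix() - lastModifiedUnix) / 60
	return ttlMinutes < livedMinutes
}

func main() {
	twoHoursAgo := time.Now().Add(-2 * time.Hour).Unix()
	fmt.Println(expired(60, twoHoursAgo))  // true: lived 120m > 60m TTL
	fmt.Println(expired(240, twoHoursAgo)) // false: lived 120m < 240m TTL
}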
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 6d2335f70..d69234db0 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -6,7 +6,7 @@ import (
 	"io"
 	"os"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -122,10 +122,10 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
 			return n.AppendAtNs, nil
 		}
 		if fileSize > fileTailOffset {
-			glog.Warningf("data file %s actual %d bytes expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+			log.Warningf("data file %s actual %d bytes expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
 			return n.AppendAtNs, fmt.Errorf("data file %s actual %d bytes expected %d bytes", datFile.Name(), fileSize, fileTailOffset)
 		}
-		glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
+		log.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
 	}
 	if err = n.ReadData(datFile, offset, size, v); err != nil {
 		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
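A rough sketch of the size check in verifyNeedleIntegrity above: the index says where the last needle should end (fileTailOffset). Per the hunk, a .dat file that is longer than expected is a hard error, while a shorter one is only warned about before the needle itself is re-read. The helper name is hypothetical.

package main

import "fmt"

func checkTail(fileSize, fileTailOffset int64) error {
	if fileSize > fileTailOffset {
		// trailing bytes past the last indexed needle: fail hard
		return fmt.Errorf("data file actual %d bytes expected %d bytes", fileSize, fileTailOffset)
	}
	if fileSize < fileTailOffset {
		// shortfall: warn only, the needle read below will catch real damage
		fmt.Printf("data file has %d bytes, less than expected %d bytes\n", fileSize, fileTailOffset)
	}
	return nil
}

func main() {
	fmt.Println(checkTail(1024, 1024)) // <nil>
	fmt.Println(checkTail(2048, 1024)) // error: trailing garbage
}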
diff --git a/weed/storage/volume_info/volume_info.go b/weed/storage/volume_info/volume_info.go
index 24e2b17bc..008e47f49 100644
--- a/weed/storage/volume_info/volume_info.go
+++ b/weed/storage/volume_info/volume_info.go
@@ -5,7 +5,7 @@ import (
 	jsonpb "google.golang.org/protobuf/encoding/protojson"
 	"os"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	_ "github.com/seaweedfs/seaweedfs/weed/storage/backend/rclone_backend"
 	_ "github.com/seaweedfs/seaweedfs/weed/storage/backend/s3_backend"
@@ -17,14 +17,14 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
 
 	volumeInfo = &volume_server_pb.VolumeInfo{}
 
-	glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
+	log.V(2).Infof("maybeLoadVolumeInfo checks %s", fileName)
 	if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
 		if !exists {
 			return
 		}
 		hasVolumeInfoFile = true
 		if !canRead {
-			glog.Warningf("can not read %s", fileName)
+			log.Warningf("can not read %s", fileName)
 			err = fmt.Errorf("can not read %s", fileName)
 			return
 		}
@@ -33,19 +33,19 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
 
 	hasVolumeInfoFile = true
 
-	glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
+	log.V(2).Infof("maybeLoadVolumeInfo reads %s", fileName)
 	fileData, readErr := os.ReadFile(fileName)
 	if readErr != nil {
-		glog.Warningf("fail to read %s : %v", fileName, readErr)
+		log.Warningf("fail to read %s : %v", fileName, readErr)
 		err = fmt.Errorf("fail to read %s : %v", fileName, readErr)
 		return
 	}
 
-	glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+	log.V(2).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
 	if err = jsonpb.Unmarshal(fileData, volumeInfo); err != nil {
 		if oldVersionErr := tryOldVersionVolumeInfo(fileData, volumeInfo); oldVersionErr != nil {
-			glog.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
+			log.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
			err = fmt.Errorf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
 			return
 		} else {
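The .vif file parsed above is a protobuf message stored as JSON, which is why the code goes through protojson (aliased jsonpb) rather than encoding/json. A small sketch of that load path, using structpb.Struct as a stand-in message since the real volume_server_pb.VolumeInfo type is not reproduced here:

package main

import (
	"fmt"

	jsonpb "google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	data := []byte(`{"version": 3, "replication": "000"}`)

	var v structpb.Struct // stand-in for volume_server_pb.VolumeInfo
	if err := jsonpb.Unmarshal(data, &v); err != nil {
		// MaybeLoadVolumeInfo falls back to tryOldVersionVolumeInfo at this point.
		fmt.Println("new format failed:", err)
		return
	}
	fmt.Println("loaded version:", v.Fields["version"].GetNumberValue())
}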
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 3334159ed..ac4122957 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/syndtr/goleveldb/leveldb/opt"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -51,7 +51,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 	if v.HasRemoteFile() {
 		v.noWriteCanDelete = true
 		v.noWriteOrDelete = false
-		glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
+		log.V(3).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
 		if err := v.LoadRemoteFile(); err != nil {
 			return fmt.Errorf("load remote file %v: %v", v.volumeInfo, err)
 		}
@@ -65,7 +65,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 		if canWrite {
 			dataFile, err = os.OpenFile(v.FileName(".dat"), os.O_RDWR|os.O_CREATE, 0644)
 		} else {
-			glog.V(0).Infof("opening %s in READONLY mode", v.FileName(".dat"))
+			log.V(3).Infof("opening %s in READONLY mode", v.FileName(".dat"))
 			dataFile, err = os.Open(v.FileName(".dat"))
 			v.noWriteOrDelete = true
 		}
@@ -95,10 +95,10 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 		if err == nil {
 			v.volumeInfo.Version = uint32(v.SuperBlock.Version)
 		}
-		glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version)
+		log.V(3).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version)
 		if v.HasRemoteFile() {
 			// maybe temporary network problem
-			glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err)
+			log.Errorf("readSuperBlock remote volume %d: %v", v.Id, err)
 			err = nil
 		}
 	} else {
@@ -116,16 +116,16 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 		}
 		// check volume idx files
 		if err := v.checkIdxFile(); err != nil {
-			glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err)
+			log.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err)
 		}
 		var indexFile *os.File
 		if v.noWriteOrDelete {
-			glog.V(0).Infoln("open to read file", v.FileName(".idx"))
+			log.V(3).Infoln("open to read file", v.FileName(".idx"))
 			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644); err != nil {
 				return fmt.Errorf("cannot read Volume Index %s: %v", v.FileName(".idx"), err)
 			}
 		} else {
-			glog.V(1).Infoln("open to write file", v.FileName(".idx"))
+			log.V(2).Infoln("open to write file", v.FileName(".idx"))
 			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDWR|os.O_CREATE, 0644); err != nil {
 				return fmt.Errorf("cannot write Volume Index %s: %v", v.FileName(".idx"), err)
 			}
@@ -136,27 +136,27 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 		// storage tier, and download to local storage, which may cause the
 		// capactiy overloading.
 		if !v.HasRemoteFile() {
-			glog.V(0).Infof("checking volume data integrity for volume %d", v.Id)
+			log.V(3).Infof("checking volume data integrity for volume %d", v.Id)
 			if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil {
 				v.noWriteOrDelete = true
-				glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
+				log.V(3).Infof("volumeDataIntegrityChecking failed %v", err)
 			}
 		}
 
 		if v.noWriteOrDelete || v.noWriteCanDelete {
 			if v.nm, err = NewSortedFileNeedleMap(v.IndexFileName(), indexFile); err != nil {
-				glog.V(0).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err)
+				log.V(3).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err)
 			}
 		} else {
 			switch needleMapKind {
 			case NeedleMapInMemory:
 				if v.tmpNm != nil {
-					glog.V(0).Infof("updating memory compact index %s ", v.FileName(".idx"))
+					log.V(3).Infof("updating memory compact index %s ", v.FileName(".idx"))
 					err = v.tmpNm.UpdateNeedleMap(v, indexFile, nil, 0)
 				} else {
-					glog.V(0).Infoln("loading memory index", v.FileName(".idx"), "to memory")
+					log.V(3).Infoln("loading memory index", v.FileName(".idx"), "to memory")
 					if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
-						glog.V(0).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err)
+						log.V(3).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err)
 					}
 				}
 			case NeedleMapLevelDb:
@@ -166,12 +166,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 					CompactionTableSizeMultiplier: 10, // default value is 1
 				}
 				if v.tmpNm != nil {
-					glog.V(0).Infoln("updating leveldb index", v.FileName(".ldb"))
+					log.V(3).Infoln("updating leveldb index", v.FileName(".ldb"))
 					err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout)
 				} else {
-					glog.V(0).Infoln("loading leveldb index", v.FileName(".ldb"))
+					log.V(3).Infoln("loading leveldb index", v.FileName(".ldb"))
 					if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil {
-						glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
+						log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
 					}
 				}
 			case NeedleMapLevelDbMedium:
@@ -181,12 +181,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 					CompactionTableSizeMultiplier: 10, // default value is 1
 				}
 				if v.tmpNm != nil {
-					glog.V(0).Infoln("updating leveldb medium index", v.FileName(".ldb"))
+					log.V(3).Infoln("updating leveldb medium index", v.FileName(".ldb"))
 					err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout)
 				} else {
-					glog.V(0).Infoln("loading leveldb medium index", v.FileName(".ldb"))
+					log.V(3).Infoln("loading leveldb medium index", v.FileName(".ldb"))
 					if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil {
-						glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
+						log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
 					}
 				}
 			case NeedleMapLevelDbLarge:
@@ -196,12 +196,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 					CompactionTableSizeMultiplier: 10, // default value is 1
 				}
 				if v.tmpNm != nil {
-					glog.V(0).Infoln("updating leveldb large index", v.FileName(".ldb"))
+					log.V(3).Infoln("updating leveldb large index", v.FileName(".ldb"))
 					err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout)
 				} else {
-					glog.V(0).Infoln("loading leveldb large index", v.FileName(".ldb"))
+					log.V(3).Infoln("loading leveldb large index", v.FileName(".ldb"))
 					if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil {
-						glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
+						log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
 					}
 				}
 			}
@@ -212,7 +212,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 			v.volumeInfo.Version = uint32(v.SuperBlock.Version)
 			v.volumeInfo.BytesOffset = uint32(types.OffsetSize)
 			if err := v.SaveVolumeInfo(); err != nil {
-				glog.Warningf("volume %d failed to save file info: %v", v.Id, err)
+				log.Warningf("volume %d failed to save file info: %v", v.Id, err)
 			}
 		}
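The three LevelDB-backed needle-map kinds above differ mainly in the goleveldb options they construct; only the CompactionTableSizeMultiplier comment is visible in the hunks, so the sizes below are illustrative placeholders, not the values SeaweedFS actually picks per kind.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	opts := &opt.Options{
		BlockCacheCapacity:            32 * 1024 * 1024, // larger kinds raise these caps (assumed sizes)
		WriteBuffer:                   16 * 1024 * 1024,
		CompactionTableSizeMultiplier: 10, // default value is 1, as the comment in the diff notes
	}
	fmt.Printf("cache=%dMB buffer=%dMB\n", opts.BlockCacheCapacity>>20, opts.WriteBuffer>>20)
}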
diff --git a/weed/storage/volume_read.go b/weed/storage/volume_read.go
index f82e3e72d..964a73ca3 100644
--- a/weed/storage/volume_read.go
+++ b/weed/storage/volume_read.go
@@ -6,7 +6,7 @@ import (
 	"io"
 	"time"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -28,7 +28,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption, onReadSize
 	readSize := nv.Size
 	if readSize.IsDeleted() {
 		if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
-			glog.V(3).Infof("reading deleted %s", n.String())
+			log.V(0).Infof("reading deleted %s", n.String())
 			readSize = -readSize
 		} else {
 			return -1, ErrorDeleted
@@ -118,7 +118,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
 	readSize := nv.Size
 	if readSize.IsDeleted() {
 		if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
-			glog.V(3).Infof("reading deleted %s", n.String())
+			log.V(0).Infof("reading deleted %s", n.String())
 			readSize = -readSize
 		} else {
 			return ErrorDeleted
@@ -253,7 +253,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
 		if volumeFileScanner.ReadNeedleBody() {
 			// println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest)
 			if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
-				glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
+				log.V(3).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
 				// err = fmt.Errorf("cannot read needle body: %v", err)
 				// return
 			}
@@ -263,18 +263,18 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
 			return nil
 		}
 		if err != nil {
-			glog.V(0).Infof("visit needle error: %v", err)
+			log.V(3).Infof("visit needle error: %v", err)
 			return fmt.Errorf("visit needle error: %v", err)
 		}
 		offset += NeedleHeaderSize + rest
-		glog.V(4).Infof("==> new entry offset %d", offset)
+		log.V(-1).Infof("==> new entry offset %d", offset)
 		if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
 			if err == io.EOF {
 				return nil
 			}
 			return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
 		}
-		glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
+		log.V(-1).Infof("new entry needle size:%d rest:%d", n.Size, rest)
 	}
 	return nil
 }
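The ScanVolumeFileFrom loop above walks the .dat file record by record: read a fixed-size header, learn the body length ("rest"), advance the offset by header plus body, repeat until EOF. A generic sketch of the same walk over a plain file; the 8-byte little-endian length header is a hypothetical format, not the real needle layout.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

const headerSize = 8 // hypothetical: each record starts with an 8-byte body length

func scan(f *os.File) error {
	var offset int64
	header := make([]byte, headerSize)
	for {
		if _, err := f.ReadAt(header, offset); err != nil {
			if err == io.EOF {
				return nil // clean end of file
			}
			return fmt.Errorf("read header at %d: %v", offset, err)
		}
		rest := int64(binary.LittleEndian.Uint64(header))
		fmt.Printf("entry at %d, body %d bytes\n", offset, rest)
		offset += headerSize + rest // same advance rule as the loop above
	}
}

func main() {
	f, _ := os.CreateTemp("", "scan")
	defer os.Remove(f.Name())
	for _, body := range []string{"hello", "world!"} {
		binary.Write(f, binary.LittleEndian, uint64(len(body)))
		f.WriteString(body)
	}
	if err := scan(f); err != nil {
		fmt.Println(err)
	}
}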
diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go
index 096d46906..652f71058 100644
--- a/weed/storage/volume_super_block.go
+++ b/weed/storage/volume_super_block.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -14,7 +14,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
 
 	datSize, _, e := v.DataBackend.GetStat()
 	if e != nil {
-		glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
+		log.V(3).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
 		return e
 	}
 	if datSize == 0 {
diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go
index 5d9b67192..8502b97ef 100644
--- a/weed/storage/volume_tier.go
+++ b/weed/storage/volume_tier.go
@@ -2,7 +2,7 @@ package storage
 
 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	_ "github.com/seaweedfs/seaweedfs/weed/storage/backend/rclone_backend"
@@ -27,7 +27,7 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) {
 	}
 
 	if v.hasRemoteFile {
-		glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id,
+		log.V(3).Infof("volume %d is tiered to %s as %s and read only", v.Id,
 			v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key)
 	} else {
 		if v.volumeInfo.BytesOffset == 0 {
@@ -42,12 +42,12 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) {
 		} else {
 			m = "with"
 		}
-		glog.Exitf("BytesOffset mismatch in volume info file %s, try use binary version %s large_disk", v.FileName(".vif"), m)
+		log.Exitf("BytesOffset mismatch in volume info file %s, try use binary version %s large_disk", v.FileName(".vif"), m)
 		return
 	}
 
 	if err != nil {
-		glog.Warningf("load volume %d.vif file: %v", v.Id, err)
+		log.Warningf("load volume %d.vif file: %v", v.Id, err)
 		return
 	}
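The BytesOffset check guarded by log.Exitf above protects against opening a volume written with a different needle-offset width: standard builds are assumed to use 4-byte offsets (types.OffsetSize), while binaries built with the large_disk tag widen this to 5. A small sketch of the incompatibility it catches:

package main

import "fmt"

func main() {
	const binaryOffsetSize = 4  // types.OffsetSize in a standard build (assumed)
	vifBytesOffset := uint32(5) // .vif written by a large_disk binary
	if vifBytesOffset != 0 && vifBytesOffset != binaryOffsetSize {
		fmt.Println("BytesOffset mismatch: try the binary version with large_disk")
	}
}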
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index 9f277d4f5..cb310781d 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -6,7 +6,7 @@ import (
 	"runtime"
 	"time"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	idx2 "github.com/seaweedfs/seaweedfs/weed/storage/idx"
@@ -41,13 +41,13 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
 	if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
 		return nil
 	}
-	glog.V(3).Infof("Compacting volume %d ...", v.Id)
+	log.V(0).Infof("Compacting volume %d ...", v.Id)
 	//no need to lock for copy on write
 	//v.accessLock.Lock()
 	//defer v.accessLock.Unlock()
-	//glog.V(3).Infof("Got Compaction lock...")
+	//log.V(0).Infof("Got Compaction lock...")
 	if v.isCompacting || v.isCommitCompacting {
-		glog.V(0).Infof("Volume %d is already compacting...", v.Id)
+		log.V(3).Infof("Volume %d is already compacting...", v.Id)
 		return nil
 	}
 	v.isCompacting = true
@@ -57,12 +57,12 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
 
 	v.lastCompactIndexOffset = v.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactionRevision
-	glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
+	log.V(0).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
 	if err := v.DataBackend.Sync(); err != nil {
-		glog.V(0).Infof("compact failed to sync volume %d", v.Id)
+		log.V(3).Infof("compact failed to sync volume %d", v.Id)
 	}
 	if err := v.nm.Sync(); err != nil {
-		glog.V(0).Infof("compact failed to sync volume idx %d", v.Id)
+		log.V(3).Infof("compact failed to sync volume idx %d", v.Id)
 	}
 	return v.copyDataAndGenerateIndexFile(v.FileName(".cpd"), v.FileName(".cpx"), preallocate, compactionBytePerSecond)
 }
@@ -73,10 +73,10 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64, prog
 	if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
 		return nil
 	}
-	glog.V(3).Infof("Compact2 volume %d ...", v.Id)
+	log.V(0).Infof("Compact2 volume %d ...", v.Id)
 	if v.isCompacting || v.isCommitCompacting {
-		glog.V(0).Infof("Volume %d is already compacting2 ...", v.Id)
+		log.V(3).Infof("Volume %d is already compacting2 ...", v.Id)
 		return nil
 	}
 	v.isCompacting = true
@@ -86,15 +86,15 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64, prog
 
 	v.lastCompactIndexOffset = v.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactionRevision
-	glog.V(3).Infof("creating copies for volume %d ...", v.Id)
+	log.V(0).Infof("creating copies for volume %d ...", v.Id)
 	if v.DataBackend == nil {
 		return fmt.Errorf("volume %d backend is empty remote:%v", v.Id, v.HasRemoteFile())
 	}
 	if err := v.DataBackend.Sync(); err != nil {
-		glog.V(0).Infof("compact2 failed to sync volume dat %d: %v", v.Id, err)
+		log.V(3).Infof("compact2 failed to sync volume dat %d: %v", v.Id, err)
 	}
 	if err := v.nm.Sync(); err != nil {
-		glog.V(0).Infof("compact2 failed to sync volume idx %d: %v", v.Id, err)
+		log.V(3).Infof("compact2 failed to sync volume idx %d: %v", v.Id, err)
 	}
 	return v.copyDataBasedOnIndexFile(
 		v.FileName(".dat"), v.FileName(".idx"),
@@ -111,10 +111,10 @@ func (v *Volume) CommitCompact() error {
 	if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
 		return nil
 	}
-	glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
+	log.V(3).Infof("Committing volume %d vacuuming...", v.Id)
 
 	if v.isCommitCompacting {
-		glog.V(0).Infof("Volume %d is already commit compacting ...", v.Id)
+		log.V(3).Infof("Volume %d is already commit compacting ...", v.Id)
 		return nil
 	}
 	v.isCommitCompacting = true
@@ -125,14 +125,14 @@ func (v *Volume) CommitCompact() error {
 	v.dataFileAccessLock.Lock()
 	defer v.dataFileAccessLock.Unlock()
-	glog.V(3).Infof("Got volume %d committing lock...", v.Id)
+	log.V(0).Infof("Got volume %d committing lock...", v.Id)
 	if v.nm != nil {
 		v.nm.Close()
 		v.nm = nil
 	}
 	if v.DataBackend != nil {
 		if err := v.DataBackend.Close(); err != nil {
-			glog.V(0).Infof("failed to close volume %d", v.Id)
+			log.V(3).Infof("failed to close volume %d", v.Id)
 		}
 	}
 	v.DataBackend = nil
@@ -140,7 +140,7 @@ func (v *Volume) CommitCompact() error {
 
 	var e error
 	if e = v.makeupDiff(v.FileName(".cpd"), v.FileName(".cpx"), v.FileName(".dat"), v.FileName(".idx")); e != nil {
-		glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
+		log.V(3).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
 		e = os.Remove(v.FileName(".cpd"))
 		if e != nil {
 			return e
@@ -169,21 +169,21 @@ func (v *Volume) CommitCompact() error {
 		}
 	}
 
-	//glog.V(3).Infof("Pretending to be vacuuming...")
+	//log.V(0).Infof("Pretending to be vacuuming...")
 	//time.Sleep(20 * time.Second)
 	os.RemoveAll(v.FileName(".ldb"))
 
-	glog.V(3).Infof("Loading volume %d commit file...", v.Id)
+	log.V(0).Infof("Loading volume %d commit file...", v.Id)
 	if e = v.load(true, false, v.needleMapKind, 0); e != nil {
 		return e
 	}
 
-	glog.V(3).Infof("Finish committing volume %d", v.Id)
+	log.V(0).Infof("Finish committing volume %d", v.Id)
 	return nil
 }
 
 func (v *Volume) cleanupCompact() error {
-	glog.V(0).Infof("Cleaning up volume %d vacuuming...", v.Id)
+	log.V(3).Infof("Cleaning up volume %d vacuuming...", v.Id)
 
 	e1 := os.Remove(v.FileName(".cpd"))
 	e2 := os.Remove(v.FileName(".cpx"))
@@ -254,7 +254,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 		return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err)
 	}
 	key, offset, size := idx2.IdxFileEntry(IdxEntry)
-	glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
+	log.V(-1).Infof("key %d offset %d size %d", key, offset, size)
 	if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
 		incrementedHasUpdatedIndexEntry[key] = keyField{
 			offset: offset,
@@ -308,21 +308,21 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 
 	var offset int64
 	if offset, err = dst.Seek(0, 2); err != nil {
-		glog.V(0).Infof("failed to seek the end of file: %v", err)
+		log.V(3).Infof("failed to seek the end of file: %v", err)
 		return
 	}
 	//ensure file writing starting from aligned positions
 	if offset%NeedlePaddingSize != 0 {
 		offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
 		if offset, err = dst.Seek(offset, 0); err != nil {
-			glog.V(0).Infof("failed to align in datafile %s: %v", dst.Name(), err)
+			log.V(3).Infof("failed to align in datafile %s: %v", dst.Name(), err)
 			return
 		}
 	}
 	//updated needle
 	if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() {
 		//even the needle cache in memory is hit, the need_bytes is correct
-		glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size)
+		log.V(-1).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size)
 		var needleBytes []byte
 		needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, v.Version())
 		if err != nil {
@@ -386,7 +386,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
 		return nil
 	}
 	nv, ok := scanner.v.nm.Get(n.Id)
-	glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
+	log.V(-1).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
 	if ok && nv.Offset.ToActualOffset() == offset && nv.Size > 0 && nv.Size.IsValid() {
 		if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
 			return fmt.Errorf("cannot put needle: %s", err)
@@ -397,7 +397,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
 		delta := n.DiskSize(scanner.version)
 		scanner.newOffset += delta
 		scanner.writeThrottler.MaybeSlowdown(delta)
-		glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
+		log.V(-1).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
 	}
 	return nil
 }
@@ -492,7 +492,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, da
 		delta := n.DiskSize(version)
 		newOffset += delta
 		writeThrottler.MaybeSlowdown(delta)
-		glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+		log.V(-1).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
 
 		return nil
 	})
@@ -511,7 +511,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, da
 			v.Id.String(), dstDatSize, expectedContentSize)
 		}
 	} else {
-		glog.Warningf("volume %s content size: %d less deleted size: %d, new size: %d",
+		log.Warningf("volume %s content size: %d less deleted size: %d, new size: %d",
 			v.Id.String(), v.nm.ContentSize(), v.nm.DeletedSize(), dstDatSize)
 	}
 }
@@ -522,7 +522,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, da
 
 	indexFile, err := os.OpenFile(datIdxName, os.O_RDWR|os.O_CREATE, 0644)
 	if err != nil {
-		glog.Errorf("cannot open Volume Index %s: %v", datIdxName, err)
+		log.Errorf("cannot open Volume Index %s: %v", datIdxName, err)
 		return err
 	}
 	defer func() {
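One detail worth calling out from the makeupDiff hunk above: appended needles must start on NeedlePaddingSize boundaries, so the destination offset is rounded up before writing. A standalone sketch of that rounding, assuming a padding size of 8 (the value the types package appears to use):

package main

import "fmt"

const needlePaddingSize = 8 // assumed, mirrors types.NeedlePaddingSize

// alignUp rounds offset up to the next padding boundary, the same adjustment
// makeupDiff applies with Seek before appending updated needles.
func alignUp(offset int64) int64 {
	if offset%needlePaddingSize != 0 {
		offset += needlePaddingSize - offset%needlePaddingSize
	}
	return offset
}

func main() {
	for _, off := range []int64{0, 5, 8, 13} {
		fmt.Println(off, "->", alignUp(off))
	}
	// prints 0 -> 0, 5 -> 8, 8 -> 8, 13 -> 16
}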
diff --git a/weed/storage/volume_write.go b/weed/storage/volume_write.go
index cf959b576..b84bbd7bc 100644
--- a/weed/storage/volume_write.go
+++ b/weed/storage/volume_write.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	. "github.com/seaweedfs/seaweedfs/weed/storage/types"
@@ -40,7 +40,7 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
 		oldNeedle := new(needle.Needle)
 		err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), nv.Size, v.Version())
 		if err != nil {
-			glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err)
+			log.V(3).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err)
 			return false
 		}
 		if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
@@ -107,7 +107,7 @@ func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
 }
 
 func (v *Volume) syncWrite(n *needle.Needle, checkCookie bool) (offset uint64, size Size, isUnchanged bool, err error) {
-	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	// log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	v.dataFileAccessLock.Lock()
 	defer v.dataFileAccessLock.Unlock()
 
@@ -115,7 +115,7 @@ func (v *Volume) syncWrite(n *needle.Needle, checkCookie bool) (offset uint64, s
 }
 
 func (v *Volume) writeNeedle2(n *needle.Needle, checkCookie bool, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
-	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	// log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
 		n.SetHasTtl()
 		n.Ttl = v.Ttl
@@ -136,7 +136,7 @@ func (v *Volume) writeNeedle2(n *needle.Needle, checkCookie bool, fsync bool) (o
 }
 
 func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint64, size Size, isUnchanged bool, err error) {
-	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	// log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	if v.isFileUnchanged(n) {
 		size = Size(n.DataSize)
 		isUnchanged = true
@@ -157,7 +157,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint
 			n.Cookie = existingNeedle.Cookie
 		}
 		if existingNeedle.Cookie != n.Cookie {
-			glog.V(0).Infof("write cookie mismatch: existing %s, new %s",
+			log.V(3).Infof("write cookie mismatch: existing %s, new %s",
 				needle.NewFileIdFromNeedle(v.Id, existingNeedle), needle.NewFileIdFromNeedle(v.Id, n))
			err = fmt.Errorf("mismatching cookie %x", n.Cookie)
 			return
@@ -178,7 +178,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint
 	// add to needle map
 	if !ok || uint64(nv.Offset.ToActualOffset()) < offset {
 		if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
-			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+			log.V(-1).Infof("failed to save in needle map %d: %v", n.Id, err)
 		}
 	}
 	if v.lastModifiedTsSeconds < n.LastModified {
@@ -188,7 +188,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint
 }
 
 func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
-	// glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	// log.V(-1).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	v.dataFileAccessLock.Lock()
 	defer v.dataFileAccessLock.Unlock()
 
@@ -217,7 +217,7 @@ func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
 }
 
 func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
-	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	log.V(-1).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	nv, ok := v.nm.Get(n.Id)
 	// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
 	if ok && nv.Size.IsValid() {
@@ -300,7 +300,7 @@ func (v *Volume) startWorker() {
 					if err := v.DataBackend.Sync(); err != nil {
 						// todo: this may generate dirty data or cause data inconsistent, may be weed need to panic?
 						if te := v.DataBackend.Truncate(end); te != nil {
-							glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
+							log.V(3).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
 						}
 						for i := 0; i < len(currentRequests); i++ {
 							if currentRequests[i].IsSucceed() {
@@ -334,7 +334,7 @@ func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size
 		newNeedle := new(needle.Needle)
 		err = newNeedle.ReadBytes(needleBlob, nv.Offset.ToActualOffset(), size, v.Version())
 		if err == nil && oldNeedle.Cookie == newNeedle.Cookie && oldNeedle.Checksum == newNeedle.Checksum && bytes.Equal(oldNeedle.Data, newNeedle.Data) {
-			glog.V(0).Infof("needle %v already exists", needleId)
+			log.V(3).Infof("needle %v already exists", needleId)
 			return nil
 		}
 	}
@@ -350,7 +350,7 @@ func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size
 
 	// add to needle map
 	if err = v.nm.Put(needleId, ToOffset(int64(offset)), size); err != nil {
-		glog.V(4).Infof("failed to put in needle map %d: %v", needleId, err)
+		log.V(-1).Infof("failed to put in needle map %d: %v", needleId, err)
 	}
 	return err
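Taken together, the replacements in this diff follow one consistent rule: call sites keep their format strings, only the logger and the verbosity argument change, and the new level is evidently 3 minus the old glog level (V(0)↔V(3), V(1)↔V(2), V(4)→V(-1)), so higher numbers now mean more important. A sketch of that remapping as it could be applied mechanically; the helper name is hypothetical:

package main

import "fmt"

// remapVerbosity converts a glog-style V level to the util/log scale used
// throughout this diff: new = 3 - old.
func remapVerbosity(glogLevel int) int {
	return 3 - glogLevel
}

func main() {
	for _, old := range []int{0, 1, 2, 3, 4} {
		fmt.Printf("glog.V(%d) -> log.V(%d)\n", old, remapVerbosity(old))
	}
}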
