Diffstat (limited to 'weed/filesys')
-rw-r--r--   weed/filesys/dir.go                               70
-rw-r--r--   weed/filesys/dir_link.go                          16
-rw-r--r--   weed/filesys/dir_rename.go                        14
-rw-r--r--   weed/filesys/dirty_page.go                         8
-rw-r--r--   weed/filesys/dirty_page_interval.go                8
-rw-r--r--   weed/filesys/file.go                              40
-rw-r--r--   weed/filesys/filehandle.go                        34
-rw-r--r--   weed/filesys/meta_cache/meta_cache.go              8
-rw-r--r--   weed/filesys/meta_cache/meta_cache_init.go         6
-rw-r--r--   weed/filesys/meta_cache/meta_cache_subscribe.go   10
-rw-r--r--   weed/filesys/wfs.go                               16
-rw-r--r--   weed/filesys/wfs_deletion.go                       6
-rw-r--r--   weed/filesys/wfs_write.go                          8
-rw-r--r--   weed/filesys/xattr.go                              2
14 files changed, 123 insertions, 123 deletions
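Every hunk below applies the same substitution: glog verbosity-based calls are replaced by the leveled helpers from weed/util/log. A minimal Go sketch of the mapping as it reads off the hunks (the import path and helper names are taken from the diff itself; treating them as fmt-style leveled loggers, and the sample paths/values, are assumptions for illustration only):

package main

import "github.com/chrislusf/seaweedfs/weed/util/log"

func main() {
	dir, name := "/some/dir", "file.txt" // hypothetical values for illustration

	// glog.V(0).Infof(...)      -> log.Infof(...)   (operational messages)
	log.Infof("create %s/%s", dir, name)

	// glog.V(1).Infof(...)      -> log.Debugf(...)  (debug detail)
	log.Debugf("mkdir %s/%s", dir, name)

	// glog.V(3)/V(4).Infof(...) -> log.Tracef(...)  (fine-grained tracing)
	log.Tracef("dir Attr %s", dir)

	// glog.Warningf(...)        -> log.Warnf(...)   (only renamed helper besides the levels)
	log.Warnf("maybeLoadEntry not found entry %s/%s", dir, name)

	// glog.Errorf / glog.Fatalf -> log.Errorf / log.Fatalf (names unchanged)
	log.Errorf("UpdateEntry dir %s/%s failed", dir, name)
}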
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index a8481a435..8a411b60b 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -13,7 +13,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -48,12 +48,12 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
- glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
+ log.Tracef("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
if err := dir.maybeLoadEntry(); err != nil {
- glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
+ log.Tracef("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
@@ -64,14 +64,14 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid
- glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
+ log.Tracef("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
+ log.Tracef("dir Getxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -96,7 +96,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req)
+ log.Tracef("dir %s fsync %+v", dir.FullPath(), req)
return nil
}
@@ -146,7 +146,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
OExcl: req.Flags&fuse.OpenExclusive != 0,
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
+ log.Debugf("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -157,7 +157,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
- glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.EIO
}
@@ -182,21 +182,21 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
if req.Mode&os.ModeNamedPipe != 0 {
- glog.V(1).Infof("mknod named pipe %s", req.String())
+ log.Debugf("mknod named pipe %s", req.String())
return nil, fuse.ENOSYS
}
if req.Mode&req.Mode&os.ModeSocket != 0 {
- glog.V(1).Infof("mknod socket %s", req.String())
+ log.Debugf("mknod socket %s", req.String())
return nil, fuse.ENOSYS
}
// not going to support mknod for normal files either
- glog.V(1).Infof("mknod %s", req.String())
+ log.Debugf("mknod %s", req.String())
return nil, fuse.ENOSYS
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
- glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("mkdir %s: %s", dir.FullPath(), req.Name)
newEntry := &filer_pb.Entry{
Name: req.Name,
@@ -221,9 +221,9 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("mkdir: %v", request)
+ log.Debugf("mkdir: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return err
}
@@ -238,20 +238,20 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
return node, nil
}
- glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
- glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
+ log.Tracef("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
dirPath := util.FullPath(dir.FullPath())
visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
if visitErr != nil {
- glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
+ log.Errorf("dir Lookup %s: %v", dirPath, visitErr)
return nil, fuse.EIO
}
cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
@@ -261,14 +261,14 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
entry := cachedEntry.ToProtoEntry()
if entry == nil {
- // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
+ // log.Tracef("dir Lookup cache miss %s", fullFilePath)
entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
- glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
+ log.Debugf("dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
} else {
- glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
+ log.Tracef("dir Lookup cache hit %s", fullFilePath)
}
if entry != nil {
@@ -293,13 +293,13 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
return node, nil
}
- glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
+ log.Tracef("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
+ log.Tracef("dir ReadDirAll %s", dir.FullPath())
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@@ -316,12 +316,12 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
dirPath := util.FullPath(dir.FullPath())
if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
- glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
+ log.Errorf("dir ReadDirAll %s: %v", dirPath, err)
return nil, fuse.EIO
}
listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
if listErr != nil {
- glog.Errorf("list meta cache: %v", listErr)
+ log.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}
for _, cachedEntry := range listedEntries {
@@ -352,11 +352,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
}
// first, ensure the filer store can correctly delete
- glog.V(3).Infof("remove file: %v", req)
+ log.Tracef("remove file: %v", req)
isDeleteData := entry.HardLinkCounter <= 1
err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
if err != nil {
- glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Tracef("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.ENOENT
}
@@ -389,11 +389,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- glog.V(3).Infof("remove directory entry: %v", req)
+ log.Tracef("remove directory entry: %v", req)
ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
if err != nil {
- glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ log.Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
if strings.Contains(err.Error(), "non-empty") {
return fuse.EEXIST
}
@@ -410,7 +410,7 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
+ log.Tracef("%v dir setattr %+v", dir.FullPath(), req)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -438,7 +438,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("dir Setxattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -454,7 +454,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
+ log.Tracef("dir Removexattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -470,7 +470,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e
func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
+ log.Tracef("dir Listxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -485,7 +485,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
func (dir *Dir) Forget() {
- glog.V(4).Infof("Forget dir %s", dir.FullPath())
+ log.Tracef("Forget dir %s", dir.FullPath())
dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
@@ -517,10 +517,10 @@ func (dir *Dir) saveEntry() error {
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("save dir entry: %v", request)
+ log.Debugf("save dir entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+ log.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index ba3280f03..3a5d4d12e 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
@@ -26,10 +26,10 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
oldFile, ok := old.(*File)
if !ok {
- glog.Errorf("old node is not a file: %+v", old)
+ log.Errorf("old node is not a file: %+v", old)
}
- glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
+ log.Tracef("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
return nil, err
@@ -69,13 +69,13 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
- glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
@@ -96,7 +96,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
+ log.Tracef("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
@@ -121,7 +121,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
+ log.Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
@@ -147,7 +147,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
+ log.Tracef("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
return entry.Attributes.SymlinkTarget, nil
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index 3f73d0eb6..4907d658a 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -6,7 +6,7 @@ import (
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -18,12 +18,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
- glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+ log.Tracef("dir Rename %s => %s", oldPath, newPath)
// find local old entry
oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
if err != nil {
- glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
+ log.Errorf("dir Rename can not find source %s : %v", oldPath, err)
return fuse.ENOENT
}
@@ -41,7 +41,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
_, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
- glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
+ log.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
@@ -49,18 +49,18 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
})
if err != nil {
- glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
// TODO: replicate renaming logic on filer
if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
- glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldEntry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
- glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
+ log.Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 11089186f..451bc0d4b 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -41,7 +41,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
- glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
+ log.Tracef("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
@@ -111,7 +111,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
if err != nil {
- glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
+ log.Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
pages.chunkSaveErrChan <- err
return
}
@@ -120,7 +120,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
pages.chunkAddLock.Lock()
defer pages.chunkAddLock.Unlock()
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
- glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
+ log.Tracef("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
}
if pages.f.wfs.concurrentWriters != nil {
diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go
index 1404bf78c..5757c00a8 100644
--- a/weed/filesys/dirty_page_interval.go
+++ b/weed/filesys/dirty_page_interval.go
@@ -30,12 +30,12 @@ func (list *IntervalLinkedList) Size() int64 {
return list.Tail.Offset + list.Tail.Size - list.Head.Offset
}
func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
- // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
+ // log.Tracef("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
list.Tail.Next = node
list.Tail = node
}
func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
- // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
+ // log.Tracef("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
node.Next = list.Head
list.Head = node
}
@@ -46,7 +46,7 @@ func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
if nodeStart < nodeStop {
- // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
+ // log.Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
}
@@ -144,7 +144,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
}
if prevList != nil && nextList != nil {
- // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
+ // log.Tracef("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
prevList.Tail.Next = nextList.Head
prevList.Tail = nextList.Tail
c.removeList(nextList)
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 3bffa156e..6dcc7ac7c 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -11,7 +11,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -45,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
- glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+ log.Tracef("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
entry := file.entry
if file.isOpen <= 0 || entry == nil {
@@ -60,7 +60,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
attr.Size = filer.FileSize(entry)
if file.isOpen > 0 {
attr.Size = entry.Attributes.FileSize
- glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
+ log.Tracef("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
}
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
@@ -78,7 +78,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("file Getxattr %s", file.fullpath())
+ log.Tracef("file Getxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -90,13 +90,13 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
- glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
+ log.Tracef("file %v open %+v", file.fullpath(), req)
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
resp.Handle = fuse.HandleID(handle.handle)
- glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+ log.Tracef("%v file open handle id = %d", file.fullpath(), handle.handle)
return handle, nil
@@ -104,7 +104,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
+ log.Tracef("%v file setattr %+v", file.fullpath(), req)
_, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -123,7 +123,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if req.Valid.Size() {
- glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
+ log.Tracef("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
if req.Size < filer.FileSize(file.entry) {
// fmt.Printf("truncate %v \n", fullPath)
var chunks []*filer_pb.FileChunk
@@ -135,10 +135,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
int64Size = int64(req.Size) - chunk.Offset
if int64Size > 0 {
chunks = append(chunks, chunk)
- glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
+ log.Tracef("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
chunk.Size = uint64(int64Size)
} else {
- glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
+ log.Tracef("truncated whole chunk %+v\n", chunk.GetFileIdString())
truncatedChunks = append(truncatedChunks, chunk)
}
}
@@ -195,7 +195,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
+ log.Tracef("file Setxattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -212,7 +212,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
+ log.Tracef("file Removexattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -229,7 +229,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("file Listxattr %s", file.fullpath())
+ log.Tracef("file Listxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@@ -247,14 +247,14 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
+ log.Tracef("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
func (file *File) Forget() {
t := util.NewFullPath(file.dir.FullPath(), file.Name)
- glog.V(4).Infof("Forget file %s", t)
+ log.Tracef("Forget file %s", t)
file.wfs.fsNodeCache.DeleteFsNode(t)
}
@@ -271,13 +271,13 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
}
entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
- glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Tracef("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return entry, err
}
if entry != nil {
file.setEntry(entry)
} else {
- glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Warnf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
}
return entry, nil
}
@@ -319,7 +319,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
file.reader = nil
- glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+ log.Tracef("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
@@ -348,10 +348,10 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
Signatures: []int32{file.wfs.signature},
}
- glog.V(4).Infof("save file entry: %v", request)
+ log.Tracef("save file entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ log.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 54410a0ba..80ae6ae99 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -14,7 +14,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -57,7 +57,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
+ log.Tracef("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
fh.RLock()
defer fh.RUnlock()
@@ -82,12 +82,12 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
}
if err != nil {
- glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
+ log.Warnf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
return fuse.EIO
}
if totalRead > int64(len(buff)) {
- glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
+ log.Warnf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
totalRead = min(int64(len(buff)), totalRead)
}
// resp.Data = buff[:totalRead]
@@ -106,7 +106,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
fileSize := int64(filer.FileSize(fh.f.entry))
if fileSize == 0 {
- glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+ log.Debugf("empty fh %v", fh.f.fullpath())
return 0, io.EOF
}
@@ -127,10 +127,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
totalRead, err := fh.f.reader.ReadAt(buff, offset)
if err != nil && err != io.EOF {
- glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+ log.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+ log.Tracef("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
return int64(totalRead), err
}
@@ -150,7 +150,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
}
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
- glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
+ log.Tracef("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
fh.dirtyPages.AddPage(req.Offset, data)
@@ -169,7 +169,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
- glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
+ log.Tracef("Release %v fh %d", fh.f.fullpath(), fh.handle)
fh.Lock()
defer fh.Unlock()
@@ -177,7 +177,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
fh.f.isOpen--
if fh.f.isOpen < 0 {
- glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
+ log.Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
fh.f.isOpen = 0
return nil
}
@@ -185,7 +185,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
if fh.f.isOpen == 0 {
if err := fh.doFlush(ctx, req.Header); err != nil {
- glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
+ log.Errorf("Release doFlush %s: %v", fh.f.Name, err)
}
// stop the goroutine
@@ -211,7 +211,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
// flush works at fh level
// send the data to the OS
- glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
+ log.Tracef("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
fh.dirtyPages.saveExistingPagesToStorage()
@@ -250,9 +250,9 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
Signatures: []int32{fh.f.wfs.signature},
}
- glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
+ log.Tracef("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
for i, chunk := range fh.f.entry.Chunks {
- glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+ log.Tracef("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
@@ -261,7 +261,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
if manifestErr != nil {
// not good, but should be ok
- glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
+ log.Infof("MaybeManifestize: %v", manifestErr)
}
fh.f.entry.Chunks = append(chunks, manifestChunks...)
@@ -269,7 +269,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
- glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ log.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
@@ -283,7 +283,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
}
if err != nil {
- glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+ log.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
return fuse.EIO
}
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index 4b282253d..2db307cd9 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -8,7 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
)
@@ -44,7 +44,7 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
}
if err := store.Initialize(config, ""); err != nil {
- glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+ log.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
}
return filer.NewFilerStoreWrapper(store)
@@ -72,7 +72,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
// skip the unnecessary deletion
// leave the update to the following InsertEntry operation
} else {
- glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name())
+ log.Tracef("DeleteEntry %s/%s", oldPath, oldPath.Name())
if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
return err
}
@@ -85,7 +85,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
if newEntry != nil {
newDir, _ := newEntry.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
- glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name())
+ log.Tracef("InsertEntry %s/%s", newDir, newEntry.Name())
if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
return err
}
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 4089cea28..5c2a8fc46 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -14,13 +14,13 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
- glog.V(4).Infof("ReadDirAllEntries %s ...", path)
+ log.Tracef("ReadDirAllEntries %s ...", path)
util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
- glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+ log.Infof("read %s: %v", entry.FullPath, err)
return err
}
if entry.IsDirectory() {
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
index f9973f436..2e9475675 100644
--- a/weed/filesys/meta_cache/meta_cache_subscribe.go
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -28,7 +28,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
var newEntry *filer.Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(dir, message.OldEntry.Name)
- glog.V(4).Infof("deleting %v", oldPath)
+ log.Tracef("deleting %v", oldPath)
}
if message.NewEntry != nil {
@@ -36,7 +36,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
- glog.V(4).Infof("creating %v", key)
+ log.Tracef("creating %v", key)
newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
@@ -73,13 +73,13 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
if err := processEventFn(resp); err != nil {
- glog.Fatalf("process %v: %v", resp, err)
+ log.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
- glog.Errorf("subscribing filer meta change: %v", err)
+ log.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index cd14e8032..a6a77874e 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -17,7 +17,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -128,7 +128,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
fullpath := file.fullpath()
- glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
+ log.Tracef("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
@@ -156,7 +156,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
- glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+ log.Tracef("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
delete(wfs.handles, fullpath.AsInode())
@@ -166,7 +166,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
// Statfs is called to obtain file system metadata. Implements fuse.FSStatfser
func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
- glog.V(4).Infof("reading fs stats: %+v", req)
+ log.Tracef("reading fs stats: %+v", req)
if wfs.stats.lastChecked < time.Now().Unix()-20 {
@@ -178,13 +178,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
}
- glog.V(4).Infof("reading filer stats: %+v", request)
+ log.Tracef("reading filer stats: %+v", request)
resp, err := client.Statistics(context.Background(), request)
if err != nil {
- glog.V(0).Infof("reading filer stats %v: %v", request, err)
+ log.Infof("reading filer stats %v: %v", request, err)
return err
}
- glog.V(4).Infof("read filer stats: %+v", resp)
+ log.Tracef("read filer stats: %+v", resp)
wfs.stats.TotalSize = resp.TotalSize
wfs.stats.UsedSize = resp.UsedSize
@@ -194,7 +194,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
})
if err != nil {
- glog.V(0).Infof("filer Statistics: %v", err)
+ log.Infof("filer Statistics: %v", err)
return err
}
}
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index a245b6795..50f2ec2f0 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -6,7 +6,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -24,7 +24,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
fileIds = append(fileIds, dChunk.GetFileIdString())
@@ -49,7 +49,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
m := make(map[string]operation.LookupResult)
- glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
+ log.Tracef("deleteFileIds lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index 83e40e7f5..2540cfd5a 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -6,7 +6,7 @@ import (
"io"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -32,7 +32,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@@ -55,11 +55,11 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
- glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+ log.Infof("upload data %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
- glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+ log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
}
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
index 92e43b675..63bded303 100644
--- a/weed/filesys/xattr.go
+++ b/weed/filesys/xattr.go
@@ -111,7 +111,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
fullpath := util.NewFullPath(dir, name)
- // glog.V(3).Infof("read entry cache miss %s", fullpath)
+ // log.Tracef("read entry cache miss %s", fullpath)
// read from async meta cache
meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))