Diffstat (limited to 'weed/filesys')
-rw-r--r--  weed/filesys/dir.go                              109
-rw-r--r--  weed/filesys/dir_link.go                          91
-rw-r--r--  weed/filesys/dir_rename.go                        20
-rw-r--r--  weed/filesys/dirty_page.go                        52
-rw-r--r--  weed/filesys/dirty_page_interval.go               15
-rw-r--r--  weed/filesys/dirty_page_interval_test.go          24
-rw-r--r--  weed/filesys/file.go                              98
-rw-r--r--  weed/filesys/filehandle.go                       150
-rw-r--r--  weed/filesys/fscache.go                          212
-rw-r--r--  weed/filesys/fscache_test.go                     115
-rw-r--r--  weed/filesys/meta_cache/id_mapper.go             101
-rw-r--r--  weed/filesys/meta_cache/meta_cache.go             64
-rw-r--r--  weed/filesys/meta_cache/meta_cache_init.go         8
-rw-r--r--  weed/filesys/meta_cache/meta_cache_subscribe.go   24
-rw-r--r--  weed/filesys/wfs.go                               47
-rw-r--r--  weed/filesys/wfs_deletion.go                      17
-rw-r--r--  weed/filesys/wfs_write.go                          4
-rw-r--r--  weed/filesys/xattr.go                              2
18 files changed, 925 insertions(+), 228 deletions(-)
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 2214b1ac7..7d93dbd9f 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -11,7 +11,7 @@ import (
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -82,9 +82,9 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
attr.Valid = time.Hour
- attr.Uid = dir.wfs.option.MountUid
- attr.Gid = dir.wfs.option.MountGid
- attr.Mode = dir.wfs.option.MountMode
+ attr.Uid = dir.entry.Attributes.Uid
+ attr.Gid = dir.entry.Attributes.Gid
+ attr.Mode = os.FileMode(dir.entry.Attributes.FileMode)
attr.Crtime = dir.wfs.option.MountCtime
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
@@ -101,19 +101,26 @@ func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
}
func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
- return &File{
- Name: name,
- dir: dir,
- wfs: dir.wfs,
- entry: entry,
- entryViewCache: nil,
- }
+ f := dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
+ return &File{
+ Name: name,
+ dir: dir,
+ wfs: dir.wfs,
+ entry: entry,
+ entryViewCache: nil,
+ }
+ })
+ f.(*File).dir = dir // in case dir node was created later
+ return f
}
func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
- return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
-
+ d := dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
+ return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
+ })
+ d.(*Dir).parent = dir // in case dir node was created later
+ return d
}
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
@@ -135,19 +142,25 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
TtlSec: dir.wfs.option.TtlSec,
},
},
- OExcl: req.Flags&fuse.OpenExclusive != 0,
+ OExcl: req.Flags&fuse.OpenExclusive != 0,
+ Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
if err := filer_pb.CreateEntry(client, request); err != nil {
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
+ glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.EIO
}
- dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
}); err != nil {
@@ -161,7 +174,6 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
node = dir.newFile(req.Name, request.Entry)
file := node.(*File)
- file.isOpen++
fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
return file, fh, nil
@@ -185,9 +197,13 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ dir.wfs.mapPbIdFromLocalToFiler(newEntry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(newEntry)
+
request := &filer_pb.CreateEntryRequest{
- Directory: dir.FullPath(),
- Entry: newEntry,
+ Directory: dir.FullPath(),
+ Entry: newEntry,
+ Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("mkdir: %v", request)
@@ -196,7 +212,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
return err
}
- dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
})
@@ -251,6 +267,9 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
resp.Attr.Gid = entry.Attributes.Gid
resp.Attr.Uid = entry.Attributes.Uid
+ if entry.HardLinkCounter > 0 {
+ resp.Attr.Nlink = uint32(entry.HardLinkCounter)
+ }
return node, nil
}
@@ -261,7 +280,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- glog.V(3).Infof("dir ReadDirAll %s", dir.FullPath())
+ glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@@ -310,41 +329,51 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
return nil
}
- dir.wfs.deleteFileChunks(entry.Chunks)
-
- dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
-
+ // first, ensure the filer store can correctly delete
glog.V(3).Infof("remove file: %v", req)
- err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, false, false, false, false)
+ isDeleteData := entry.HardLinkCounter <= 1
+ err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
if err != nil {
glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.ENOENT
}
+ // then, delete meta cache and fsNode cache
+ dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
+ dir.wfs.fsNodeCache.DeleteFsNode(filePath)
+
+ // delete the chunks last
+ if isDeleteData {
+ dir.wfs.deleteFileChunks(entry.Chunks)
+ }
+
return nil
}
func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- t := util.NewFullPath(dir.FullPath(), req.Name)
-
- dir.wfs.metaCache.DeleteEntry(context.Background(), t)
-
glog.V(3).Infof("remove directory entry: %v", req)
- err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false, false)
+ err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false, false, []int32{dir.wfs.signature})
if err != nil {
- glog.V(3).Infof("not found remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ if strings.Contains(err.Error(), "non-empty") {
+ return fuse.EEXIST
+ }
return fuse.ENOENT
}
+ t := util.NewFullPath(dir.FullPath(), req.Name)
+ dir.wfs.metaCache.DeleteEntry(context.Background(), t)
+ dir.wfs.fsNodeCache.DeleteFsNode(t)
+
return nil
}
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(3).Infof("%v dir setattr %+v", dir.FullPath(), req)
+ glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
if err := dir.maybeLoadEntry(); err != nil {
return err
@@ -419,7 +448,9 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
func (dir *Dir) Forget() {
- glog.V(3).Infof("Forget dir %s", dir.FullPath())
+ glog.V(4).Infof("Forget dir %s", dir.FullPath())
+
+ dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
func (dir *Dir) maybeLoadEntry() error {
@@ -440,19 +471,23 @@ func (dir *Dir) saveEntry() error {
return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ dir.wfs.mapPbIdFromLocalToFiler(dir.entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(dir.entry)
+
request := &filer_pb.UpdateEntryRequest{
- Directory: parentDir,
- Entry: dir.entry,
+ Directory: parentDir,
+ Entry: dir.entry,
+ Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("save dir entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+ glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
- dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
})
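
Note on the dir.go hunks above: every filer mutation now follows the same bracket — translate local uid/gid to filer ids before sending, translate back afterwards so the in-memory entry keeps local ids, and stamp the request with this mount's signature. A condensed sketch of the pattern, using only names from this diff (the surrounding receiver context is assumed):

    err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
        dir.wfs.mapPbIdFromLocalToFiler(request.Entry)       // local ids -> filer ids
        defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) // restore local ids afterwards

        if err := filer_pb.CreateEntry(client, request); err != nil {
            return fuse.EIO
        }
        // keep the local meta cache in sync with what the filer now has
        dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
        return nil
    })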
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index 4990e743c..f6bc41b56 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -2,23 +2,101 @@ package filesys
import (
"context"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"syscall"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
+var _ = fs.NodeLinker(&Dir{})
var _ = fs.NodeSymlinker(&Dir{})
var _ = fs.NodeReadlinker(&File{})
+const (
+ HARD_LINK_MARKER = '\x01'
+)
+
+func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
+
+ oldFile, ok := old.(*File)
+ if !ok {
+ glog.Errorf("old node is not a file: %+v", old)
+ return nil, fuse.EIO
+ }
+
+ glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
+
+ if err := oldFile.maybeLoadEntry(ctx); err != nil {
+ return nil, err
+ }
+
+ // update old file to hardlink mode
+ if len(oldFile.entry.HardLinkId) == 0 {
+ oldFile.entry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER)
+ oldFile.entry.HardLinkCounter = 1
+ }
+ oldFile.entry.HardLinkCounter++
+ updateOldEntryRequest := &filer_pb.UpdateEntryRequest{
+ Directory: oldFile.dir.FullPath(),
+ Entry: oldFile.entry,
+ Signatures: []int32{dir.wfs.signature},
+ }
+
+ // create the new directory entry in hard-link mode, sharing the same hard link id and chunks
+ request := &filer_pb.CreateEntryRequest{
+ Directory: dir.FullPath(),
+ Entry: &filer_pb.Entry{
+ Name: req.NewName,
+ IsDirectory: false,
+ Attributes: oldFile.entry.Attributes,
+ Chunks: oldFile.entry.Chunks,
+ Extended: oldFile.entry.Extended,
+ HardLinkId: oldFile.entry.HardLinkId,
+ HardLinkCounter: oldFile.entry.HardLinkCounter,
+ },
+ Signatures: []int32{dir.wfs.signature},
+ }
+
+ // apply changes to the filer, and also apply to local metaCache
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
+ if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
+ glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ return fuse.EIO
+ }
+ dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
+
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ return fuse.EIO
+ }
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+
+ return nil
+ })
+
+ // create new file node
+ newNode := dir.newFile(req.NewName, request.Entry)
+ newFile := newNode.(*File)
+ if err := newFile.maybeLoadEntry(ctx); err != nil {
+ return nil, err
+ }
+
+ return newFile, err
+
+}
+
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(3).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
+ glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
@@ -34,15 +112,20 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
SymlinkTarget: req.Target,
},
},
+ Signatures: []int32{dir.wfs.signature},
}
err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
- dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
})
@@ -63,7 +146,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget)
+ glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget)
return file.entry.Attributes.SymlinkTarget, nil
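
A recap of the hard-link bookkeeping introduced above (distilled from the Link and removeOneFile hunks, not new behavior): the first Link call assigns a 17-byte id — 16 random bytes plus the HARD_LINK_MARKER byte — and the counter tracks one reference per directory entry, so chunk data is deleted only when the last reference goes away:

    // first link: promote the plain file to hard-link mode
    if len(entry.HardLinkId) == 0 {
        entry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER)
        entry.HardLinkCounter = 1 // the original directory entry
    }
    entry.HardLinkCounter++ // the newly created directory entry

    // on remove (see removeOneFile in dir.go above):
    isDeleteData := entry.HardLinkCounter <= 1 // last reference -> delete chunks too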
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index 120dffd1d..3f73d0eb6 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -23,12 +23,14 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
// find local old entry
oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
if err != nil {
- glog.V(0).Infof("dir Rename can not find source %s : %v", oldPath, err)
+ glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
return fuse.ENOENT
}
// update remote filer
err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
request := &filer_pb.AtomicRenameEntryRequest{
OldDirectory: dir.FullPath(),
@@ -37,8 +39,9 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
NewName: req.NewName,
}
- _, err := client.AtomicRenameEntry(context.Background(), request)
+ _, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
+ glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
@@ -62,7 +65,18 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
}
// fmt.Printf("rename path: %v => %v\n", oldPath, newPath)
- delete(dir.wfs.handles, oldPath.AsInode())
+ dir.wfs.fsNodeCache.Move(oldPath, newPath)
+
+ // change file handle
+ dir.wfs.handlesLock.Lock()
+ defer dir.wfs.handlesLock.Unlock()
+ inodeId := oldPath.AsInode()
+ existingHandle, found := dir.wfs.handles[inodeId]
+ if !found || existingHandle == nil {
+ return err
+ }
+ delete(dir.wfs.handles, inodeId)
+ dir.wfs.handles[newPath.AsInode()] = existingHandle
return err
}
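
Rename now keeps two in-memory structures coherent with the filer: the fs.Node trie (fsNodeCache.Move) and the open-handle map, which is keyed by the inode derived from the path. A condensed sketch of that invariant (names from this diff):

    wfs.fsNodeCache.Move(oldPath, newPath) // re-root the cached node subtree

    wfs.handlesLock.Lock()
    defer wfs.handlesLock.Unlock()
    if fh, found := wfs.handles[oldPath.AsInode()]; found && fh != nil {
        delete(wfs.handles, oldPath.AsInode())
        wfs.handles[newPath.AsInode()] = fh // an open handle stays valid across rename
    }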
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 46d20e466..1ab7d0961 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -25,17 +25,11 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
}
}
-func (pages *ContinuousDirtyPages) releaseResource() {
-}
-
var counter = int32(0)
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
- pages.lock.Lock()
- defer pages.lock.Unlock()
-
- glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data)))
+ glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
@@ -85,14 +79,6 @@ func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chun
return
}
-func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) {
-
- pages.lock.Lock()
- defer pages.lock.Unlock()
-
- return pages.saveExistingPagesToStorage()
-}
-
func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) {
var hasSavedData bool
@@ -106,7 +92,9 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer
}
if err == nil {
- chunks = append(chunks, chunk)
+ if chunk != nil {
+ chunks = append(chunks, chunk)
+ }
} else {
return
}
@@ -121,14 +109,21 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *fi
return nil, false, nil
}
+ fileSize := int64(pages.f.entry.Attributes.FileSize)
for {
- chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
+ chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
+ if chunkSize == 0 {
+ return
+ }
+ chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)
if err == nil {
- hasSavedData = true
- glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
+ if chunk != nil {
+ hasSavedData = true
+ }
+ glog.V(4).Infof("saveToStorage %s %s [%d,%d) of %d bytes", pages.f.fullpath(), chunk.GetFileIdString(), maxList.Offset(), maxList.Offset()+chunkSize, fileSize)
return
} else {
- glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
+ glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+chunkSize, err)
time.Sleep(5 * time.Second)
}
}
@@ -139,6 +134,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
dir, _ := pages.f.fullpath().DirAndName()
+ reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(dir)(reader, pages.f.Name, offset)
if err != nil {
return nil, err
@@ -149,6 +145,13 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
}
+func maxUint64(x, y uint64) uint64 {
+ if x > y {
+ return x
+ }
+ return y
+}
+
func max(x, y int64) int64 {
if x > y {
return x
@@ -162,11 +165,6 @@ func min(x, y int64) int64 {
return y
}
-func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) {
-
- pages.lock.Lock()
- defer pages.lock.Unlock()
-
- return pages.intervals.ReadData(data, startOffset)
-
+func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
+ return pages.intervals.ReadDataAt(data, startOffset)
}
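
The flush path above clamps every saved chunk to the entry's logical FileSize, so bytes buffered past a shrinking truncate are never persisted; io.LimitReader enforces the same bound on the reader side. A sketch of the clamp, using the min helper defined above (note the sketch guards <= 0 where the hunk checks == 0, to also cover an interval starting beyond the truncated size):

    fileSize := int64(pages.f.entry.Attributes.FileSize)
    chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
    if chunkSize <= 0 {
        return // the buffered interval lies entirely beyond the file size
    }
    reader := io.LimitReader(maxList.ToReader(), chunkSize) // hard upper bound on saved bytes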
diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go
index ec94c6df1..afa2755ed 100644
--- a/weed/filesys/dirty_page_interval.go
+++ b/weed/filesys/dirty_page_interval.go
@@ -3,7 +3,6 @@ package filesys
import (
"bytes"
"io"
- "math"
)
type IntervalNode struct {
@@ -186,25 +185,15 @@ func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) {
}
-func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) {
- var minOffset int64 = math.MaxInt64
- var maxStop int64
+func (c *ContinuousIntervals) ReadDataAt(data []byte, startOffset int64) (maxStop int64) {
for _, list := range c.lists {
start := max(startOffset, list.Offset())
stop := min(startOffset+int64(len(data)), list.Offset()+list.Size())
- if start <= stop {
+ if start < stop {
list.ReadData(data[start-startOffset:], start, stop)
- minOffset = min(minOffset, start)
maxStop = max(maxStop, stop)
}
}
-
- if minOffset == math.MaxInt64 {
- return 0, 0
- }
-
- offset = minOffset
- size = int(maxStop - offset)
return
}
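
ReadDataAt replaces ReadData's (offset, size) pair with a single maxStop: it overlays every buffered interval intersecting [startOffset, startOffset+len(data)) onto the caller's buffer and reports the furthest byte written. The FileHandle.Read hunk later in this diff consumes it like this (a condensed sketch):

    totalRead, err := fh.readFromChunks(buff, req.Offset) // persisted chunks first
    if err == nil {
        maxStop := fh.dirtyPages.ReadDirtyDataAt(buff, req.Offset) // overlay unflushed writes
        totalRead = max(maxStop-req.Offset, totalRead)
    }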
diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go
index ab3b37b7c..d02ad27fd 100644
--- a/weed/filesys/dirty_page_interval_test.go
+++ b/weed/filesys/dirty_page_interval_test.go
@@ -2,6 +2,7 @@ package filesys
import (
"bytes"
+ "math/rand"
"testing"
)
@@ -66,6 +67,29 @@ func TestContinuousIntervals_RealCase1(t *testing.T) {
}
+func TestRandomWrites(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ data := make([]byte, 1024)
+
+ for i := 0; i < 1024; i++ {
+
+ start, stop := rand.Intn(len(data)), rand.Intn(len(data))
+ if start > stop {
+ start, stop = stop, start
+ }
+
+ rand.Read(data[start : stop+1])
+
+ c.AddInterval(data[start:stop+1], int64(start))
+
+ expectedData(t, c, 0, data...)
+
+ }
+
+}
+
func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) {
start, stop := int64(offset), int64(offset+len(data))
for _, list := range c.lists {
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index f96cd8118..98ee010d8 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -7,12 +7,13 @@ import (
"sort"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
)
const blockSize = 512
@@ -32,9 +33,10 @@ type File struct {
dir *Dir
wfs *WFS
entry *filer_pb.Entry
- entryViewCache []filer2.VisibleInterval
+ entryViewCache []filer.VisibleInterval
isOpen int
reader io.ReaderAt
+ dirtyMetadata bool
}
func (file *File) fullpath() util.FullPath {
@@ -54,7 +56,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Inode = file.fullpath().AsInode()
attr.Valid = time.Second
attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
- attr.Size = filer2.TotalSize(file.entry.Chunks)
+ attr.Size = filer.FileSize(file.entry)
if file.isOpen > 0 {
attr.Size = file.entry.Attributes.FileSize
glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
@@ -65,6 +67,9 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Uid = file.entry.Attributes.Uid
attr.Blocks = attr.Size/blockSize + 1
attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
+ if file.entry.HardLinkCounter > 0 {
+ attr.Nlink = uint32(file.entry.HardLinkCounter)
+ }
return nil
@@ -85,13 +90,11 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
- file.isOpen++
-
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
resp.Handle = fuse.HandleID(handle.handle)
- glog.V(3).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+ glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
return handle, nil
@@ -99,57 +102,90 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
+ glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
if err := file.maybeLoadEntry(ctx); err != nil {
return err
}
+ if file.isOpen > 0 {
+ file.wfs.handlesLock.Lock()
+ fileHandle := file.wfs.handles[file.fullpath().AsInode()]
+ file.wfs.handlesLock.Unlock()
+
+ if fileHandle != nil {
+ fileHandle.Lock()
+ defer fileHandle.Unlock()
+ }
+ }
if req.Valid.Size() {
- glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size)
- if req.Size < filer2.TotalSize(file.entry.Chunks) {
+ glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
+ if req.Size < filer.FileSize(file.entry) {
// fmt.Printf("truncate %v \n", fullPath)
var chunks []*filer_pb.FileChunk
+ var truncatedChunks []*filer_pb.FileChunk
for _, chunk := range file.entry.Chunks {
int64Size := int64(chunk.Size)
if chunk.Offset+int64Size > int64(req.Size) {
+ // this chunk is truncated
int64Size = int64(req.Size) - chunk.Offset
- }
- if int64Size > 0 {
- chunks = append(chunks, chunk)
+ if int64Size > 0 {
+ chunks = append(chunks, chunk)
+ glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
+ chunk.Size = uint64(int64Size)
+ } else {
+ glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
+ truncatedChunks = append(truncatedChunks, chunk)
+ }
}
}
file.entry.Chunks = chunks
file.entryViewCache = nil
file.reader = nil
+ file.wfs.deleteFileChunks(truncatedChunks)
}
file.entry.Attributes.FileSize = req.Size
+ file.dirtyMetadata = true
}
+
if req.Valid.Mode() {
file.entry.Attributes.FileMode = uint32(req.Mode)
+ file.dirtyMetadata = true
}
if req.Valid.Uid() {
file.entry.Attributes.Uid = req.Uid
+ file.dirtyMetadata = true
}
if req.Valid.Gid() {
file.entry.Attributes.Gid = req.Gid
+ file.dirtyMetadata = true
}
if req.Valid.Crtime() {
file.entry.Attributes.Crtime = req.Crtime.Unix()
+ file.dirtyMetadata = true
}
if req.Valid.Mtime() {
file.entry.Attributes.Mtime = req.Mtime.Unix()
+ file.dirtyMetadata = true
+ }
+
+ if req.Valid.Handle() {
+ // fmt.Printf("file handle => %d\n", req.Handle)
}
if file.isOpen > 0 {
return nil
}
+ if !file.dirtyMetadata {
+ return nil
+ }
+
return file.saveEntry()
}
@@ -205,18 +241,19 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
+ glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
func (file *File) Forget() {
t := util.NewFullPath(file.dir.FullPath(), file.Name)
- glog.V(3).Infof("Forget file %s", t)
+ glog.V(4).Infof("Forget file %s", t)
+ file.wfs.fsNodeCache.DeleteFsNode(t)
}
func (file *File) maybeLoadEntry(ctx context.Context) error {
- if file.entry == nil || file.isOpen <= 0 {
+ if (file.entry == nil || len(file.entry.HardLinkId) != 0) && file.isOpen <= 0 {
entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
@@ -232,46 +269,49 @@ func (file *File) maybeLoadEntry(ctx context.Context) error {
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
sort.Slice(chunks, func(i, j int) bool {
+ if chunks[i].Mtime == chunks[j].Mtime {
+ return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
+ }
return chunks[i].Mtime < chunks[j].Mtime
})
- var newVisibles []filer2.VisibleInterval
for _, chunk := range chunks {
- newVisibles = filer2.MergeIntoVisibles(file.entryViewCache, newVisibles, chunk)
- t := file.entryViewCache[:0]
- file.entryViewCache = newVisibles
- newVisibles = t
+ file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
}
file.reader = nil
- glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+ glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, chunks...)
}
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entry = entry
- file.entryViewCache, _ = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(file.wfs), file.entry.Chunks)
+ file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), file.entry.Chunks)
file.reader = nil
}
func (file *File) saveEntry() error {
return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ file.wfs.mapPbIdFromLocalToFiler(file.entry)
+ defer file.wfs.mapPbIdFromFilerToLocal(file.entry)
+
request := &filer_pb.UpdateEntryRequest{
- Directory: file.dir.FullPath(),
- Entry: file.entry,
+ Directory: file.dir.FullPath(),
+ Entry: file.entry,
+ Signatures: []int32{file.wfs.signature},
}
- glog.V(1).Infof("save file entry: %v", request)
+ glog.V(4).Infof("save file entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
- file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
})
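
Two related details in the file.go hunks: Setattr only issues saveEntry when some attribute actually changed (the new dirtyMetadata flag), and maybeLoadEntry never trusts a cached hard-linked entry, since another directory entry may have changed its counter or chunks. A sketch of the reload condition (error handling elided):

    // reload when there is no entry, or when the entry is hard-linked
    // (its state can change via other directory entries), but never
    // while the file is held open.
    if (file.entry == nil || len(file.entry.HardLinkId) != 0) && file.isOpen <= 0 {
        entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
        if err == nil && entry != nil {
            file.setEntry(entry)
        }
    }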
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index ca35bfd02..660bbf076 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -7,21 +7,23 @@ import (
"math"
"net/http"
"os"
+ "sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type FileHandle struct {
// cache file has been written to
- dirtyPages *ContinuousDirtyPages
- contentType string
- dirtyMetadata bool
- handle uint64
+ dirtyPages *ContinuousDirtyPages
+ contentType string
+ handle uint64
+ sync.RWMutex
f *File
RequestId fuse.RequestID // unique ID for request
@@ -39,8 +41,9 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
Gid: gid,
}
if fh.f.entry != nil {
- fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
+ fh.f.entry.Attributes.FileSize = filer.FileSize(fh.f.entry)
}
+
return fh
}
@@ -54,43 +57,58 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
+ glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
+ fh.RLock()
+ defer fh.RUnlock()
+
+ if req.Size <= 0 {
+ return nil
+ }
- buff := make([]byte, req.Size)
+ buff := resp.Data[:cap(resp.Data)]
+ if req.Size > cap(resp.Data) {
+ // should not happen
+ buff = make([]byte, req.Size)
+ }
totalRead, err := fh.readFromChunks(buff, req.Offset)
if err == nil {
- dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset)
- if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
- totalRead = dirtyOffset + int64(dirtySize) - req.Offset
- }
+ maxStop := fh.readFromDirtyPages(buff, req.Offset)
+ totalRead = max(maxStop-req.Offset, totalRead)
}
- resp.Data = buff[:totalRead]
-
if err != nil {
- glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
- return fuse.EIO
+ glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
+ return nil
+ }
+
+ if totalRead > int64(len(buff)) {
+ glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
+ totalRead = min(int64(len(buff)), totalRead)
}
+ // resp.Data = buff[:totalRead]
+ resp.Data = buff
return err
}
-func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) {
- return fh.dirtyPages.ReadDirtyData(buff, startOffset)
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) {
+ maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset)
+ return
}
func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
- // this value should come from the filer instead of the old f
- if len(fh.f.entry.Chunks) == 0 {
+ fileSize := int64(filer.FileSize(fh.f.entry))
+
+ if fileSize == 0 {
glog.V(1).Infof("empty fh %v", fh.f.fullpath())
- return 0, nil
+ return 0, io.EOF
}
var chunkResolveErr error
if fh.f.entryViewCache == nil {
- fh.f.entryViewCache, chunkResolveErr = filer2.NonOverlappingVisibleIntervals(filer2.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
+ fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(filer.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
if chunkResolveErr != nil {
return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}
@@ -98,8 +116,8 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
}
if fh.f.reader == nil {
- chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
- fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
+ chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
+ fh.f.reader = filer.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
}
totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -112,7 +130,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- // glog.V(0).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+ glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
return int64(totalRead), err
}
@@ -120,12 +138,15 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
// Write to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
+ fh.Lock()
+ defer fh.Unlock()
+
// write the request to volume servers
data := make([]byte, len(req.Data))
copy(data, req.Data)
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
- // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
+ glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
if err != nil {
@@ -138,14 +159,14 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
if req.Offset == 0 {
// detect mime type
fh.contentType = http.DetectContentType(data)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
if len(chunks) > 0 {
fh.f.addChunks(chunks)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
return nil
@@ -153,37 +174,53 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
- glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
+ glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
+
+ fh.Lock()
+ defer fh.Unlock()
fh.f.isOpen--
- if fh.f.isOpen <= 0 {
- fh.dirtyPages.releaseResource()
+ if fh.f.isOpen < 0 {
+ glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
+ fh.f.isOpen = 0
+ return nil
+ }
+
+ if fh.f.isOpen == 0 {
+ fh.doFlush(ctx, req.Header)
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
- fh.f.entryViewCache = nil
- fh.f.reader = nil
return nil
}
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
+
+ fh.Lock()
+ defer fh.Unlock()
+
+ return fh.doFlush(ctx, req.Header)
+}
+
+func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
// fflush works at fh level
// send the data to the OS
- glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
+ glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
- chunks, err := fh.dirtyPages.FlushToStorage()
+ chunks, err := fh.dirtyPages.saveExistingPagesToStorage()
if err != nil {
glog.Errorf("flush %s: %v", fh.f.fullpath(), err)
return fuse.EIO
}
if len(chunks) > 0 {
+
fh.f.addChunks(chunks)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
- if !fh.dirtyMetadata {
+ if !fh.f.dirtyMetadata {
return nil
}
@@ -192,10 +229,10 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
if fh.f.entry.Attributes != nil {
fh.f.entry.Attributes.Mime = fh.contentType
if fh.f.entry.Attributes.Uid == 0 {
- fh.f.entry.Attributes.Uid = req.Uid
+ fh.f.entry.Attributes.Uid = header.Uid
}
if fh.f.entry.Attributes.Gid == 0 {
- fh.f.entry.Attributes.Gid = req.Gid
+ fh.f.entry.Attributes.Gid = header.Gid
}
if fh.f.entry.Attributes.Crtime == 0 {
fh.f.entry.Attributes.Crtime = time.Now().Unix()
@@ -207,41 +244,42 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
}
request := &filer_pb.CreateEntryRequest{
- Directory: fh.f.dir.FullPath(),
- Entry: fh.f.entry,
+ Directory: fh.f.dir.FullPath(),
+ Entry: fh.f.entry,
+ Signatures: []int32{fh.f.wfs.signature},
}
- glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
+ glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
for i, chunk := range fh.f.entry.Chunks {
- glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
- chunks, garbages := filer2.CompactFileChunks(filer2.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
- chunks, manifestErr := filer2.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
+ manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
+
+ chunks, _ := filer.CompactFileChunks(filer.LookupFn(fh.f.wfs), nonManifestChunks)
+ chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.dir.FullPath()), chunks)
if manifestErr != nil {
// not good, but should be ok
glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
}
- fh.f.entry.Chunks = chunks
- // fh.f.entryViewCache = nil
+ fh.f.entry.Chunks = append(chunks, manifestChunks...)
+ fh.f.entryViewCache = nil
+
+ fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
- fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
-
- fh.f.wfs.deleteFileChunks(garbages)
- for i, chunk := range garbages {
- glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
- }
+ fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
})
if err == nil {
- fh.dirtyMetadata = false
+ fh.f.dirtyMetadata = false
}
if err != nil {
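
Three threads run through the filehandle.go changes: each handle now carries an RWMutex (Read under RLock; Write, Flush, and Release under the write lock), Read reuses the buffer fuse pre-allocates in resp.Data instead of allocating per call, and Flush and Release share a single doFlush path so the last close also persists the entry. A condensed sketch of the read-side buffer reuse:

    fh.RLock()
    defer fh.RUnlock()

    buff := resp.Data[:cap(resp.Data)] // reuse the buffer fuse already sized for this read
    if req.Size > cap(resp.Data) {
        buff = make([]byte, req.Size) // defensive fallback; should not happen
    }
    totalRead, err := fh.readFromChunks(buff, req.Offset)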
diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go
new file mode 100644
index 000000000..fdec8253c
--- /dev/null
+++ b/weed/filesys/fscache.go
@@ -0,0 +1,212 @@
+package filesys
+
+import (
+ "sync"
+
+ "github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type FsCache struct {
+ root *FsNode
+ sync.RWMutex
+}
+type FsNode struct {
+ parent *FsNode
+ node fs.Node
+ name string
+ childrenLock sync.RWMutex
+ children map[string]*FsNode
+}
+
+func newFsCache(root fs.Node) *FsCache {
+ return &FsCache{
+ root: &FsNode{
+ node: root,
+ },
+ }
+}
+
+func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetFsNode(path)
+}
+
+func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return nil
+ }
+ }
+ return t.node
+}
+
+func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.doSetFsNode(path, node)
+}
+
+func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.ensureChild(p)
+ }
+ t.node = node
+}
+
+func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.doGetFsNode(path)
+ if t != nil {
+ return t
+ }
+ t = genNodeFn()
+ c.doSetFsNode(path, t)
+ return t
+}
+
+func (c *FsCache) DeleteFsNode(path util.FullPath) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return
+ }
+ }
+ if t.parent != nil {
+ t.parent.disconnectChild(t)
+ }
+ t.deleteSelf()
+}
+
+// oldPath and newPath are full paths, each including the final name
+func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
+
+ c.Lock()
+ defer c.Unlock()
+
+ // find old node
+ src := c.root
+ for _, p := range oldPath.Split() {
+ src = src.findChild(p)
+ if src == nil {
+ return src
+ }
+ }
+ if src.parent != nil {
+ src.parent.disconnectChild(src)
+ }
+
+ // find new node
+ target := c.root
+ for _, p := range newPath.Split() {
+ target = target.ensureChild(p)
+ }
+ parent := target.parent
+ if dir, ok := src.node.(*Dir); ok {
+ dir.name = target.name // target is a placeholder FsNode carrying the new name, not a Dir
+ }
+ if f, ok := src.node.(*File); ok {
+ f.Name = target.name
+ if f.entry != nil {
+ f.entry.Name = f.Name
+ }
+ }
+ parent.disconnectChild(target)
+
+ target.deleteSelf()
+
+ src.name = target.name
+ src.connectToParent(parent)
+
+ return src
+}
+
+func (n *FsNode) connectToParent(parent *FsNode) {
+ n.parent = parent
+ oldNode := parent.findChild(n.name)
+ if oldNode != nil {
+ oldNode.deleteSelf()
+ }
+ if dir, ok := n.node.(*Dir); ok {
+ if parent.node != nil {
+ dir.parent = parent.node.(*Dir)
+ }
+ }
+ if f, ok := n.node.(*File); ok {
+ if parent.node != nil {
+ f.dir = parent.node.(*Dir)
+ }
+ }
+ parent.childrenLock.Lock() // guard the map being mutated: parent.children, not n.children
+ parent.children[n.name] = n
+ parent.childrenLock.Unlock()
+}
+
+func (n *FsNode) findChild(name string) *FsNode {
+ n.childrenLock.RLock()
+ defer n.childrenLock.RUnlock()
+
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ return nil
+}
+
+func (n *FsNode) ensureChild(name string) *FsNode {
+ n.childrenLock.Lock()
+ defer n.childrenLock.Unlock()
+
+ if n.children == nil {
+ n.children = make(map[string]*FsNode)
+ }
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ t := &FsNode{
+ parent: n,
+ node: nil,
+ name: name,
+ children: nil,
+ }
+ n.children[name] = t
+ return t
+}
+
+func (n *FsNode) disconnectChild(child *FsNode) {
+ n.childrenLock.Lock()
+ delete(n.children, child.name)
+ n.childrenLock.Unlock()
+ child.parent = nil
+}
+
+func (n *FsNode) deleteSelf() {
+ n.childrenLock.Lock()
+ for _, child := range n.children {
+ child.deleteSelf()
+ }
+ n.children = nil
+ n.childrenLock.Unlock()
+
+ n.node = nil
+ n.parent = nil
+
+}
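
FsCache is a path trie: one cache-level RWMutex serializes whole operations, with per-node childrenLock guards for the children maps. A minimal usage sketch, mirroring the tests that follow (rootNode is an assumed fs.Node for the mount root):

    cache := newFsCache(rootNode)

    node := cache.EnsureFsNode(util.FullPath("/a/b/c"), func() fs.Node {
        return &File{Name: "c"} // constructor runs only on a cache miss
    })

    cache.Move(util.FullPath("/a/b"), util.FullPath("/z/b")) // rename carries the subtree
    cache.DeleteFsNode(util.FullPath("/z/b"))                // drops the node and its children
    _ = node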
diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go
new file mode 100644
index 000000000..1152eb32e
--- /dev/null
+++ b/weed/filesys/fscache_test.go
@@ -0,0 +1,115 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestPathSplit(t *testing.T) {
+ parts := util.FullPath("/").Split()
+ if len(parts) != 0 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+ parts = util.FullPath("/readme.md").Split()
+ if len(parts) != 1 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+}
+
+func TestFsCache(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ x := cache.GetFsNode(util.FullPath("/y/x"))
+ if x != nil {
+ t.Errorf("wrong node!")
+ }
+
+ p := util.FullPath("/a/b/c")
+ cache.SetFsNode(p, &File{Name: "cc"})
+ tNode := cache.GetFsNode(p)
+ tFile := tNode.(*File)
+ if tFile.Name != "cc" {
+ t.Errorf("expecting a FsNode")
+ }
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ b := cache.GetFsNode(util.FullPath("/a/b"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a := cache.GetFsNode(util.FullPath("/a"))
+ if a == nil {
+ t.Errorf("missing node!")
+ }
+
+ cache.DeleteFsNode(util.FullPath("/a"))
+ b = cache.GetFsNode(util.FullPath("/a/b"))
+ if b != nil {
+ t.Errorf("unexpected node under a deleted parent!")
+ }
+
+ a = cache.GetFsNode(util.FullPath("/a"))
+ if a != nil {
+ t.Errorf("wrong DeleteFsNode!")
+ }
+
+ z := cache.GetFsNode(util.FullPath("/z"))
+ if z == nil {
+ t.Errorf("missing node!")
+ }
+
+ y := cache.GetFsNode(util.FullPath("/x/y"))
+ if y != nil {
+ t.Errorf("wrong node!")
+ }
+
+}
+
+func TestFsCacheMove(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x"))
+
+ d := cache.GetFsNode(util.FullPath("/z/x/d"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "dd" {
+ t.Errorf("unexpected non dd node!")
+ }
+
+}
+
+func TestFsCacheMove2(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+
+ cache.Move(util.FullPath("/a/b/d"), util.FullPath("/a/b/e"))
+
+ d := cache.GetFsNode(util.FullPath("/a/b/e"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "e" {
+ t.Errorf("unexpected node!")
+ }
+
+}
diff --git a/weed/filesys/meta_cache/id_mapper.go b/weed/filesys/meta_cache/id_mapper.go
new file mode 100644
index 000000000..4a2179f31
--- /dev/null
+++ b/weed/filesys/meta_cache/id_mapper.go
@@ -0,0 +1,101 @@
+package meta_cache
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type UidGidMapper struct {
+ uidMapper *IdMapper
+ gidMapper *IdMapper
+}
+
+type IdMapper struct {
+ localToFiler map[uint32]uint32
+ filerToLocal map[uint32]uint32
+}
+
+// UidGidMapper translates local uid/gid to filer uid/gid.
+// The local store always persists the same uid/gid values as the filer.
+// The local->filer translation happens when updating the filer first and later saving to the meta_cache.
+// The filer->local translation happens when reading from the meta_cache.
+func NewUidGidMapper(uidPairsStr, gidPairStr string) (*UidGidMapper, error) {
+ uidMapper, err := newIdMapper(uidPairsStr)
+ if err != nil {
+ return nil, err
+ }
+ gidMapper, err := newIdMapper(gidPairStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &UidGidMapper{
+ uidMapper: uidMapper,
+ gidMapper: gidMapper,
+ }, nil
+}
+
+func (m *UidGidMapper) LocalToFiler(uid, gid uint32) (uint32, uint32) {
+ return m.uidMapper.LocalToFiler(uid), m.gidMapper.LocalToFiler(gid)
+}
+func (m *UidGidMapper) FilerToLocal(uid, gid uint32) (uint32, uint32) {
+ return m.uidMapper.FilerToLocal(uid), m.gidMapper.FilerToLocal(gid)
+}
+
+func (m *IdMapper) LocalToFiler(id uint32) uint32 {
+ value, found := m.localToFiler[id]
+ if found {
+ return value
+ }
+ return id
+}
+func (m *IdMapper) FilerToLocal(id uint32) uint32 {
+ value, found := m.filerToLocal[id]
+ if found {
+ return value
+ }
+ return id
+}
+
+func newIdMapper(pairsStr string) (*IdMapper, error) {
+
+ localToFiler, filerToLocal, err := parseUint32Pairs(pairsStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &IdMapper{
+ localToFiler: localToFiler,
+ filerToLocal: filerToLocal,
+ }, nil
+
+}
+
+func parseUint32Pairs(pairsStr string) (localToFiler, filerToLocal map[uint32]uint32, err error) {
+
+ if pairsStr == "" {
+ return
+ }
+
+ localToFiler = make(map[uint32]uint32)
+ filerToLocal = make(map[uint32]uint32)
+ for _, pairStr := range strings.Split(pairsStr, ",") {
+ pair := strings.Split(pairStr, ":")
+ localUidStr, filerUidStr := pair[0], pair[1]
+ localUid, localUidErr := strconv.Atoi(localUidStr)
+ if localUidErr != nil {
+ err = fmt.Errorf("failed to parse local %s: %v", localUidStr, localUidErr)
+ return
+ }
+ filerUid, filerUidErr := strconv.Atoi(filerUidStr)
+ if filerUidErr != nil {
+ err = fmt.Errorf("failed to parse remote %s: %v", filerUidStr, filerUidErr)
+ return
+ }
+ localToFiler[uint32(localUid)] = uint32(filerUid)
+ filerToLocal[uint32(filerUid)] = uint32(localUid)
+ }
+
+ return
+}
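
The accepted pair syntax is comma-separated local:filer ids, one string for uids and one for gids; unmapped ids pass through unchanged. A usage sketch (the literal pair values are illustrative, not from this diff):

    mapper, err := meta_cache.NewUidGidMapper("1000:500,1001:501", "1000:500")
    if err != nil {
        // a malformed pair such as "1000" or "x:1" is rejected here
    }
    filerUid, filerGid := mapper.LocalToFiler(1000, 1000) // -> 500, 500
    localUid, localGid := mapper.FilerToLocal(500, 500)   // -> 1000, 1000
    fmt.Println(filerUid, filerGid, localUid, localGid)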
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index edf329143..bb81d6d27 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -5,10 +5,9 @@ import (
"os"
"sync"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
)
@@ -17,19 +16,21 @@ import (
// e.g. fill fileId field for chunks
type MetaCache struct {
- actualStore filer2.FilerStore
+ localStore filer.VirtualFilerStore
sync.RWMutex
visitedBoundary *bounded_tree.BoundedTree
+ uidGidMapper *UidGidMapper
}
-func NewMetaCache(dbFolder string) *MetaCache {
+func NewMetaCache(dbFolder string, uidGidMapper *UidGidMapper) *MetaCache {
return &MetaCache{
- actualStore: openMetaStore(dbFolder),
+ localStore: openMetaStore(dbFolder),
visitedBoundary: bounded_tree.NewBoundedTree(),
+ uidGidMapper: uidGidMapper,
}
}
-func openMetaStore(dbFolder string) filer2.FilerStore {
+func openMetaStore(dbFolder string) filer.VirtualFilerStore {
os.RemoveAll(dbFolder)
os.MkdirAll(dbFolder, 0755)
@@ -43,26 +44,34 @@ func openMetaStore(dbFolder string) filer2.FilerStore {
glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
}
- return store
+ return filer.NewFilerStoreWrapper(store)
}
-func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
- filer_pb.BeforeEntrySerialization(entry.Chunks)
- return mc.actualStore.InsertEntry(ctx, entry)
+ return mc.doInsertEntry(ctx, entry)
}
-func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error {
+func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error {
+ return mc.localStore.InsertEntry(ctx, entry)
+}
+
+func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
oldDir, _ := oldPath.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {
if oldPath != "" {
- if err := mc.actualStore.DeleteEntry(ctx, oldPath); err != nil {
- return err
+ if newEntry != nil && oldPath == newEntry.FullPath {
+ // skip the unnecessary deletion
+ // leave the update to the following InsertEntry operation
+ } else {
+ if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
+ return err
+ }
}
}
} else {
@@ -72,7 +81,7 @@ func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPat
if newEntry != nil {
newDir, _ := newEntry.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
- if err := mc.actualStore.InsertEntry(ctx, newEntry); err != nil {
+ if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
return err
}
}
@@ -80,40 +89,39 @@ func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPat
return nil
}
-func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error {
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
- filer_pb.BeforeEntrySerialization(entry.Chunks)
- return mc.actualStore.UpdateEntry(ctx, entry)
+ return mc.localStore.UpdateEntry(ctx, entry)
}
-func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) {
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
mc.RLock()
defer mc.RUnlock()
- entry, err = mc.actualStore.FindEntry(ctx, fp)
+ entry, err = mc.localStore.FindEntry(ctx, fp)
if err != nil {
return nil, err
}
- filer_pb.AfterEntryDeserialization(entry.Chunks)
+ mc.mapIdFromFilerToLocal(entry)
return
}
func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
mc.Lock()
defer mc.Unlock()
- return mc.actualStore.DeleteEntry(ctx, fp)
+ return mc.localStore.DeleteEntry(ctx, fp)
}
-func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) {
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
mc.RLock()
defer mc.RUnlock()
- entries, err := mc.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+ entries, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
if err != nil {
return nil, err
}
for _, entry := range entries {
- filer_pb.AfterEntryDeserialization(entry.Chunks)
+ mc.mapIdFromFilerToLocal(entry)
}
return entries, err
}
@@ -121,5 +129,9 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
func (mc *MetaCache) Shutdown() {
mc.Lock()
defer mc.Unlock()
- mc.actualStore.Shutdown()
+ mc.localStore.Shutdown()
+}
+
+func (mc *MetaCache) mapIdFromFilerToLocal(entry *filer.Entry) {
+ entry.Attr.Uid, entry.Attr.Gid = mc.uidGidMapper.FilerToLocal(entry.Attr.Uid, entry.Attr.Gid)
}
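
AtomicUpdateEntryFromFiler now skips the local delete when the event's old path equals the new entry's path, leaving the overwrite to the following insert; true renames and deletes still remove the old entry first. A condensed sketch of the decision:

    if oldPath != "" && mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {
        if newEntry == nil || oldPath != newEntry.FullPath {
            // a delete, or a rename away from oldPath
            if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
                return err
            }
        } // else: in-place update; the InsertEntry below overwrites it
    }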
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index e119ebff5..455a8772c 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -14,11 +14,11 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
- glog.V(2).Infof("ReadDirAllEntries %s ...", path)
+ glog.V(4).Infof("ReadDirAllEntries %s ...", path)
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
- entry := filer2.FromPbEntry(string(dirPath), pbEntry)
- if err := mc.InsertEntry(context.Background(), entry); err != nil {
+ entry := filer.FromPbEntry(string(dirPath), pbEntry)
+ if err := mc.doInsertEntry(context.Background(), entry); err != nil {
glog.V(0).Infof("read %s: %v", entry.FullPath, err)
return err
}
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
index ca18411e0..9b72cadcf 100644
--- a/weed/filesys/meta_cache/meta_cache_subscribe.go
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -6,18 +6,25 @@ import (
"io"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
-func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
+func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
+
+ for _, sig := range message.Signatures {
+ if sig == selfSignature && selfSignature != 0 {
+ return nil
+ }
+ }
+
var oldPath util.FullPath
- var newEntry *filer2.Entry
+ var newEntry *filer.Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
glog.V(4).Infof("deleting %v", oldPath)
@@ -30,17 +37,20 @@ func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string,
}
key := util.NewFullPath(dir, message.NewEntry.Name)
glog.V(4).Infof("creating %v", key)
- newEntry = filer2.FromPbEntry(dir, message.NewEntry)
+ newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
- return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry)
+ return mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
}
for {
err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "mount",
PathPrefix: dir,
SinceNs: lastTsNs,
+ Signature: selfSignature,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
@@ -63,7 +73,7 @@ func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string,
})
if err != nil {
glog.Errorf("subscribing filer meta change: %v", err)
- time.Sleep(time.Second)
}
+ time.Sleep(time.Second)
}
}
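
Two behavior changes land in this file. First, every mount now carries a random signature; events whose Signatures list echoes that value are the mount's own writes coming back around, and dropping them breaks the update loop between a mount and its filer subscription. Second, the one-second sleep moved out of the error branch, so the loop backs off before every resubscribe, not only after failures. The filter reduces to:

// Self-event filter as used above: a zero signature disables filtering,
// otherwise any matching signature marks the event as our own echo.
func isSelfEvent(signatures []int32, selfSignature int32) bool {
	if selfSignature == 0 {
		return false
	}
	for _, sig := range signatures {
		if sig == selfSignature {
			return true
		}
	}
	return false
}
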
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 68ad987be..37e9c105a 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -37,15 +37,12 @@ type Option struct {
EntryCacheTtl time.Duration
Umask os.FileMode
- MountUid uint32
- MountGid uint32
- MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers
Cipher bool // whether encrypt data on volume server
-
+ UidGidMapper *meta_cache.UidGidMapper
}
var _ = fs.FS(&WFS{})
@@ -63,9 +60,11 @@ type WFS struct {
stats statsCache
root fs.Node
+ fsNodeCache *FsCache
- chunkCache *chunk_cache.ChunkCache
+ chunkCache *chunk_cache.TieredChunkCache
metaCache *meta_cache.MetaCache
+ signature int32
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -81,25 +80,25 @@ func NewSeaweedFileSystem(option *Option) *WFS {
return make([]byte, option.ChunkSizeLimit)
},
},
+ signature: util.RandomInt32(),
}
- cacheUniqueId := util.Md5([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]
+ cacheUniqueId := util.Md5String([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]
cacheDir := path.Join(option.CacheDir, cacheUniqueId)
if option.CacheSizeMB > 0 {
- os.MkdirAll(cacheDir, 0755)
- wfs.chunkCache = chunk_cache.NewChunkCache(256, cacheDir, option.CacheSizeMB)
- grace.OnInterrupt(func() {
- wfs.chunkCache.Shutdown()
- })
+ os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)
+ wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
}
- wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"))
+ wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), option.UidGidMapper)
startTime := time.Now()
- go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
+ go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
grace.OnInterrupt(func() {
wfs.metaCache.Shutdown()
})
- wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
+ entry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath))
+ wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}
+ wfs.fsNodeCache = newFsCache(wfs.root)
return wfs
}
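
Two details in this constructor are easy to miss: the cache directory mode now honors the mount umask rather than hard-coding 0755, and the directory name is namespaced by the first four hex characters of an MD5 over the filer address, mount root, and version, so two mounts (or two versions of the binary) never share a chunk cache. Assuming util.Md5String returns the hex digest, the derivation is equivalent to this standard-library version:

import (
	"crypto/md5"
	"fmt"
	"path"
)

// cacheDirFor reproduces the derivation above with the standard library:
// hash the identifying strings, keep a 4-character prefix as the namespace.
func cacheDirFor(baseDir, filerGrpcAddress, mountRoot, version string) string {
	sum := md5.Sum([]byte(filerGrpcAddress + mountRoot + version))
	return path.Join(baseDir, fmt.Sprintf("%x", sum)[:4])
}
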
@@ -111,7 +110,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
fullpath := file.fullpath()
- glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid)
+ glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
@@ -119,13 +118,16 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
inodeId := file.fullpath().AsInode()
existingHandle, found := wfs.handles[inodeId]
if found && existingHandle != nil {
+ file.isOpen++
return existingHandle
}
fileHandle = newFileHandle(file, uid, gid)
+ file.maybeLoadEntry(context.Background())
+ file.isOpen++
+
wfs.handles[inodeId] = fileHandle
fileHandle.handle = inodeId
- glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
return
}
@@ -202,3 +204,16 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
}
+
+func (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) {
+ if entry.Attributes == nil {
+ return
+ }
+ entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid)
+}
+func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) {
+ if entry.Attributes == nil {
+ return
+ }
+ entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid)
+}
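
These two helpers are meant to be used symmetrically: an entry is mapped LocalToFiler just before it goes out in a request and FilerToLocal when it comes back, so the wire format always uses the filer's id numbering while local callers only ever see local ids. A hypothetical caller-side sketch of that round trip (sendWithMappedIds is not part of this change):

// Hypothetical round trip: translate ids outbound, restore them on return
// so the in-memory entry keeps local numbering.
func (wfs *WFS) sendWithMappedIds(entry *filer_pb.Entry, send func(*filer_pb.Entry) error) error {
	wfs.mapPbIdFromLocalToFiler(entry)       // local uid/gid -> filer uid/gid
	defer wfs.mapPbIdFromFilerToLocal(entry) // restore for local callers
	return send(entry)
}
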
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index bf21b1808..9791c8630 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -5,7 +5,7 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -18,6 +18,17 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
var fileIds []string
for _, chunk := range chunks {
+ if !chunk.IsChunkManifest {
+ fileIds = append(fileIds, chunk.GetFileIdString())
+ continue
+ }
+ dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
+ if manifestResolveErr != nil {
+ glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ }
+ for _, dChunk := range dataChunks {
+ fileIds = append(fileIds, dChunk.GetFileIdString())
+ }
fileIds = append(fileIds, chunk.GetFileIdString())
}
@@ -31,14 +42,14 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
var vids []string
for _, fileId := range fileIds {
- vids = append(vids, filer2.VolumeId(fileId))
+ vids = append(vids, filer.VolumeId(fileId))
}
lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
- glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
+ glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
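
The deletion change handles chunk manifests: a chunk flagged IsChunkManifest carries no file data itself, only references to data chunks, so deleting a file must resolve each manifest into its data chunks and delete those ids as well as the manifest's own. Note the loop above appends the manifest's file id even when resolution fails, so at least the manifest itself is reclaimed. The expansion reduces to:

// Expand manifest chunks into the full set of file ids to delete; resolve
// stands in for filer.ResolveOneChunkManifest with its lookup bound.
func collectFileIds(chunks []*filer_pb.FileChunk, resolve func(*filer_pb.FileChunk) ([]*filer_pb.FileChunk, error)) (fileIds []string) {
	for _, chunk := range chunks {
		if chunk.IsChunkManifest {
			if dataChunks, err := resolve(chunk); err == nil {
				for _, dataChunk := range dataChunks {
					fileIds = append(fileIds, dataChunk.GetFileIdString())
				}
			}
		}
		// the chunk itself (plain data chunk or the manifest) is deleted too
		fileIds = append(fileIds, chunk.GetFileIdString())
	}
	return
}
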
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index 786d0b42a..fec33e4ab 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -5,14 +5,14 @@ import (
"fmt"
"io"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
)
-func (wfs *WFS) saveDataAsChunk(dir string) filer2.SaveDataAsChunkFunctionType {
+func (wfs *WFS) saveDataAsChunk(dir string) filer.SaveDataAsChunkFunctionType {
return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
var fileId, host string
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
index 091a70fa3..92e43b675 100644
--- a/weed/filesys/xattr.go
+++ b/weed/filesys/xattr.go
@@ -119,5 +119,5 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err
if cacheErr == filer_pb.ErrNotFound {
return nil, fuse.ENOENT
}
- return cachedEntry.ToProtoEntry(), nil
+ return cachedEntry.ToProtoEntry(), cacheErr
}