Diffstat (limited to 'weed/filesys')
-rw-r--r--  weed/filesys/dir.go                          29
-rw-r--r--  weed/filesys/dir_link.go                     13
-rw-r--r--  weed/filesys/dirty_page.go                   28
-rw-r--r--  weed/filesys/file.go                        125
-rw-r--r--  weed/filesys/filehandle.go                   15
-rw-r--r--  weed/filesys/meta_cache/meta_cache_init.go   18
-rw-r--r--  weed/filesys/wfs.go                          10
-rw-r--r--  weed/filesys/wfs_filer_client.go             11
8 files changed, 167 insertions, 82 deletions
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index ae2ae3418..a8481a435 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -27,6 +27,7 @@ type Dir struct {
var _ = fs.Node(&Dir{})
var _ = fs.NodeCreater(&Dir{})
+var _ = fs.NodeMknoder(&Dir{})
var _ = fs.NodeMkdirer(&Dir{})
var _ = fs.NodeFsyncer(&Dir{})
var _ = fs.NodeRequestLookuper(&Dir{})
@@ -179,6 +180,20 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
}
+func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
+ if req.Mode&os.ModeNamedPipe != 0 {
+ glog.V(1).Infof("mknod named pipe %s", req.String())
+ return nil, fuse.ENOSYS
+ }
+ if req.Mode&os.ModeSocket != 0 {
+ glog.V(1).Infof("mknod socket %s", req.String())
+ return nil, fuse.ENOSYS
+ }
+ // not going to support mknod for normal files either
+ glog.V(1).Infof("mknod %s", req.String())
+ return nil, fuse.ENOSYS
+}
+
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
@@ -347,8 +362,22 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
// then, delete meta cache and fsNode cache
dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
+
+ // clear entry inside the file
+ fsNode := dir.wfs.fsNodeCache.GetFsNode(filePath)
+ if fsNode != nil {
+ if file, ok := fsNode.(*File); ok {
+ file.clearEntry()
+ }
+ }
dir.wfs.fsNodeCache.DeleteFsNode(filePath)
+ // remove current file handle if any
+ dir.wfs.handlesLock.Lock()
+ defer dir.wfs.handlesLock.Unlock()
+ inodeId := util.NewFullPath(dir.FullPath(), req.Name).AsInode()
+ delete(dir.wfs.handles, inodeId)
+
// delete the chunks last
if isDeleteData {
dir.wfs.deleteFileChunks(entry.Chunks)
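Note on the cleanup above: the open-handle map is keyed by an inode id derived from the full path, so deleting a file must also drop that key or a later open could pick up a stale handle. A minimal sketch of such a path-keyed handle map, assuming (as the util.NewFullPath(...).AsInode() call suggests) that the inode id is a stable hash of the path; the types and names below are illustrative only:

package filesys

import (
	"hash/fnv"
	"sync"
)

// openHandle is a stand-in for the real file handle type.
type openHandle struct{ refCount int }

type handleMap struct {
	sync.Mutex
	handles map[uint64]*openHandle
}

// pathToInode sketches a stable path -> inode mapping via hashing.
func pathToInode(fullPath string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(fullPath))
	return h.Sum64()
}

// releaseByPath mirrors the delete(wfs.handles, inodeId) step in the diff.
func (m *handleMap) releaseByPath(fullPath string) {
	m.Lock()
	defer m.Unlock()
	delete(m.handles, pathToInode(fullPath))
}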
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index f6bc41b56..ba3280f03 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -31,7 +31,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
- if err := oldFile.maybeLoadEntry(ctx); err != nil {
+ if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
return nil, err
}
@@ -86,7 +86,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
// create new file node
newNode := dir.newFile(req.NewName, request.Entry)
newFile := newNode.(*File)
- if err := newFile.maybeLoadEntry(ctx); err != nil {
+ if _, err := newFile.maybeLoadEntry(ctx); err != nil {
return nil, err
}
@@ -138,16 +138,17 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return "", err
}
- if os.FileMode(file.entry.Attributes.FileMode)&os.ModeSymlink == 0 {
+ if os.FileMode(entry.Attributes.FileMode)&os.ModeSymlink == 0 {
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget)
+ glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
- return file.entry.Attributes.SymlinkTarget, nil
+ return entry.Attributes.SymlinkTarget, nil
}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index dd0c48796..11089186f 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -9,22 +9,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-var (
- concurrentWriterLimit = runtime.NumCPU()
- concurrentWriters = util.NewLimitedConcurrentExecutor(4 * concurrentWriterLimit)
)
type ContinuousDirtyPages struct {
intervals *ContinuousIntervals
f *File
writeWaitGroup sync.WaitGroup
+ chunkAddLock sync.Mutex
chunkSaveErrChan chan error
chunkSaveErrChanClosed bool
lastErr error
- lock sync.Mutex
collection string
replication string
}
@@ -33,7 +27,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
dirtyPages := &ContinuousDirtyPages{
intervals: &ContinuousIntervals{},
f: file,
- chunkSaveErrChan: make(chan error, concurrentWriterLimit),
+ chunkSaveErrChan: make(chan error, runtime.NumCPU()),
}
go func() {
for t := range dirtyPages.chunkSaveErrChan {
@@ -100,14 +94,18 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedD
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) {
+ errChanSize := pages.f.wfs.option.ConcurrentWriters
+ if errChanSize == 0 {
+ errChanSize = runtime.NumCPU()
+ }
if pages.chunkSaveErrChanClosed {
- pages.chunkSaveErrChan = make(chan error, concurrentWriterLimit)
+ pages.chunkSaveErrChan = make(chan error, errChanSize)
pages.chunkSaveErrChanClosed = false
}
mtime := time.Now().UnixNano()
pages.writeWaitGroup.Add(1)
- go func() {
+ writer := func() {
defer pages.writeWaitGroup.Done()
reader = io.LimitReader(reader, size)
@@ -119,9 +117,17 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
}
chunk.Mtime = mtime
pages.collection, pages.replication = collection, replication
+ pages.chunkAddLock.Lock()
+ defer pages.chunkAddLock.Unlock()
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
- }()
+ }
+
+ if pages.f.wfs.concurrentWriters != nil {
+ pages.f.wfs.concurrentWriters.Execute(writer)
+ } else {
+ go writer()
+ }
}
func max(x, y int64) int64 {
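With this change, saveToStorage hands the upload closure to a shared executor when option.ConcurrentWriters is set, instead of starting an unbounded goroutine per dirty chunk. Only the Execute(job func()) method is visible in the diff, so the following is a hedged sketch of what such a limiter can look like, not the util package's actual implementation:

package util

import "sync"

// limitedExecutorSketch bounds concurrency by acquiring a slot from a
// buffered channel before running each job in its own goroutine.
type limitedExecutorSketch struct {
	slots chan struct{}
	wg    sync.WaitGroup
}

func newLimitedExecutorSketch(limit int) *limitedExecutorSketch {
	return &limitedExecutorSketch{slots: make(chan struct{}, limit)}
}

// Execute blocks once `limit` jobs are in flight, which is what throttles
// the dirty-page writers above.
func (e *limitedExecutorSketch) Execute(job func()) {
	e.slots <- struct{}{}
	e.wg.Add(1)
	go func() {
		defer func() {
			<-e.slots
			e.wg.Done()
		}()
		job()
	}()
}

// Wait blocks until all submitted jobs have finished.
func (e *limitedExecutorSketch) Wait() { e.wg.Wait() }

When ConcurrentWriters is 0 the executor is never created (see the wfs.go hunk below) and saveToStorage falls back to the old go writer() path, so existing mounts keep their previous behavior.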
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 7aa1016d7..9e1342370 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -43,32 +43,33 @@ func (file *File) fullpath() util.FullPath {
return util.NewFullPath(file.dir.FullPath(), file.Name)
}
-func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
+func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
- if file.isOpen <= 0 {
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry := file.entry
+ if file.isOpen <= 0 || entry == nil {
+ if entry, err = file.maybeLoadEntry(ctx); err != nil {
return err
}
}
attr.Inode = file.fullpath().AsInode()
attr.Valid = time.Second
- attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
- attr.Size = filer.FileSize(file.entry)
+ attr.Mode = os.FileMode(entry.Attributes.FileMode)
+ attr.Size = filer.FileSize(entry)
if file.isOpen > 0 {
- attr.Size = file.entry.Attributes.FileSize
+ attr.Size = entry.Attributes.FileSize
glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
}
- attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0)
- attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0)
- attr.Gid = file.entry.Attributes.Gid
- attr.Uid = file.entry.Attributes.Uid
+ attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
+ attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
+ attr.Gid = entry.Attributes.Gid
+ attr.Uid = entry.Attributes.Uid
attr.Blocks = attr.Size/blockSize + 1
attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
- if file.entry.HardLinkCounter > 0 {
- attr.Nlink = uint32(file.entry.HardLinkCounter)
+ if entry.HardLinkCounter > 0 {
+ attr.Nlink = uint32(entry.HardLinkCounter)
}
return nil
@@ -79,11 +80,12 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
glog.V(4).Infof("file Getxattr %s", file.fullpath())
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- return getxattr(file.entry, req, resp)
+ return getxattr(entry, req, resp)
}
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
@@ -104,7 +106,8 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ _, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
if file.isOpen > 0 {
@@ -141,7 +144,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
}
file.entry.Chunks = chunks
- file.entryViewCache = nil
+ file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), chunks)
file.reader = nil
file.wfs.deleteFileChunks(truncatedChunks)
}
@@ -186,7 +189,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
return nil
}
- return file.saveEntry()
+ return file.saveEntry(file.entry)
}
@@ -194,15 +197,16 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := setxattr(file.entry, req); err != nil {
+ if err := setxattr(entry, req); err != nil {
return err
}
- return file.saveEntry()
+ return file.saveEntry(entry)
}
@@ -210,15 +214,16 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := removexattr(file.entry, req); err != nil {
+ if err := removexattr(entry, req); err != nil {
return err
}
- return file.saveEntry()
+ return file.saveEntry(entry)
}
@@ -226,11 +231,12 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
glog.V(4).Infof("file Listxattr %s", file.fullpath())
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := listxattr(file.entry, req, resp); err != nil {
+ if err := listxattr(entry, req, resp); err != nil {
return err
}
@@ -252,30 +258,61 @@ func (file *File) Forget() {
file.wfs.fsNodeCache.DeleteFsNode(t)
}
-func (file *File) maybeLoadEntry(ctx context.Context) error {
- if (file.entry != nil && len(file.entry.HardLinkId) != 0) || file.isOpen > 0 {
- return nil
+func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {
+ entry = file.entry
+ if file.isOpen > 0 {
+ return entry, nil
}
- entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
+ if entry != nil {
+ if len(entry.HardLinkId) == 0 {
+ // hard-linked entries are always reloaded; others can use the cached entry
+ return entry, nil
+ }
+ }
+ entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
- return err
+ return entry, err
}
if entry != nil {
file.setEntry(entry)
+ } else {
+ glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
}
- return nil
+ return entry, nil
+}
+
+func lessThan(a, b *filer_pb.FileChunk) bool {
+ if a.Mtime == b.Mtime {
+ return a.Fid.FileKey < b.Fid.FileKey
+ }
+ return a.Mtime < b.Mtime
}
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
- sort.Slice(chunks, func(i, j int) bool {
- if chunks[i].Mtime == chunks[j].Mtime {
- return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
+ // find the earliest incoming chunk
+ newChunks := chunks
+ earliestChunk := newChunks[0]
+ for i := 1; i < len(newChunks); i++ {
+ if lessThan(newChunks[i], earliestChunk) {
+ earliestChunk = newChunks[i]
+ }
+ }
+
+ // pick out-of-order chunks from existing chunks
+ for _, chunk := range file.entry.Chunks {
+ if lessThan(earliestChunk, chunk) {
+ chunks = append(chunks, chunk)
}
- return chunks[i].Mtime < chunks[j].Mtime
+ }
+
+ // sort the incoming and re-merged chunks
+ sort.Slice(chunks, func(i, j int) bool {
+ return lessThan(chunks[i], chunks[j])
})
+ // add to entry view cache
for _, chunk := range chunks {
file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
}
@@ -284,24 +321,30 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
- file.entry.Chunks = append(file.entry.Chunks, chunks...)
+ file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entry = entry
- file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), file.entry.Chunks)
+ file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), entry.Chunks)
+ file.reader = nil
+}
+
+func (file *File) clearEntry() {
+ file.entry = nil
+ file.entryViewCache = nil
file.reader = nil
}
-func (file *File) saveEntry() error {
+func (file *File) saveEntry(entry *filer_pb.Entry) error {
return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- file.wfs.mapPbIdFromLocalToFiler(file.entry)
- defer file.wfs.mapPbIdFromFilerToLocal(file.entry)
+ file.wfs.mapPbIdFromLocalToFiler(entry)
+ defer file.wfs.mapPbIdFromFilerToLocal(entry)
request := &filer_pb.UpdateEntryRequest{
Directory: file.dir.FullPath(),
- Entry: file.entry,
+ Entry: entry,
Signatures: []int32{file.wfs.signature},
}
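addChunks now orders chunks with lessThan, i.e. by mtime with the file key as tie-breaker, and re-merges any existing chunk that is newer than the earliest incoming one, so the visible-interval cache stays correct when uploads finish out of order. A standalone illustration of that ordering, using a stripped-down chunk struct and made-up values:

package main

import (
	"fmt"
	"sort"
)

// chunk is a stripped-down stand-in for filer_pb.FileChunk.
type chunk struct {
	Mtime   int64
	FileKey uint64
}

// lessThan mirrors the ordering used in the diff: mtime first, file key as tie-breaker.
func lessThan(a, b chunk) bool {
	if a.Mtime == b.Mtime {
		return a.FileKey < b.FileKey
	}
	return a.Mtime < b.Mtime
}

func main() {
	chunks := []chunk{
		{Mtime: 30, FileKey: 2},
		{Mtime: 10, FileKey: 9},
		{Mtime: 30, FileKey: 1},
	}
	sort.Slice(chunks, func(i, j int) bool { return lessThan(chunks[i], chunks[j]) })
	fmt.Println(chunks) // [{10 9} {30 1} {30 2}]
}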
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 54bde3494..54410a0ba 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -183,16 +183,18 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
}
if fh.f.isOpen == 0 {
+
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
}
- fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
- }
- // stop the goroutine
- if !fh.dirtyPages.chunkSaveErrChanClosed {
- fh.dirtyPages.chunkSaveErrChanClosed = true
- close(fh.dirtyPages.chunkSaveErrChan)
+ // stop the goroutine
+ if !fh.dirtyPages.chunkSaveErrChanClosed {
+ fh.dirtyPages.chunkSaveErrChanClosed = true
+ close(fh.dirtyPages.chunkSaveErrChan)
+ }
+
+ fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
return nil
@@ -262,7 +264,6 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
}
fh.f.entry.Chunks = append(chunks, manifestChunks...)
- fh.f.entryViewCache = nil
fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry)
defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index f42d61230..4089cea28 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -3,8 +3,6 @@ package meta_cache
import (
"context"
"fmt"
- "strings"
- "time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -18,7 +16,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
glog.V(4).Infof("ReadDirAllEntries %s ...", path)
- for waitTime := time.Second; waitTime < filer.ReadWaitTime; waitTime += waitTime / 2 {
+ util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
@@ -30,17 +28,13 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
}
return nil
})
- if err == nil {
- break
- }
- if strings.Contains(err.Error(), "transport: ") {
- glog.V(0).Infof("ReadDirAllEntries %s: %v. Retry in %v", path, err, waitTime)
- time.Sleep(waitTime)
- continue
- }
+ return err
+ })
+
+ if err != nil {
err = fmt.Errorf("list %s: %v", dirPath, err)
- break
}
+
return
})
}
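The hand-rolled wait loop is replaced by util.Retry, which the diff only shows being called as Retry(name, func() error); the retry policy itself is not visible here. A hedged sketch with the same call shape, reusing the backoff and transport-error check from the deleted code; the wait bounds are assumptions, not the util package's real policy:

package util

import (
	"log"
	"strings"
	"time"
)

// retrySketch illustrates the Retry(name, job) shape used above.
func retrySketch(name string, job func() error) (err error) {
	for waitTime := time.Second; waitTime < 30*time.Second; waitTime += waitTime / 2 {
		err = job()
		if err == nil {
			return nil
		}
		if !strings.Contains(err.Error(), "transport") {
			return err // not a transient connection error, give up immediately
		}
		log.Printf("%s: %v, retrying in %v", name, err, waitTime)
		time.Sleep(waitTime)
	}
	return err
}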
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 759e21b15..cd14e8032 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -31,6 +31,7 @@ type Option struct {
Replication string
TtlSec int32
ChunkSizeLimit int64
+ ConcurrentWriters int
CacheDir string
CacheSizeMB int64
DataCenter string
@@ -68,6 +69,9 @@ type WFS struct {
chunkCache *chunk_cache.TieredChunkCache
metaCache *meta_cache.MetaCache
signature int32
+
+ // throttle writers
+ concurrentWriters *util.LimitedConcurrentExecutor
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -96,7 +100,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
fsNode := wfs.fsNodeCache.GetFsNode(filePath)
if fsNode != nil {
if file, ok := fsNode.(*File); ok {
- file.entry = nil
+ file.clearEntry()
}
}
})
@@ -110,6 +114,10 @@ func NewSeaweedFileSystem(option *Option) *WFS {
wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}
wfs.fsNodeCache = newFsCache(wfs.root)
+ if wfs.option.ConcurrentWriters > 0 {
+ wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters)
+ }
+
return wfs
}
diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go
index 096ee555f..dd76f5669 100644
--- a/weed/filesys/wfs_filer_client.go
+++ b/weed/filesys/wfs_filer_client.go
@@ -1,6 +1,7 @@
package filesys
import (
+ "github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb"
@@ -11,10 +12,12 @@ var _ = filer_pb.FilerClient(&WFS{})
func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(client)
- }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
+ err := util.Retry("filer grpc", func() error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
+ })
if err == nil {
return nil