path: root/weed/filesys/filehandle.go
author    HongyanShen <763987993@qq.com>    2020-03-11 12:55:24 +0800
committer GitHub <noreply@github.com>    2020-03-11 12:55:24 +0800
commit    03529fc0c29072f6f26e11ffbd7229cf92dc71ce (patch)
tree      ed8833386a712c850dcef0815509774681a6ab56 /weed/filesys/filehandle.go
parent    0fca1ae776783b37481549df40f477b7d9248d3c (diff)
parent    60f5f05c78a2918d5219c925cea5847759281a2c (diff)
download  seaweedfs-03529fc0c29072f6f26e11ffbd7229cf92dc71ce.tar.xz
seaweedfs-03529fc0c29072f6f26e11ffbd7229cf92dc71ce.zip
Merge pull request #1 from chrislusf/master
sync
Diffstat (limited to 'weed/filesys/filehandle.go')
-rw-r--r--  weed/filesys/filehandle.go  112
1 file changed, 77 insertions, 35 deletions
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 1f4754dd1..100c9eba0 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -7,10 +7,11 @@ import (
"path"
"time"
+ "github.com/gabriel-vasile/mimetype"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gabriel-vasile/mimetype"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
@@ -50,29 +51,51 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
+ buff := make([]byte, req.Size)
+
+ totalRead, err := fh.readFromChunks(buff, req.Offset)
+ if err == nil {
+ dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset)
+ if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
+ totalRead = dirtyOffset + int64(dirtySize) - req.Offset
+ }
+ }
+
+ resp.Data = buff[:totalRead]
+
+ if err != nil {
+ glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+ return fuse.EIO
+ }
+
+ return err
+}
+
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) {
+ return fh.dirtyPages.ReadDirtyData(buff, startOffset)
+}
+
+func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
+
// this value should come from the filer instead of the old f
if len(fh.f.entry.Chunks) == 0 {
- glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name)
- return nil
+ glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+ return 0, nil
}
- buff := make([]byte, req.Size)
-
if fh.f.entryViewCache == nil {
fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks)
}
- chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size)
-
- totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset)
+ chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff))
- resp.Data = buff[:totalRead]
+ totalRead, err := filer2.ReadIntoBuffer(fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset)
if err != nil {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- return err
+ return totalRead, err
}
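
The reworked Read path above first fills the buffer from the persisted chunks, then overlays any not-yet-flushed dirty-page data for the same range, extending the reported length when the dirty region reaches past what the chunks returned. A minimal, self-contained sketch of that merge, where readBase and readDirty are hypothetical stand-ins for readFromChunks and readFromDirtyPages:

package main

import "fmt"

// readBase copies persisted data into buff starting at the file offset and
// reports how many bytes it filled (hypothetical stand-in for readFromChunks).
func readBase(buff []byte, offset int64) int64 {
	base := []byte("hello world from chunks")
	return int64(copy(buff, base[offset:]))
}

// readDirty overlays unflushed page data onto buff and reports the absolute
// range it wrote (hypothetical stand-in for readFromDirtyPages).
func readDirty(buff []byte, offset int64) (dirtyOffset int64, size int) {
	dirty := []byte("DIRTY")
	at := int64(6) // pretend the dirty data sits at file offset 6
	copy(buff[at-offset:], dirty)
	return at, len(dirty)
}

func main() {
	buff := make([]byte, 32)
	offset := int64(0)

	totalRead := readBase(buff, offset)
	dirtyOffset, dirtySize := readDirty(buff, offset)
	// Grow the result if the dirty region extends past what the chunks held.
	if totalRead+offset < dirtyOffset+int64(dirtySize) {
		totalRead = dirtyOffset + int64(dirtySize) - offset
	}
	fmt.Printf("%q\n", buff[:totalRead]) // "hello DIRTY from chunks"
}
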
// Write to the file handle
@@ -80,30 +103,32 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
// write the request to volume servers
- glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)))
+ fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize)))
+ // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
- chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
+ chunks, err := fh.dirtyPages.AddPage(req.Offset, req.Data)
if err != nil {
- glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
- return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err)
+ glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
+ return fuse.EIO
}
resp.Size = len(req.Data)
if req.Offset == 0 {
// detect mime type
- var possibleExt string
- fh.contentType, possibleExt = mimetype.Detect(req.Data)
- if ext := path.Ext(fh.f.Name); ext != possibleExt {
+ detectedMIME := mimetype.Detect(req.Data)
+ fh.contentType = detectedMIME.String()
+ if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() {
fh.contentType = mime.TypeByExtension(ext)
}
fh.dirtyMetadata = true
}
- fh.f.addChunks(chunks)
-
if len(chunks) > 0 {
+
+ fh.f.addChunks(chunks)
+
fh.dirtyMetadata = true
}
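
The first write (offset 0) now uses the object-returning mimetype API: the content type is sniffed from the bytes, but if the file's extension disagrees with the detected extension, the extension-derived type wins. A small sketch of that decision, using the real github.com/gabriel-vasile/mimetype package; the file name and sample bytes are made up for illustration:

package main

import (
	"fmt"
	"mime"
	"path"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	name := "report.csv" // hypothetical file name
	data := []byte("a,b,c\n1,2,3\n")

	detected := mimetype.Detect(data)
	contentType := detected.String()
	// Prefer the type implied by the file extension when it disagrees
	// with what was sniffed from the data.
	if ext := path.Ext(name); ext != detected.Extension() {
		contentType = mime.TypeByExtension(ext)
	}
	fmt.Println(contentType)
}
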
@@ -114,11 +139,12 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
- fh.dirtyPages.releaseResource()
-
- fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+ fh.f.isOpen--
- fh.f.isOpen = false
+ if fh.f.isOpen <= 0 {
+ fh.dirtyPages.releaseResource()
+ fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+ }
return nil
}
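
Release switches from a boolean to a counter, so shared resources are torn down only when the last open handle goes away. A toy sketch of that reference-counting pattern, with File as a simplified stand-in for the real type:

package main

import "fmt"

// File is a simplified stand-in for the real file type; isOpen counts handles.
type File struct {
	isOpen int
}

func (f *File) Open() { f.isOpen++ }

func (f *File) Release() {
	f.isOpen--
	// Only tear down shared resources once the last handle is closed.
	if f.isOpen <= 0 {
		fmt.Println("releasing dirty pages and unregistering the handle")
	}
}

func main() {
	f := &File{}
	f.Open()
	f.Open()
	f.Release() // another handle is still open: nothing is released
	f.Release() // last close: resources are released
}
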
@@ -128,19 +154,22 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
// send the data to the OS
glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
- chunk, err := fh.dirtyPages.FlushToStorage(ctx)
+ chunks, err := fh.dirtyPages.FlushToStorage()
if err != nil {
- glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
- return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
+ glog.Errorf("flush %s: %v", fh.f.fullpath(), err)
+ return fuse.EIO
}
- fh.f.addChunk(chunk)
+ fh.f.addChunks(chunks)
+ if len(chunks) > 0 {
+ fh.dirtyMetadata = true
+ }
if !fh.dirtyMetadata {
return nil
}
- return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
if fh.f.entry.Attributes != nil {
fh.f.entry.Attributes.Mime = fh.contentType
@@ -149,6 +178,8 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.f.entry.Attributes.Mtime = time.Now().Unix()
fh.f.entry.Attributes.Crtime = time.Now().Unix()
fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask)
+ fh.f.entry.Attributes.Collection = fh.dirtyPages.collection
+ fh.f.entry.Attributes.Replication = fh.dirtyPages.replication
}
request := &filer_pb.CreateEntryRequest{
@@ -156,25 +187,36 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
Entry: fh.f.entry,
}
- glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks))
+ glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
for i, chunk := range fh.f.entry.Chunks {
- glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks)
fh.f.entry.Chunks = chunks
// fh.f.entryViewCache = nil
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.Errorf("update fh: %v", err)
- return fmt.Errorf("update fh: %v", err)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
- fh.f.wfs.deleteFileChunks(ctx, garbages)
+ fh.f.wfs.deleteFileChunks(garbages)
for i, chunk := range garbages {
- glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
return nil
})
+
+ if err == nil {
+ fh.dirtyMetadata = false
+ }
+
+ if err != nil {
+ glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+ return fuse.EIO
+ }
+
+ return nil
}
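
Flush now collects a slice of chunks, marks the metadata dirty when anything was written, compacts overwritten chunks, persists the entry, and only clears the dirty flag on success. A compressed sketch of that ordering, where flushPages, compact, and persistEntry are hypothetical stand-ins for FlushToStorage, filer2.CompactFileChunks, and the CreateEntry call:

package main

import "fmt"

type chunk struct{ fileID string }

// flushPages stands in for FlushToStorage: push unflushed pages out as chunks.
func flushPages() ([]chunk, error) { return []chunk{{fileID: "3,01637037d6"}}, nil }

// compact stands in for filer2.CompactFileChunks: split chunks into kept and garbage.
func compact(c []chunk) (kept, garbage []chunk) { return c, nil }

// persistEntry stands in for the CreateEntry call to the filer.
func persistEntry(kept []chunk) error { return nil }

func main() {
	dirtyMetadata := false

	chunks, err := flushPages()
	if err != nil {
		fmt.Println("flush failed:", err)
		return
	}
	if len(chunks) > 0 {
		dirtyMetadata = true
	}
	if !dirtyMetadata {
		return // nothing changed since the last flush
	}

	kept, garbage := compact(chunks)
	if err := persistEntry(kept); err != nil {
		fmt.Println("persist failed:", err)
		return
	}
	// Garbage chunks would now be deleted from the volume servers.
	fmt.Println("flushed", len(kept), "chunks;", len(garbage), "garbage chunks to delete")
	dirtyMetadata = false
}
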