Diffstat (limited to 'weed/filesys/filehandle.go')
-rw-r--r--  weed/filesys/filehandle.go | 33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 738423b6a..86ea20806 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -28,22 +28,20 @@ type FileHandle struct {
 	sync.Mutex

 	f         *File
-	RequestId fuse.RequestID // unique ID for request
-	NodeId    fuse.NodeID    // file or directory the request is about
-	Uid       uint32         // user ID of process making request
-	Gid       uint32         // group ID of process making request
-	writeOnly bool
+	NodeId    fuse.NodeID // file or directory the request is about
+	Uid       uint32      // user ID of process making request
+	Gid       uint32      // group ID of process making request
 	isDeleted bool
 }

 func newFileHandle(file *File, uid, gid uint32) *FileHandle {
 	fh := &FileHandle{
-		f: file,
-		// dirtyPages: newContinuousDirtyPages(file, writeOnly),
-		dirtyPages: newPageWriter(file, file.wfs.option.ChunkSizeLimit),
-		Uid:        uid,
-		Gid:        gid,
+		f:   file,
+		Uid: uid,
+		Gid: gid,
 	}
+	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
+	fh.dirtyPages = newPageWriter(fh, file.wfs.option.ChunkSizeLimit)
 	entry := fh.f.getEntry()
 	if entry != nil {
 		entry.Attributes.FileSize = filer.FileSize(entry)
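
Aside: in this hunk the page writer is now built from the handle itself rather than from the *File, so the call has to move out of the composite literal; the handle must exist before anything can keep a back-reference to it. Below is a minimal, self-contained sketch of that ordering constraint; the type and field names are illustrative only, not the actual SeaweedFS PageWriter.

package main

import "fmt"

// writer stands in for a dirty-page writer that keeps a back-reference to
// the handle that owns it (an assumption made for illustration).
type writer struct{ owner *handle }

type handle struct {
	name string
	w    *writer
}

// newHandle mirrors the shape of the diff: build the handle first, then
// attach the writer that points back at it, instead of constructing both
// inside one composite literal.
func newHandle(name string) *handle {
	h := &handle{name: name}
	h.w = &writer{owner: h}
	return h
}

func main() {
	h := newHandle("example.txt")
	fmt.Println(h.w.owner.name) // prints: example.txt
}
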
@@ -77,6 +75,8 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 		buff = make([]byte, req.Size)
 	}
+	fh.lockForRead(req.Offset, len(buff))
+	defer fh.unlockForRead(req.Offset, len(buff))

 	totalRead, err := fh.readFromChunks(buff, req.Offset)
 	if err == nil || err == io.EOF {
 		maxStop := fh.readFromDirtyPages(buff, req.Offset)
@@ -103,6 +103,13 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 	return err
 }

+func (fh *FileHandle) lockForRead(startOffset int64, size int) {
+	fh.dirtyPages.LockForRead(startOffset, startOffset+int64(size))
+}
+func (fh *FileHandle) unlockForRead(startOffset int64, size int) {
+	fh.dirtyPages.UnlockForRead(startOffset, startOffset+int64(size))
+}
+
 func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) {
 	maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset)
 	return
@@ -151,6 +158,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	reader := fh.reader
 	if reader == nil {
 		chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64)
+		glog.V(4).Infof("file handle read %s [%d,%d) from %+v", fileFullPath, offset, offset+int64(len(buff)), chunkViews)
+		for _, chunkView := range chunkViews {
+			glog.V(4).Infof(" read %s [%d,%d) from chunk %+v", fileFullPath, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.FileId)
+		}
 		reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize)
 	}
 	fh.reader = reader
@@ -161,7 +172,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 		glog.Errorf("file handle read %s: %v", fileFullPath, err)
 	}

-	// glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err)
+	glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err)

 	return int64(totalRead), err
 }
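
Note: the diff only shows the call sites of the new read lock (fh.lockForRead / the deferred fh.unlockForRead in Read, and LockForRead/UnlockForRead on the page writer); the PageWriter side is not part of this file. As a rough illustration of the technique, the sketch below reference-counts readers per fixed-size chunk so a concurrent flush could tell whether dirty data in a range is still being read. It is an assumption-level sketch, not the SeaweedFS implementation; the names, the chunk granularity, and the IsBusy helper are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

// rangeLock reference-counts active readers per fixed-size chunk so a
// flusher can check whether any in-flight read still overlaps a range.
type rangeLock struct {
	mu        sync.Mutex
	chunkSize int64
	readers   map[int64]int // chunk index -> active reader count
}

func newRangeLock(chunkSize int64) *rangeLock {
	return &rangeLock{chunkSize: chunkSize, readers: make(map[int64]int)}
}

// LockForRead marks every chunk overlapping [start, stop) as having one
// more active reader; it mirrors the shape of the LockForRead call above.
func (r *rangeLock) LockForRead(start, stop int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i := start / r.chunkSize; i*r.chunkSize < stop; i++ {
		r.readers[i]++
	}
}

// UnlockForRead releases the marks taken by a matching LockForRead call.
func (r *rangeLock) UnlockForRead(start, stop int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i := start / r.chunkSize; i*r.chunkSize < stop; i++ {
		if r.readers[i]--; r.readers[i] <= 0 {
			delete(r.readers, i)
		}
	}
}

// IsBusy reports whether any chunk in [start, stop) still has readers;
// a flusher could consult this before recycling dirty pages.
func (r *rangeLock) IsBusy(start, stop int64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i := start / r.chunkSize; i*r.chunkSize < stop; i++ {
		if r.readers[i] > 0 {
			return true
		}
	}
	return false
}

func main() {
	rl := newRangeLock(8 * 1024 * 1024) // stand-in for a chunk size limit
	rl.LockForRead(0, 1024)             // like fh.lockForRead(req.Offset, len(buff))
	fmt.Println(rl.IsBusy(0, 4096))     // true while the read is in flight
	rl.UnlockForRead(0, 1024)           // like the deferred fh.unlockForRead
	fmt.Println(rl.IsBusy(0, 4096))     // false afterwards
}
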