path: root/weed/filesys
author     Chris Lu <chris.lu@gmail.com>   2020-08-15 09:32:47 -0700
committer  Chris Lu <chris.lu@gmail.com>   2020-08-15 09:32:47 -0700
commit     c647deace16ec1a3f0c11d92dc5fa15ec30012e4 (patch)
tree       7b13546507283b5566a069d03dc3371ccd2d7290 /weed/filesys
parent     c03bb180eb5fc96e79324f0aa5ec7cd9b674f901 (diff)
file size: support setting file length
use Attr.FileSize and TotalChunkSize to determine file size
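
A minimal, hypothetical sketch of that size rule, assuming the effective size is simply the larger of the attribute size and the furthest byte covered by any chunk; the helper and the simplified chunk struct below are illustrative stand-ins, not the actual filer2 / filer_pb types:

package main

import "fmt"

// chunk is a simplified stand-in for filer_pb.FileChunk.
type chunk struct {
	Offset int64
	Size   uint64
}

// effectiveFileSize mirrors the idea of this commit: report the larger of the
// recorded attribute size and the total extent covered by the chunks.
func effectiveFileSize(attrSize uint64, chunks []chunk) uint64 {
	var totalChunkSize uint64
	for _, c := range chunks {
		if end := uint64(c.Offset) + c.Size; end > totalChunkSize {
			totalChunkSize = end
		}
	}
	if attrSize > totalChunkSize {
		return attrSize
	}
	return totalChunkSize
}

func main() {
	// Data appended but attributes not yet updated: the chunk extent wins (prints 32).
	fmt.Println(effectiveFileSize(0, []chunk{{Offset: 0, Size: 32}}))
	// File extended via Setattr before any data is flushed: the attribute wins (prints 100).
	fmt.Println(effectiveFileSize(100, []chunk{{Offset: 0, Size: 32}}))
}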
Diffstat (limited to 'weed/filesys')
-rw-r--r--  weed/filesys/dirty_page.go   18
-rw-r--r--  weed/filesys/file.go         27
-rw-r--r--  weed/filesys/filehandle.go   23
3 files changed, 44 insertions, 24 deletions
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 2af3e905a..8b7d92ffb 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -35,7 +35,7 @@ func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []
pages.lock.Lock()
defer pages.lock.Unlock()
- glog.V(4).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data)))
+ glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
@@ -121,14 +121,16 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *fi
return nil, false, nil
}
+ fileSize := int64(pages.f.entry.Attributes.FileSize)
for {
- chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
+ chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
+ chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)
if err == nil {
hasSavedData = true
- glog.V(4).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
+ glog.V(4).Infof("%s saveToStorage %s [%d,%d) of %d bytes", pages.f.fullpath(), chunk.FileId, maxList.Offset(), maxList.Offset()+chunkSize, fileSize)
return
} else {
- glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
+ glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+chunkSize, err)
time.Sleep(5 * time.Second)
}
}
@@ -139,6 +141,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
dir, _ := pages.f.fullpath().DirAndName()
+ reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(dir)(reader, pages.f.Name, offset)
if err != nil {
return nil, err
@@ -149,6 +152,13 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
}
+func maxUint64(x, y uint64) uint64 {
+ if x > y {
+ return x
+ }
+ return y
+}
+
func max(x, y int64) int64 {
if x > y {
return x
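
The clamp to fileSize-maxList.Offset() plus the io.LimitReader wrapper above keep a flush from uploading dirty-page bytes past the current file size. A standalone sketch of that pattern, with made-up buffer contents, offsets, and sizes:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// A dirty page holds 8 bytes at offset 0, but the file was truncated to 5
	// bytes, so at most min(pageSize, fileSize-offset) bytes may be saved.
	page := bytes.NewReader([]byte("ABCDEFGH"))
	var offset, fileSize, pageSize int64 = 0, 5, 8

	chunkSize := pageSize
	if remaining := fileSize - offset; remaining < chunkSize {
		chunkSize = remaining
	}

	// io.LimitReader guarantees the uploader cannot read past chunkSize even
	// if the underlying page buffer is longer.
	limited := io.LimitReader(page, chunkSize)
	saved, _ := io.ReadAll(limited)
	fmt.Printf("saved %d bytes: %q\n", len(saved), saved) // saved 5 bytes: "ABCDE"
}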
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index dbfd7fd1a..83f6950bd 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -7,12 +7,13 @@ import (
"sort"
"time"
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
)
const blockSize = 512
@@ -35,6 +36,7 @@ type File struct {
entryViewCache []filer2.VisibleInterval
isOpen int
reader io.ReaderAt
+ dirtyMetadata bool
}
func (file *File) fullpath() util.FullPath {
@@ -43,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
- glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+ glog.V(5).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
if file.isOpen <= 0 {
if err := file.maybeLoadEntry(ctx); err != nil {
@@ -54,7 +56,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Inode = file.fullpath().AsInode()
attr.Valid = time.Second
attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
- attr.Size = filer2.TotalSize(file.entry.Chunks)
+ attr.Size = filer2.FileSize(file.entry)
if file.isOpen > 0 {
attr.Size = file.entry.Attributes.FileSize
glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
@@ -107,22 +109,31 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if req.Valid.Size() {
- glog.V(4).Infof("%v file setattr set size=%v", file.fullpath(), req.Size)
+ glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
if req.Size < filer2.TotalSize(file.entry.Chunks) {
// fmt.Printf("truncate %v \n", fullPath)
var chunks []*filer_pb.FileChunk
+ var truncatedChunks []*filer_pb.FileChunk
for _, chunk := range file.entry.Chunks {
int64Size := int64(chunk.Size)
if chunk.Offset+int64Size > int64(req.Size) {
+ // this chunk is truncated
int64Size = int64(req.Size) - chunk.Offset
- }
- if int64Size > 0 {
- chunks = append(chunks, chunk)
+ if int64Size > 0 {
+ chunks = append(chunks, chunk)
+ glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk, chunk.Size, int64Size)
+ chunk.Size = uint64(int64Size)
+ } else {
+ glog.V(4).Infof("truncated whole chunk %+v\n", chunk)
+ truncatedChunks = append(truncatedChunks, chunk)
+ }
}
}
+ file.wfs.deleteFileChunks(truncatedChunks)
file.entry.Chunks = chunks
file.entryViewCache = nil
file.reader = nil
+ file.dirtyMetadata = true
}
file.entry.Attributes.FileSize = req.Size
}
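
The Setattr change above shrinks the chunk that straddles the new size and collects chunks that fall wholly past it for deletion. A simplified standalone version of that partitioning, using a stand-in struct rather than the real filer_pb.FileChunk:

package main

import "fmt"

// fileChunk is a simplified stand-in for filer_pb.FileChunk.
type fileChunk struct {
	Offset int64
	Size   uint64
}

// truncateChunks returns the chunks to keep (possibly shrunk) and the chunks
// that lie entirely beyond newSize and should be deleted from storage.
func truncateChunks(chunks []fileChunk, newSize int64) (kept, deleted []fileChunk) {
	for _, c := range chunks {
		if c.Offset+int64(c.Size) <= newSize {
			kept = append(kept, c) // untouched
			continue
		}
		if remaining := newSize - c.Offset; remaining > 0 {
			c.Size = uint64(remaining) // straddles the boundary: shrink it
			kept = append(kept, c)
		} else {
			deleted = append(deleted, c) // wholly past the new size
		}
	}
	return
}

func main() {
	chunks := []fileChunk{{0, 10}, {10, 10}, {20, 10}}
	kept, deleted := truncateChunks(chunks, 15)
	fmt.Println(kept, deleted) // prints [{0 10} {10 5}] [{20 10}]
}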
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 680500c75..42a0b2446 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -19,10 +19,9 @@ import (
type FileHandle struct {
// cache file has been written to
- dirtyPages *ContinuousDirtyPages
- contentType string
- dirtyMetadata bool
- handle uint64
+ dirtyPages *ContinuousDirtyPages
+ contentType string
+ handle uint64
f *File
RequestId fuse.RequestID // unique ID for request
@@ -40,7 +39,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
Gid: gid,
}
if fh.f.entry != nil {
- fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
+ fh.f.entry.Attributes.FileSize = filer2.FileSize(fh.f.entry)
}
return fh
}
@@ -55,7 +54,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
+ glog.V(2).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
buff := make([]byte, req.Size)
@@ -126,7 +125,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
copy(data, req.Data)
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
- glog.V(4).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
+ glog.V(2).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
if err != nil {
@@ -139,14 +138,14 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
if req.Offset == 0 {
// detect mime type
fh.contentType = http.DetectContentType(data)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
if len(chunks) > 0 {
fh.f.addChunks(chunks)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
return nil
@@ -181,10 +180,10 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
if len(chunks) > 0 {
fh.f.addChunks(chunks)
- fh.dirtyMetadata = true
+ fh.f.dirtyMetadata = true
}
- if !fh.dirtyMetadata {
+ if !fh.f.dirtyMetadata {
return nil
}
@@ -246,7 +245,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
})
if err == nil {
- fh.dirtyMetadata = false
+ fh.f.dirtyMetadata = false
}
if err != nil {