| -rw-r--r-- | k8s/seaweedfs/Chart.yaml                   |  2 |
| -rw-r--r-- | k8s/seaweedfs/values.yaml                  |  2 |
| -rw-r--r-- | weed/filer2/filechunks.go                  | 17 |
| -rw-r--r-- | weed/filer2/filechunks_test.go             | 33 |
| -rw-r--r-- | weed/filer2/filer_deletion.go              | 10 |
| -rw-r--r-- | weed/filer2/reader_at.go                   | 34 |
| -rw-r--r-- | weed/filesys/dir.go                        | 17 |
| -rw-r--r-- | weed/filesys/dirty_page.go                 |  4 |
| -rw-r--r-- | weed/filesys/file.go                       | 21 |
| -rw-r--r-- | weed/filesys/filehandle.go                 | 21 |
| -rw-r--r-- | weed/filesys/fscache.go                    | 13 |
| -rw-r--r-- | weed/filesys/fscache_test.go               | 21 |
| -rw-r--r-- | weed/filesys/meta_cache/meta_cache_init.go |  2 |
| -rw-r--r-- | weed/filesys/wfs.go                        | 11 |
| -rw-r--r-- | weed/filesys/wfs_deletion.go               |  2 |
| -rw-r--r-- | weed/operation/upload_content.go           |  4 |
| -rw-r--r-- | weed/pb/filer_pb/filer_client.go           |  2 |
| -rw-r--r-- | weed/pb/filer_pb/filer_pb_helper.go        |  6 |
| -rw-r--r-- | weed/pb/filer_pb/filer_pb_helper_test.go   |  2 |
| -rw-r--r-- | weed/server/webdav_server.go               |  5 |
| -rw-r--r-- | weed/util/chunk_cache/chunk_cache.go       |  2 |
| -rw-r--r-- | weed/util/constants.go                     |  2 |
22 files changed, 159 insertions, 74 deletions
diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml
index 73d9a67e4..14b927d2c 100644
--- a/k8s/seaweedfs/Chart.yaml
+++ b/k8s/seaweedfs/Chart.yaml
@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 1.88
\ No newline at end of file
+version: 1.89
\ No newline at end of file
diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml
index 6fb25e0a3..6ddb1953c 100644
--- a/k8s/seaweedfs/values.yaml
+++ b/k8s/seaweedfs/values.yaml
@@ -4,7 +4,7 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  imageTag: "1.88"
+  imageTag: "1.89"
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
index 9de888d50..d7c31bf0f 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer2/filechunks.go
@@ -134,17 +134,18 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
 
     for _, chunk := range visibles {
 
-        if chunk.start <= offset && offset < chunk.stop && offset < stop {
+        chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
+
+        if chunkStart < chunkStop {
             views = append(views, &ChunkView{
                 FileId:      chunk.fileId,
-                Offset:      offset - chunk.start, // offset is the data starting location in this file id
-                Size:        uint64(min(chunk.stop, stop) - offset),
-                LogicOffset: offset,
+                Offset:      chunkStart - chunk.start,
+                Size:        uint64(chunkStop - chunkStart),
+                LogicOffset: chunkStart,
                 ChunkSize:   chunk.chunkSize,
                 CipherKey:   chunk.cipherKey,
                 IsGzipped:   chunk.isGzipped,
             })
-            offset = min(chunk.stop, stop)
         }
     }
 
@@ -266,3 +267,9 @@ func min(x, y int64) int64 {
     }
     return y
 }
+func max(x, y int64) int64 {
+    if x <= y {
+        return y
+    }
+    return x
+}
diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go
index bfee59198..2390d4fb2 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer2/filechunks_test.go
@@ -2,9 +2,13 @@ package filer2
 
 import (
     "log"
+    "math"
     "testing"
 
     "fmt"
+
+    "github.com/stretchr/testify/assert"
+
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
@@ -286,7 +290,7 @@ func TestChunksReading(t *testing.T) {
             Size:   400,
             Expected: []*ChunkView{
                 {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
-                // {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
+                {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250},
             },
         },
         // case 5: updates overwrite full chunks
@@ -418,3 +422,30 @@ func BenchmarkCompactFileChunks(b *testing.B) {
         CompactFileChunks(nil, chunks)
     }
 }
+
+
+func TestViewFromVisibleIntervals(t *testing.T) {
+
+    visibles := []VisibleInterval{
+        {
+            start:  0,
+            stop:   25,
+            fileId: "fid1",
+        },
+        {
+            start:  4096,
+            stop:   8192,
+            fileId: "fid2",
+        },
+        {
+            start:  16384,
+            stop:   18551,
+            fileId: "fid3",
+        },
+    }
+
+    views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
+
+    if len(views) != len(visibles) {
+        assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+    }
+
+}
\ No newline at end of file
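The filechunks.go hunk above replaces the old cursor-advancing scan with a symmetric clip of the request range [offset, stop) against each visible interval [chunk.start, chunk.stop): any non-empty overlap becomes a ChunkView, so a hole between intervals (case 4 in TestChunksReading) now yields views on both sides of the gap instead of ending the scan early. A minimal standalone sketch of that clipping rule; the clip helper is hypothetical and not part of the commit:

package main

import "fmt"

// clip intersects the requested range [reqStart, reqStop) with a chunk's
// visible interval [start, stop); ok is false when they do not overlap.
func clip(reqStart, reqStop, start, stop int64) (viewStart, viewStop int64, ok bool) {
	viewStart, viewStop = max64(reqStart, start), min64(reqStop, stop)
	return viewStart, viewStop, viewStart < viewStop
}

func min64(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

func max64(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func main() {
	// Request [250, 650) against intervals [0, 250) and [250, 400):
	// the first contributes nothing, the second yields the view [250, 400).
	fmt.Println(clip(250, 650, 0, 250))   // 250 250 false
	fmt.Println(clip(250, 650, 250, 400)) // 250 400 true
}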
diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go
index a6b229771..2ff9dac63 100644
--- a/weed/filer2/filer_deletion.go
+++ b/weed/filer2/filer_deletion.go
@@ -1,6 +1,7 @@
 package filer2
 
 import (
+    "strings"
     "time"
 
     "github.com/chrislusf/seaweedfs/weed/glog"
@@ -50,15 +51,14 @@ func (f *Filer) loopProcessingDeletion() {
                 fileIds = fileIds[:0]
             }
             deletionCount = len(toDeleteFileIds)
-            deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+            _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
             if err != nil {
-                glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+                if !strings.Contains(err.Error(), "already deleted") {
+                    glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+                }
             } else {
                 glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
             }
-            if len(deleteResults) != deletionCount {
-                glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, len(deleteResults))
-            }
         }
     })
diff --git a/weed/filer2/reader_at.go b/weed/filer2/reader_at.go
index 2f65761cc..b5bd85cbb 100644
--- a/weed/filer2/reader_at.go
+++ b/weed/filer2/reader_at.go
@@ -19,6 +19,7 @@ type ChunkReadAt struct {
     bufferOffset int64
     lookupFileId func(fileId string) (targetUrl string, err error)
     readerLock   sync.Mutex
+    fileSize     int64
 
     chunkCache *chunk_cache.ChunkCache
 }
@@ -54,13 +55,14 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
     }
 }
 
-func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
 
     return &ChunkReadAt{
         chunkViews:   chunkViews,
         lookupFileId: LookupFn(filerClient),
         bufferOffset: -1,
         chunkCache:   chunkCache,
+        fileSize:     fileSize,
     }
 }
 
@@ -73,9 +75,6 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
         readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
         n += readCount
         err = readErr
-        if readCount == 0 {
-            return n, io.EOF
-        }
     }
     return
 }
@@ -83,8 +82,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
     var found bool
+    var chunkStart, chunkStop int64
     for _, chunk := range c.chunkViews {
-        if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+        // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d), %v && %v\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size), chunk.LogicOffset <= offset, offset < chunk.LogicOffset+int64(chunk.Size))
+        chunkStart, chunkStop = max(chunk.LogicOffset, offset), min(chunk.LogicOffset+int64(chunk.Size), offset+int64(len(p)))
+        if chunkStart < chunkStop {
             found = true
             if c.bufferOffset != chunk.LogicOffset {
                 c.buffer, err = c.fetchChunkData(chunk)
@@ -96,15 +98,23 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
             break
         }
     }
-    if !found {
-        return 0, io.EOF
+
+    // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d), found:%v, err:%v\n", offset, offset+int64(len(p)), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), found, err)
+
+    if err != nil {
+        return
     }
 
-    if err == nil {
-        n = copy(p, c.buffer[offset-c.bufferOffset:])
+    if found {
+        n = int(chunkStart-offset) + copy(p[chunkStart-offset:chunkStop-offset], c.buffer[chunkStart-c.bufferOffset:chunkStop-c.bufferOffset])
+        return
     }
 
-    // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
+    n = len(p)
+    if offset+int64(n) >= c.fileSize {
+        err = io.EOF
+        n = int(c.fileSize - offset)
+    }
 
     return
 
@@ -112,12 +122,12 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
 
-    glog.V(4).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+    glog.V(5).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
     hasDataInCache := false
     chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
     if chunkData != nil {
-        glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+        glog.V(5).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
         hasDataInCache = true
     } else {
         chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
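The reader_at.go rewrite changes the read contract: doReadAt no longer returns io.EOF merely because no chunk view covers the requested offset. A covered range is copied out of the (possibly cached) chunk buffer, an uncovered range inside the file is treated as a sparse hole, and io.EOF is reported only once the read reaches the fileSize that callers now pass to NewChunkReaderAtFromClient. A toy model of that contract, assuming a single chunk and no caching (unlike the real ChunkReadAt):

package main

import (
	"fmt"
	"io"
)

// sparseReader models the post-change read semantics: one data chunk at
// [chunkStart, chunkStop), zeros elsewhere, io.EOF only at fileSize.
type sparseReader struct {
	chunkStart, chunkStop, fileSize int64
}

func (r *sparseReader) ReadAt(p []byte, off int64) (int, error) {
	n := len(p)
	if off+int64(n) > r.fileSize {
		n = int(r.fileSize - off) // short read at the end of the file
	}
	for i := 0; i < n; i++ {
		if pos := off + int64(i); pos >= r.chunkStart && pos < r.chunkStop {
			p[i] = 0xAA // byte served from the chunk
		} else {
			p[i] = 0 // hole between chunks reads back as zeros
		}
	}
	if n < len(p) {
		return n, io.EOF
	}
	return n, nil
}

func main() {
	r := &sparseReader{chunkStart: 0, chunkStop: 25, fileSize: 8192}
	buf := make([]byte, 100)
	n, err := r.ReadAt(buf, 1000) // offset 1000 lies entirely in a hole
	fmt.Println(n, err)           // 100 <nil>: zero-filled, not io.EOF
}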
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 50ca6df5d..578c40014 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -63,7 +63,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
     attr.Gid = dir.entry.Attributes.Gid
     attr.Uid = dir.entry.Attributes.Uid
 
-    glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
+    glog.V(5).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
 
     return nil
 }
@@ -101,7 +101,7 @@ func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
 }
 
 func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
-    return dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
+    f := dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
         return &File{
             Name:           name,
             dir:            dir,
@@ -110,14 +110,17 @@ func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
             entryViewCache: nil,
         }
     })
+    f.(*File).dir = dir // in case dir node was created later
+    return f
 }
 
 func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
 
-    return dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
+    d := dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
         return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
     })
-
+    d.(*Dir).parent = dir // in case dir node was created later
+    return d
 }
 
 func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
@@ -237,7 +240,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
             return nil, fuse.ENOENT
         }
     } else {
-        glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
+        glog.V(5).Infof("dir Lookup cache hit %s", fullFilePath)
     }
 
     if entry != nil {
@@ -265,7 +268,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 
 func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 
-    glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
+    glog.V(5).Infof("dir ReadDirAll %s", dir.FullPath())
 
     processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
         fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@@ -427,7 +430,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
 }
 
 func (dir *Dir) Forget() {
-    glog.V(4).Infof("Forget dir %s", dir.FullPath())
+    glog.V(5).Infof("Forget dir %s", dir.FullPath())
 
     dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
 }
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 8b7d92ffb..ba8f7ec41 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -35,7 +35,7 @@ func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []
     pages.lock.Lock()
     defer pages.lock.Unlock()
 
-    glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
+    glog.V(5).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
 
     if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
         // this is more than what buffer can hold.
@@ -127,7 +127,7 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *fi
 
     chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)
     if err == nil {
         hasSavedData = true
-        glog.V(4).Infof("%s saveToStorage %s [%d,%d) of %d bytes", pages.f.fullpath(), chunk.FileId, maxList.Offset(), maxList.Offset()+chunkSize, fileSize)
+        glog.V(4).Infof("saveToStorage %s %s [%d,%d) of %d bytes", pages.f.fullpath(), chunk.GetFileIdString(), maxList.Offset(), maxList.Offset()+chunkSize, fileSize)
         return
     } else {
         glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+chunkSize, err)
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 519e12c59..8db892447 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -85,7 +85,7 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
 
 func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
 
-    glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
+    glog.V(5).Infof("file %v open %+v", file.fullpath(), req)
 
     file.isOpen++
 
@@ -93,7 +93,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 
     resp.Handle = fuse.HandleID(handle.handle)
 
-    glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+    glog.V(5).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
 
     return handle, nil
 
@@ -101,7 +101,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 
 func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
 
-    glog.V(4).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
+    glog.V(5).Infof("%v file setattr %+v", file.fullpath(), req)
 
     if err := file.maybeLoadEntry(ctx); err != nil {
         return err
@@ -121,10 +121,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
                 int64Size = int64(req.Size) - chunk.Offset
                 if int64Size > 0 {
                     chunks = append(chunks, chunk)
-                    glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk, chunk.Size, int64Size)
+                    glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
                     chunk.Size = uint64(int64Size)
                 } else {
-                    glog.V(4).Infof("truncated whole chunk %+v\n", chunk)
+                    glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
                     truncatedChunks = append(truncatedChunks, chunk)
                 }
             }
@@ -133,10 +133,11 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
             file.entry.Chunks = chunks
             file.entryViewCache = nil
             file.reader = nil
-            file.dirtyMetadata = true
         }
         file.entry.Attributes.FileSize = req.Size
+        file.dirtyMetadata = true
     }
+
     if req.Valid.Mode() {
         file.entry.Attributes.FileMode = uint32(req.Mode)
         file.dirtyMetadata = true
@@ -170,6 +171,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
         return nil
     }
 
+    if !file.dirtyMetadata {
+        return nil
+    }
+
     return file.saveEntry()
 }
 
@@ -232,7 +237,7 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
 
 func (file *File) Forget() {
     t := util.NewFullPath(file.dir.FullPath(), file.Name)
-    glog.V(4).Infof("Forget file %s", t)
+    glog.V(5).Infof("Forget file %s", t)
     file.wfs.fsNodeCache.DeleteFsNode(t)
 }
 
@@ -285,7 +290,7 @@ func (file *File) saveEntry() error {
             Entry:     file.entry,
         }
 
-        glog.V(1).Infof("save file entry: %v", request)
+        glog.V(4).Infof("save file entry: %v", request)
         _, err := client.UpdateEntry(context.Background(), request)
         if err != nil {
             glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
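The Setattr hunks above tighten truncation bookkeeping: a chunk that begins at or past the new size is dropped, a chunk straddling it keeps only req.Size - chunk.Offset bytes, dirtyMetadata is now set for any size change, and the new early return skips saveEntry() when no metadata changed. A hedged sketch of the per-chunk decision (hypothetical helper; the real code mutates *filer_pb.FileChunk values in place):

package main

import "fmt"

// truncateChunk reports whether a chunk [offset, offset+size) survives a
// truncation to newSize and how many of its bytes remain (sketch only).
func truncateChunk(offset int64, size uint64, newSize int64) (keep bool, kept uint64) {
	remaining := newSize - offset
	if remaining <= 0 {
		return false, 0 // chunk lies entirely beyond the new size
	}
	if int64(size) > remaining {
		return true, uint64(remaining) // straddling chunk is cut short
	}
	return true, size // chunk is unaffected
}

func main() {
	fmt.Println(truncateChunk(0, 100, 250))   // true 100: kept whole
	fmt.Println(truncateChunk(200, 100, 250)) // true 50: shrunk
	fmt.Println(truncateChunk(300, 100, 250)) // false 0: dropped
}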
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 42a0b2446..550aec5fb 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -54,7 +54,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
 
 func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
 
-    glog.V(2).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
+    glog.V(5).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
 
     buff := make([]byte, req.Size)
 
@@ -82,10 +82,11 @@ func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset
 
 func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
-    // this value should come from the filer instead of the old f
-    if len(fh.f.entry.Chunks) == 0 {
+    fileSize := int64(filer2.FileSize(fh.f.entry))
+
+    if fileSize == 0 {
         glog.V(1).Infof("empty fh %v", fh.f.fullpath())
-        return 0, nil
+        return 0, io.EOF
     }
 
     var chunkResolveErr error
@@ -99,7 +100,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
     if fh.f.reader == nil {
         chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
-        fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
+        fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
     }
 
     totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -125,7 +126,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
     copy(data, req.Data)
 
     fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
-    glog.V(2).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
+    glog.V(5).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
 
     chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
     if err != nil {
@@ -153,7 +154,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 
 func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
 
-    glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
+    glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
 
     fh.f.isOpen--
 
@@ -170,7 +171,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
 func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
     // fflush works at fh level
     // send the data to the OS
-    glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
+    glog.V(5).Infof("Flush %s fh %d %v", fh.f.fullpath(), fh.handle, req)
 
     chunks, err := fh.dirtyPages.FlushToStorage()
     if err != nil {
@@ -213,7 +214,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 
         glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
         for i, chunk := range fh.f.entry.Chunks {
-            glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+            glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
         }
 
         chunks, garbages := filer2.CompactFileChunks(filer2.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
@@ -238,7 +239,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 
         fh.f.wfs.deleteFileChunks(garbages)
         for i, chunk := range garbages {
-            glog.V(4).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+            glog.V(4).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
         }
 
     return nil
diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go
index b146f0615..fdec8253c 100644
--- a/weed/filesys/fscache.go
+++ b/weed/filesys/fscache.go
@@ -3,8 +3,9 @@ package filesys
 import (
     "sync"
 
-    "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/seaweedfs/fuse/fs"
+
+    "github.com/chrislusf/seaweedfs/weed/util"
 )
 
 type FsCache struct {
@@ -118,7 +119,6 @@ func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
         target = target.ensureChild(p)
     }
     parent := target.parent
-    src.name = target.name
     if dir, ok := src.node.(*Dir); ok {
         dir.name = target.name // target is not Dir, but a shortcut
     }
@@ -132,6 +132,7 @@ func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
 
     target.deleteSelf()
 
+    src.name = target.name
     src.connectToParent(parent)
 
     return src
@@ -144,10 +145,14 @@ func (n *FsNode) connectToParent(parent *FsNode) {
         oldNode.deleteSelf()
     }
     if dir, ok := n.node.(*Dir); ok {
-        dir.parent = parent.node.(*Dir)
+        if parent.node != nil {
+            dir.parent = parent.node.(*Dir)
+        }
     }
     if f, ok := n.node.(*File); ok {
-        f.dir = parent.node.(*Dir)
+        if parent.node != nil {
+            f.dir = parent.node.(*Dir)
+        }
     }
     n.childrenLock.Lock()
     parent.children[n.name] = n
node!") + } + if d.(*File).Name != "e" { + t.Errorf("unexpected node!") + } + +} + diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go index 662a60fe0..cd98f4a7c 100644 --- a/weed/filesys/meta_cache/meta_cache_init.go +++ b/weed/filesys/meta_cache/meta_cache_init.go @@ -14,7 +14,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) { - glog.V(4).Infof("ReadDirAllEntries %s ...", path) + glog.V(5).Infof("ReadDirAllEntries %s ...", path) err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error { entry := filer2.FromPbEntry(string(dirPath), pbEntry) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 22f0b655a..eb7042663 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -113,7 +113,7 @@ func (wfs *WFS) Root() (fs.Node, error) { func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { fullpath := file.fullpath() - glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid) + glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid) wfs.handlesLock.Lock() defer wfs.handlesLock.Unlock() @@ -127,7 +127,6 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand fileHandle = newFileHandle(file, uid, gid) wfs.handles[inodeId] = fileHandle fileHandle.handle = inodeId - glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) return } @@ -136,7 +135,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { wfs.handlesLock.Lock() defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + glog.V(5).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) delete(wfs.handles, fullpath.AsInode()) @@ -146,7 +145,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { // Statfs is called to obtain file system metadata. Implements fuse.FSStatfser func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error { - glog.V(4).Infof("reading fs stats: %+v", req) + glog.V(5).Infof("reading fs stats: %+v", req) if wfs.stats.lastChecked < time.Now().Unix()-20 { @@ -158,13 +157,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 22f0b655a..eb7042663 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -113,7 +113,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
 func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
 
     fullpath := file.fullpath()
-    glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid)
+    glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
 
     wfs.handlesLock.Lock()
     defer wfs.handlesLock.Unlock()
@@ -127,7 +127,6 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
     fileHandle = newFileHandle(file, uid, gid)
     wfs.handles[inodeId] = fileHandle
     fileHandle.handle = inodeId
-    glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
 
     return
 }
@@ -136,7 +135,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
     wfs.handlesLock.Lock()
     defer wfs.handlesLock.Unlock()
 
-    glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+    glog.V(5).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
 
     delete(wfs.handles, fullpath.AsInode())
 
@@ -146,7 +145,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
 
 // Statfs is called to obtain file system metadata. Implements fuse.FSStatfser
 func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
 
-    glog.V(4).Infof("reading fs stats: %+v", req)
+    glog.V(5).Infof("reading fs stats: %+v", req)
 
     if wfs.stats.lastChecked < time.Now().Unix()-20 {
 
@@ -158,13 +157,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
                 Ttl:         fmt.Sprintf("%ds", wfs.option.TtlSec),
             }
 
-            glog.V(4).Infof("reading filer stats: %+v", request)
+            glog.V(5).Infof("reading filer stats: %+v", request)
             resp, err := client.Statistics(context.Background(), request)
             if err != nil {
                 glog.V(0).Infof("reading filer stats %v: %v", request, err)
                 return err
             }
-            glog.V(4).Infof("read filer stats: %+v", resp)
+            glog.V(5).Infof("read filer stats: %+v", resp)
 
             wfs.stats.TotalSize = resp.TotalSize
             wfs.stats.UsedSize = resp.UsedSize
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index bf21b1808..203ebdad1 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -38,7 +38,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
 
     m := make(map[string]operation.LookupResult)
 
-    glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
+    glog.V(5).Infof("deleteFileIds lookup volume id locations: %v", vids)
     resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
         VolumeIds: vids,
     })
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index e1914f20a..f59c7e1a9 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -33,6 +33,7 @@ type UploadResult struct {
 }
 
 func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk {
+    fid, _ := filer_pb.ToFileIdObject(fileId)
     return &filer_pb.FileChunk{
         FileId:       fileId,
         Offset:       offset,
@@ -41,6 +42,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *fi
         ETag:         uploadResult.ETag,
         CipherKey:    uploadResult.CipherKey,
         IsCompressed: uploadResult.Gzip > 0,
+        Fid:          fid,
     }
 }
 
@@ -84,7 +86,7 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader,
 }
 
 func retriedUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
-    for i:=0; i< 3; i++ {
+    for i := 0; i < 3; i++ {
         uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
         if err == nil {
             return
diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go
index 6605202e0..c5a8c311a 100644
--- a/weed/pb/filer_pb/filer_client.go
+++ b/weed/pb/filer_pb/filer_client.go
@@ -83,7 +83,7 @@ func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, f
             InclusiveStartFrom: inclusive,
         }
 
-        glog.V(4).Infof("read directory: %v", request)
+        glog.V(5).Infof("read directory: %v", request)
         ctx, cancel := context.WithCancel(context.Background())
         stream, err := client.ListEntries(ctx, request)
         if err != nil {
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index 96ab2154f..2dc1ebaf8 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -10,7 +10,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
-func toFileIdObject(fileIdStr string) (*FileId, error) {
+func ToFileIdObject(fileIdStr string) (*FileId, error) {
     t, err := needle.ParseFileIdFromString(fileIdStr)
     if err != nil {
         return nil, err
@@ -43,14 +43,14 @@ func BeforeEntrySerialization(chunks []*FileChunk) {
     for _, chunk := range chunks {
 
         if chunk.FileId != "" {
-            if fid, err := toFileIdObject(chunk.FileId); err == nil {
+            if fid, err := ToFileIdObject(chunk.FileId); err == nil {
                 chunk.Fid = fid
                 chunk.FileId = ""
             }
         }
 
         if chunk.SourceFileId != "" {
-            if fid, err := toFileIdObject(chunk.SourceFileId); err == nil {
+            if fid, err := ToFileIdObject(chunk.SourceFileId); err == nil {
                 chunk.SourceFid = fid
                 chunk.SourceFileId = ""
             }
diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go
index d4468c011..0009afdbe 100644
--- a/weed/pb/filer_pb/filer_pb_helper_test.go
+++ b/weed/pb/filer_pb/filer_pb_helper_test.go
@@ -9,7 +9,7 @@ import (
 
 func TestFileIdSize(t *testing.T) {
     fileIdStr := "11745,0293434534cbb9892b"
-    fid, _ := toFileIdObject(fileIdStr)
+    fid, _ := ToFileIdObject(fileIdStr)
     bytes, _ := proto.Marshal(fid)
 
     println(len(fileIdStr))
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index e9f7b23fd..277e261f0 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -470,7 +470,8 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
     if err != nil {
         return 0, err
     }
-    if len(f.entry.Chunks) == 0 {
+    fileSize := int64(filer2.FileSize(f.entry))
+    if fileSize == 0 {
         return 0, io.EOF
     }
     if f.entryViewCache == nil {
@@ -479,7 +480,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
     }
     if f.reader == nil {
         chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
-        f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
+        f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
     }
 
     readSize, err = f.reader.ReadAt(p, f.off)
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 17b64fb6c..b54b40dbb 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -89,7 +89,7 @@ func (c *ChunkCache) SetChunk(fileId string, data []byte) {
     c.Lock()
     defer c.Unlock()
 
-    glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data))
+    glog.V(5).Infof("SetChunk %s size %d\n", fileId, len(data))
 
     c.doSetChunk(fileId, data)
 }
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 10955acde..69cf57216 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-    VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 88)
+    VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 89)
     COMMIT  = ""
 )
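filer_pb_helper.go exports the former toFileIdObject as ToFileIdObject so that upload_content.go can fill the structured Fid field on each new FileChunk (see ToPbFileChunk above); TestFileIdSize compares the proto-encoded FileId against the string form. A hypothetical usage sketch, assuming this commit's filer_pb package is importable and using the FileId message fields (VolumeId, FileKey, Cookie):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Same sample id as TestFileIdSize above: "<volumeId>,<fileKey+cookie hex>".
	fid, err := filer_pb.ToFileIdObject("11745,0293434534cbb9892b")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// This structured form is what ToPbFileChunk now stores in chunk.Fid,
	// alongside the string FileId, for more compact entry serialization.
	fmt.Println(fid.VolumeId, fid.FileKey, fid.Cookie)
}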
