author    Patrick Schmidt <patrick.schmidt@innogames.com>  2022-08-21 20:33:58 +0200
committer GitHub <noreply@github.com>                      2022-08-21 11:33:58 -0700
commit    3f758820c1f93540b77bfa6a13e840d8e9f75e1a (patch)
tree      62968d48214a4631adbb4cbc150bab305199806b
parent    c7892bc7c4a5df48cc1db946243573b3ded3d711 (diff)
Fix FUSE server buffer leaks in file gaps (#3472)
* Fix FUSE server buffer leaks in file gaps

  This change zeros read buffers when encountering file gaps during file/chunk
  reads in FUSE mounts. It prevents leaking internal buffers of the FUSE server,
  which could otherwise reveal metadata, directory listings, file contents, and
  other data related to FUSE API calls. The issue was that buffers are reused,
  but when a file gap was found the buffer was not zeroed accordingly, so the
  existing data in the buffer was kept and returned.

* Move zero logic into its own method
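The effect of the fix is easiest to see with a reused buffer and a sparse chunk layout like the one exercised by the new tests below. The following is a minimal standalone sketch of the idea, not the SeaweedFS code; `zeroRange` and the sample layout are illustrative only:

```go
package main

import "fmt"

// zeroRange clears buf[start:start+length), clamped to the buffer bounds.
// This mirrors the idea behind the new ChunkReadAt.zero helper: gaps between
// chunks must be explicitly zeroed because the buffer is reused across reads.
func zeroRange(buf []byte, start, length int64) {
	end := start + length
	if end > int64(len(buf)) {
		end = int64(len(buf))
	}
	for i := start; i < end; i++ {
		buf[i] = 0
	}
}

func main() {
	// The buffer still holds bytes from an earlier read (simulating reuse).
	buf := []byte{2, 2, 2, 2, 2, 2, 2, 2, 2}

	// Suppose chunks cover [2,3) and [7,9); everything else is a gap.
	copy(buf[2:3], []byte{1})
	copy(buf[7:9], []byte{1, 1})

	// Without zeroing the gaps, the stale 2s would be returned to the caller.
	zeroRange(buf, 0, 2) // gap before the first chunk
	zeroRange(buf, 3, 4) // gap between the chunks

	fmt.Println(buf) // [0 0 1 0 0 0 0 1 1]
}
```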
-rw-r--r--  weed/filer/reader_at.go       33
-rw-r--r--  weed/filer/reader_at_test.go  96
2 files changed, 94 insertions(+), 35 deletions(-)
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index 2b3e77d1b..e18324068 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -127,10 +127,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
nextChunks = c.chunkViews[i+1:]
}
if startOffset < chunk.LogicOffset {
- gap := int(chunk.LogicOffset - startOffset)
+ gap := chunk.LogicOffset - startOffset
glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.LogicOffset)
- n += int(min(int64(gap), remaining))
- startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
+ c.zero(p, startOffset-offset, gap)
+ n += int(min(gap, remaining))
+ startOffset, remaining = chunk.LogicOffset, remaining-gap
if remaining <= 0 {
break
}
@@ -154,10 +155,19 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
// glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
- if err == nil && remaining > 0 && c.fileSize > startOffset {
- delta := int(min(remaining, c.fileSize-startOffset))
- glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
- n += delta
+ // zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file)
+ if err == nil && remaining > 0 {
+ var delta int64
+ if c.fileSize > startOffset {
+ delta = min(remaining, c.fileSize-startOffset)
+ startOffset -= offset
+ } else {
+ delta = remaining
+ startOffset = max(startOffset-offset, startOffset-remaining-offset)
+ }
+ glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
+ c.zero(p, startOffset, delta)
+ n += int(delta)
}
if err == nil && offset+int64(len(p)) >= c.fileSize {
@@ -195,3 +205,12 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
c.lastChunkFid = chunkView.FileId
return
}
+
+func (c *ChunkReadAt) zero(buffer []byte, start, length int64) {
+ end := min(start+length, int64(len(buffer)))
+
+ // zero the bytes
+ for o := start; o < end; o++ {
+ buffer[o] = 0
+ }
+}
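For intuition, the new `zero` helper clamps the end of the zeroed range to the buffer length, so zeroing a trailing gap that extends past the read buffer (or past EOF) cannot write out of bounds. A small standalone sketch of that clamping behavior, again illustrative rather than the SeaweedFS code:

```go
package main

import "fmt"

// zero mirrors the clamping in ChunkReadAt.zero: it never writes past len(buffer).
func zero(buffer []byte, start, length int64) {
	end := start + length
	if end > int64(len(buffer)) {
		end = int64(len(buffer))
	}
	for o := start; o < end; o++ {
		buffer[o] = 0
	}
}

func main() {
	buf := []byte{2, 2, 2, 2}
	// Asking to zero 10 bytes from offset 2 only clears buf[2:4].
	zero(buf, 2, 10)
	fmt.Println(buf) // [2 2 0 0]
}
```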
diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go
index d9afb460c..29bd47ea4 100644
--- a/weed/filer/reader_at_test.go
+++ b/weed/filer/reader_at_test.go
@@ -1,7 +1,7 @@
package filer
import (
- "fmt"
+ "bytes"
"io"
"math"
"strconv"
@@ -75,29 +75,28 @@ func TestReaderAt(t *testing.T) {
readerPattern: NewReaderPattern(),
}
- testReadAt(t, readerAt, 0, 10, 10, io.EOF)
- testReadAt(t, readerAt, 0, 12, 10, io.EOF)
- testReadAt(t, readerAt, 2, 8, 8, io.EOF)
- testReadAt(t, readerAt, 3, 6, 6, nil)
+ testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 0, 12, 12, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 2, 8, 8, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 3, 6, 6, nil, nil, nil)
}
-func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) {
- data := make([]byte, size)
- n, err := readerAt.doReadAt(data, offset)
-
- for _, d := range data {
- fmt.Printf("%x", d)
+func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expectedN int, expectedErr error, data, expectedData []byte) {
+ if data == nil {
+ data = make([]byte, size)
}
- fmt.Println()
+ n, err := readerAt.doReadAt(data, offset)
- if expected != n {
- t.Errorf("unexpected read size: %d, expect: %d", n, expected)
+ if expectedN != n {
+ t.Errorf("unexpected read size: %d, expect: %d", n, expectedN)
}
if err != expectedErr {
t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr)
}
-
+ if expectedData != nil && !bytes.Equal(data, expectedData) {
+ t.Errorf("unexpected read data: %v, expect: %v", data, expectedData)
+ }
}
func TestReaderAt0(t *testing.T) {
@@ -125,12 +124,12 @@ func TestReaderAt0(t *testing.T) {
readerPattern: NewReaderPattern(),
}
- testReadAt(t, readerAt, 0, 10, 10, io.EOF)
- testReadAt(t, readerAt, 3, 16, 7, io.EOF)
- testReadAt(t, readerAt, 3, 5, 5, nil)
+ testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 3, 16, 7, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 3, 5, 5, nil, nil, nil)
- testReadAt(t, readerAt, 11, 5, 0, io.EOF)
- testReadAt(t, readerAt, 10, 5, 0, io.EOF)
+ testReadAt(t, readerAt, 11, 5, 5, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 10, 5, 5, io.EOF, nil, nil)
}
@@ -153,13 +152,54 @@ func TestReaderAt1(t *testing.T) {
readerPattern: NewReaderPattern(),
}
- testReadAt(t, readerAt, 0, 20, 20, io.EOF)
- testReadAt(t, readerAt, 1, 7, 7, nil)
- testReadAt(t, readerAt, 0, 1, 1, nil)
- testReadAt(t, readerAt, 18, 4, 2, io.EOF)
- testReadAt(t, readerAt, 12, 4, 4, nil)
- testReadAt(t, readerAt, 4, 20, 16, io.EOF)
- testReadAt(t, readerAt, 4, 10, 10, nil)
- testReadAt(t, readerAt, 1, 10, 10, nil)
+ testReadAt(t, readerAt, 0, 20, 20, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 1, 7, 7, nil, nil, nil)
+ testReadAt(t, readerAt, 0, 1, 1, nil, nil, nil)
+ testReadAt(t, readerAt, 18, 4, 2, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 12, 4, 4, nil, nil, nil)
+ testReadAt(t, readerAt, 4, 20, 16, io.EOF, nil, nil)
+ testReadAt(t, readerAt, 4, 10, 10, nil, nil, nil)
+ testReadAt(t, readerAt, 1, 10, 10, nil, nil, nil)
+
+}
+
+func TestReaderAtGappedChunksDoNotLeak(t *testing.T) {
+ visibles := []VisibleInterval{
+ {
+ start: 2,
+ stop: 3,
+ fileId: "1",
+ chunkSize: 5,
+ },
+ {
+ start: 7,
+ stop: 9,
+ fileId: "1",
+ chunkSize: 4,
+ },
+ }
+
+ readerAt := &ChunkReadAt{
+ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
+ readerLock: sync.Mutex{},
+ fileSize: 9,
+ readerCache: newReaderCache(3, &mockChunkCache{}, nil),
+ readerPattern: NewReaderPattern(),
+ }
+
+ testReadAt(t, readerAt, 0, 9, 9, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 0, 1, 0, 0, 0, 0, 1, 1})
+ testReadAt(t, readerAt, 1, 8, 8, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 1, 0, 0, 0, 0, 1, 1})
+}
+
+func TestReaderAtSparseFileDoesNotLeak(t *testing.T) {
+ readerAt := &ChunkReadAt{
+ chunkViews: ViewFromVisibleIntervals([]VisibleInterval{}, 0, math.MaxInt64),
+ readerLock: sync.Mutex{},
+ fileSize: 3,
+ readerCache: newReaderCache(3, &mockChunkCache{}, nil),
+ readerPattern: NewReaderPattern(),
+ }
+ testReadAt(t, readerAt, 0, 3, 3, io.EOF, []byte{2, 2, 2}, []byte{0, 0, 0})
+ testReadAt(t, readerAt, 1, 2, 2, io.EOF, []byte{2, 2}, []byte{0, 0})
}