path: root/weed/mount/weedfs_dir_read.go
package mount

import (
	"context"
	"sync"

	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

type DirectoryHandleId uint64

const (
	directoryStreamBaseOffset = 2 // . & ..
	batchSize                 = 1000
)

// DirectoryHandle represents an open directory handle.
// It maintains state for directory listing pagination and is protected by a mutex
// to handle concurrent readdir operations from NFS-Ganesha and other multi-threaded clients.
type DirectoryHandle struct {
	sync.Mutex
	isFinished        bool
	entryStream       []*filer.Entry
	entryStreamOffset uint64
}
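
// FUSE readdir offsets map onto entryStream as follows: offset 1 is ".",
// offset 2 is ".." (directoryStreamBaseOffset), and entryStream[i] is returned
// with Off = entryStreamOffset + i + 1. When the kernel resumes a listing at
// input.Offset, the next entry to emit is entryStream[input.Offset-entryStreamOffset];
// for example, if entries were handed out with offsets 3, 4 and 5, a resume at
// offset 4 continues from entryStream[2].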

func (dh *DirectoryHandle) reset() {
	dh.isFinished = false
	// Nil out pointers to allow garbage collection of old entries,
	// then reuse the slice's capacity to avoid re-allocations.
	for i := range dh.entryStream {
		dh.entryStream[i] = nil
	}
	dh.entryStream = dh.entryStream[:0]
	dh.entryStreamOffset = directoryStreamBaseOffset
}

type DirectoryHandleToInode struct {
	sync.Mutex
	dir2inode map[DirectoryHandleId]*DirectoryHandle
}

func NewDirectoryHandleToInode() *DirectoryHandleToInode {
	return &DirectoryHandleToInode{
		dir2inode: make(map[DirectoryHandleId]*DirectoryHandle),
	}
}

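// AcquireDirectoryHandle allocates a random handle id, registers a fresh
// DirectoryHandle for it, and returns both.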
func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) {
	fh := DirectoryHandleId(util.RandomUint64())

	wfs.dhMap.Lock()
	defer wfs.dhMap.Unlock()
	dh := &DirectoryHandle{}
	dh.reset()
	wfs.dhMap.dir2inode[fh] = dh
	return fh, dh
}

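// GetDirectoryHandle returns the handle registered for dhid. If no handle is
// found, a fresh one is created and registered so that a readdir on an unknown
// handle id can still proceed.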
func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle {
	wfs.dhMap.Lock()
	defer wfs.dhMap.Unlock()
	if dh, found := wfs.dhMap.dir2inode[dhid]; found {
		return dh
	}
	dh := &DirectoryHandle{}
	dh.reset()
	wfs.dhMap.dir2inode[dhid] = dh
	return dh
}

func (wfs *WFS) ReleaseDirectoryHandle(dhid DirectoryHandleId) {
	wfs.dhMap.Lock()
	defer wfs.dhMap.Unlock()
	delete(wfs.dhMap.dir2inode, dhid)
}

// Directory handling

/** Open directory
 *
 * Unless the 'default_permissions' mount option is given,
 * this method should check if opendir is permitted for this
 * directory. Optionally opendir may also return an arbitrary
 * filehandle in the fuse_file_info structure, which will be
 * passed to readdir, releasedir and fsyncdir.
 */
func (wfs *WFS) OpenDir(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) (code fuse.Status) {
	if !wfs.inodeToPath.HasInode(input.NodeId) {
		return fuse.ENOENT
	}
	dhid, _ := wfs.AcquireDirectoryHandle()
	out.Fh = uint64(dhid)
	return fuse.OK
}

/** Release directory
 *
 * If the directory has been removed after the call to opendir, the
 * path parameter will be NULL.
 */
func (wfs *WFS) ReleaseDir(input *fuse.ReleaseIn) {
	wfs.ReleaseDirectoryHandle(DirectoryHandleId(input.Fh))
}

/** Synchronize directory contents
 *
 * If the directory has been removed after the call to opendir, the
 * path parameter will be NULL.
 *
 * If the datasync parameter is non-zero, then only the user data
 * should be flushed, not the meta data
 */
func (wfs *WFS) FsyncDir(cancel <-chan struct{}, input *fuse.FsyncIn) (code fuse.Status) {
	return fuse.OK
}

/** Read directory
 *
 * The filesystem may choose between two modes of operation:
 *
 * 1) The readdir implementation ignores the offset parameter, and
 * passes zero to the filler function's offset.  The filler
 * function will not return '1' (unless an error happens), so the
 * whole directory is read in a single readdir operation.
 *
 * 2) The readdir implementation keeps track of the offsets of the
 * directory entries.  It uses the offset parameter and always
 * passes non-zero offset to the filler function.  When the buffer
 * is full (or an error happens) the filler function will return
 * '1'.
 */
func (wfs *WFS) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code fuse.Status) {
	return wfs.doReadDirectory(input, out, false)
}

func (wfs *WFS) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code fuse.Status) {
	return wfs.doReadDirectory(input, out, true)
}

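// doReadDirectory implements both ReadDir and ReadDirPlus. It resets the handle
// on offset 0, emits "." and ".." for the first two offsets, replays entries
// already cached in the handle's entryStream, and then pages further entries
// from the local meta cache in batches of batchSize, keyed by the name of the
// last entry returned. In plus mode each entry is also looked up so attributes
// can be returned alongside the name.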
func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
	// Get the directory handle and lock it for the duration of this operation.
	// This serializes concurrent readdir calls on the same handle, fixing the
	// race condition that caused hangs with NFS-Ganesha.
	dh := wfs.GetDirectoryHandle(DirectoryHandleId(input.Fh))
	dh.Lock()
	defer dh.Unlock()

	if input.Offset == 0 {
		dh.reset()
	} else if dh.isFinished && input.Offset >= dh.entryStreamOffset {
		entryCurrentIndex := input.Offset - dh.entryStreamOffset
		if uint64(len(dh.entryStream)) <= entryCurrentIndex {
			return fuse.OK
		}
	}

	dirPath, code := wfs.inodeToPath.GetPath(input.NodeId)
	if code != fuse.OK {
		return code
	}

	var dirEntry fuse.DirEntry

	// index is the position in entryStream, used to calculate the offset for the next readdir
	processEachEntryFn := func(entry *filer.Entry, index int64) bool {
		dirEntry.Name = entry.Name()
		dirEntry.Mode = toSyscallMode(entry.Mode)
		inode := wfs.inodeToPath.Lookup(dirPath.Child(dirEntry.Name), entry.Crtime.Unix(), entry.IsDirectory(), len(entry.HardLinkId) > 0, entry.Inode, false)
		dirEntry.Ino = inode

		// Set Off to the next offset so the client can resume from the correct position
		dirEntry.Off = dh.entryStreamOffset + uint64(index) + 1

		if !isPlusMode {
			if !out.AddDirEntry(dirEntry) {
				return false
			}
		} else {
			entryOut := out.AddDirLookupEntry(dirEntry)
			if entryOut == nil {
				return false
			}
			if fh, found := wfs.fhMap.FindFileHandle(inode); found {
				glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name))
				entry = filer.FromPbEntry(string(dirPath), fh.GetEntry().GetEntry())
			}
			wfs.outputFilerEntry(entryOut, inode, entry)
			wfs.inodeToPath.Lookup(dirPath.Child(dirEntry.Name), entry.Crtime.Unix(), entry.IsDirectory(), len(entry.HardLinkId) > 0, entry.Inode, true)
		}
		return true
	}

	if input.Offset < directoryStreamBaseOffset {
		if !isPlusMode {
			if input.Offset == 0 {
				out.AddDirEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: ".", Off: 1})
			}
			out.AddDirEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: "..", Off: 2})
		} else {
			if input.Offset == 0 {
				out.AddDirLookupEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: ".", Off: 1})
			}
			out.AddDirLookupEntry(fuse.DirEntry{Mode: fuse.S_IFDIR, Name: "..", Off: 2})
		}
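		// Advance the offset past "." and ".." so the cache arithmetic below starts at the first real entry.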
		input.Offset = directoryStreamBaseOffset
	}

	var lastEntryName string

	// Read from the cache first, then load the next batch if needed
	if input.Offset >= dh.entryStreamOffset {
		// Handle the case of a new handle with a non-zero offset but an empty cache.
		// This happens when NFS-Ganesha opens multiple directory handles.
		if len(dh.entryStream) == 0 && input.Offset > dh.entryStreamOffset {
			skipCount := int64(input.Offset - dh.entryStreamOffset)

			if err := meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil {
				glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
				return fuse.EIO
			}

			// Load entries from the beginning to fill the cache up to the requested offset
			loadErr := wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, skipCount+int64(batchSize), func(entry *filer.Entry) (bool, error) {
				dh.entryStream = append(dh.entryStream, entry)
				return true, nil
			})
			if loadErr != nil {
				glog.Errorf("list meta cache: %v", loadErr)
				return fuse.EIO
			}
		}

		if input.Offset > dh.entryStreamOffset {
			entryPreviousIndex := (input.Offset - dh.entryStreamOffset) - 1
			if uint64(len(dh.entryStream)) > entryPreviousIndex {
				lastEntryName = dh.entryStream[entryPreviousIndex].Name()
			}
		}

		entryCurrentIndex := int64(input.Offset - dh.entryStreamOffset)
		for int64(len(dh.entryStream)) > entryCurrentIndex {
			entry := dh.entryStream[entryCurrentIndex]
			if processEachEntryFn(entry, entryCurrentIndex) {
				lastEntryName = entry.Name()
				entryCurrentIndex++
			} else {
				return fuse.OK
			}
		}

		// Cache exhausted; load the next batch
		if err := meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil {
			glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
			return fuse.EIO
		}

		// Batch loading: fetch up to batchSize entries after lastEntryName
		loadedCount := 0
		bufferFull := false
		loadErr := wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, lastEntryName, false, int64(batchSize), func(entry *filer.Entry) (bool, error) {
			currentIndex := int64(len(dh.entryStream))
			dh.entryStream = append(dh.entryStream, entry)
			loadedCount++
			if !processEachEntryFn(entry, currentIndex) {
				bufferFull = true
				return false, nil
			}
			return true, nil
		})
		if loadErr != nil {
			glog.Errorf("list meta cache: %v", loadErr)
			return fuse.EIO
		}

		// Mark finished only when loading completed normally (not buffer full)
		// and we got fewer entries than requested
		if !bufferFull && loadedCount < batchSize {
			dh.isFinished = true
		}
	}

	return fuse.OK
}