path: root/weed/filer/leveldb/leveldb_store.go
author     Chris Lu <chrislusf@users.noreply.github.com>  2025-12-09 23:44:15 -0800
committer  GitHub <noreply@github.com>  2025-12-09 23:44:15 -0800
commit     0cd9f341778775195fb44cc32d1434e63dba4fca (patch)
tree       7b3a0bc3a71a18081b6ca4669a810845bb0c4ced /weed/filer/leveldb/leveldb_store.go
parent     1e1473ef4ac8e0ca70f25a3bcf5ef57f79b03779 (diff)
download   seaweedfs-0cd9f341778775195fb44cc32d1434e63dba4fca.tar.xz
           seaweedfs-0cd9f341778775195fb44cc32d1434e63dba4fca.zip
mount: improve EnsureVisited performance with dedup, parallelism, and batching (#7697)
* mount: add singleflight to deduplicate concurrent EnsureVisited calls

  When multiple goroutines access the same uncached directory simultaneously,
  they would all make redundant network requests to the filer. This change uses
  singleflight.Group so that only one goroutine fetches the directory entries
  while the others wait for the result.

  This fixes a race condition where concurrent lookup or readdir operations on
  the same uncached directory would:
  1. Make duplicate network requests to the filer
  2. Insert duplicate entries into the LevelDB cache
  3. Waste CPU and network bandwidth

* mount: fetch parent directories in parallel during EnsureVisited

  Previously, when accessing a deep path like /a/b/c/d, the parent directories
  were fetched serially from the target up to the root. This change:
  1. Collects all uncached directories from the target to the root first
  2. Fetches them all in parallel using errgroup
  3. Relies on singleflight (from the previous commit) for deduplication

  This reduces latency when accessing deep uncached paths, especially in
  high-latency network environments where parallel requests can significantly
  improve performance.

* mount: add batch inserts for LevelDB meta cache

  When populating the meta cache from the filer, entries were inserted one by
  one into LevelDB. This change:
  1. Adds a BatchInsertEntries method to LevelDBStore that uses LevelDB's
     native batch write API
  2. Updates MetaCache to keep a direct reference to the LevelDB store for
     batch operations
  3. Modifies doEnsureVisited to collect entries and insert them in batches
     of 100

  Batch writes are more efficient because they:
  - Reduce the number of individual write operations
  - Reduce disk syncs
  - Improve throughput for large directories

* mount: fix potential nil dereference in MarkChildrenCached

  Add the missing check for inode existence in the inode2path map before
  accessing the InodeEntry. This prevents a potential nil pointer dereference
  if the inode exists in path2inode but not in inode2path (which could happen
  due to race conditions or bugs). This follows the same pattern used in
  IsChildrenCached, which checks for existence before accessing the entry.

* mount: fix batch flush when last entry is hidden

  The previous batch insert implementation relied on the isLast flag to flush
  the remaining entries. However, if the last entry is a hidden system entry
  (like 'topics' or 'etc' in root), the callback returns early and the
  remaining entries in the batch are never flushed.

  Fix by:
  1. Only flushing when the batch reaches the threshold inside the callback
  2. Flushing any remaining entries after ReadDirAllEntries completes
  3. Using error wrapping instead of logging and returning, to avoid
     duplicate logs
  4. Creating a new slice after each flush so the flushed entries can be
     garbage collected
  5. Adding documentation for the batchInsertSize constant

  This ensures all entries are inserted regardless of whether the last entry
  is hidden, and prevents memory retention issues.

* mount: add context support for cancellation in EnsureVisited

  Thread context.Context through the batch insert call chain to enable proper
  cancellation and timeout support:
  1. Use errgroup.WithContext() so that if one fetch fails, the others are
     cancelled
  2. Add a context parameter to BatchInsertEntries for consistency with
     InsertEntry
  3. Pass the context to ReadDirAllEntries for cancellation during network
     calls
  4. Check for context cancellation before starting work in doEnsureVisited
  5. Use %w for error wrapping to preserve error types for inspection

  This prevents unnecessary work when one directory fetch fails and makes the
  batch operations consistent with the existing context-aware APIs.
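The dedup and parallel-fetch pieces described above live in the mount package and are not part of this file's diff. As a rough, self-contained illustration of the pattern the commit message describes, the sketch below combines singleflight.Group, errgroup.WithContext, and a collect-up-to-root pass; the cache type and the ensureVisited / fetchAndCacheDirectory names are hypothetical stand-ins for the real MetaCache / doEnsureVisited code, not the actual implementation.

// Hedged sketch only: a toy cache mimicking the dedup + parallel-fetch
// pattern from the commit message. Names are illustrative, not SeaweedFS code.
package main

import (
	"context"
	"fmt"
	"path/filepath"
	"sync"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/singleflight"
)

type cache struct {
	group   singleflight.Group // collapses concurrent fetches of the same dir
	mu      sync.Mutex
	visited map[string]bool
}

// ensureVisited collects every uncached ancestor of dirPath, then fetches
// them in parallel; if one fetch fails, the shared context cancels the rest.
func (c *cache) ensureVisited(ctx context.Context, dirPath string) error {
	// 1. collect uncached directories from the target up to the root
	//    (assumes dirPath is absolute, so filepath.Dir terminates at "/")
	var pending []string
	c.mu.Lock()
	for p := dirPath; ; p = filepath.Dir(p) {
		if !c.visited[p] {
			pending = append(pending, p)
		}
		if p == "/" {
			break
		}
	}
	c.mu.Unlock()

	// 2. fetch all pending directories in parallel
	eg, ctx := errgroup.WithContext(ctx)
	for _, dir := range pending {
		dir := dir
		eg.Go(func() error {
			// 3. singleflight deduplicates concurrent callers for the same dir
			_, err, _ := c.group.Do(dir, func() (interface{}, error) {
				return nil, c.fetchAndCacheDirectory(ctx, dir)
			})
			return err
		})
	}
	return eg.Wait()
}

// fetchAndCacheDirectory stands in for the filer listing plus the LevelDB
// batch insert; here it only checks cancellation and marks the dir visited.
func (c *cache) fetchAndCacheDirectory(ctx context.Context, dir string) error {
	if err := ctx.Err(); err != nil {
		return err // honor cancellation before doing any work
	}
	c.mu.Lock()
	c.visited[dir] = true
	c.mu.Unlock()
	return nil
}

func main() {
	c := &cache{visited: map[string]bool{"/": true}}
	if err := c.ensureVisited(context.Background(), "/a/b/c/d"); err != nil {
		fmt.Println("ensureVisited:", err)
	}
}

In the real change, fetchAndCacheDirectory would stream entries from the filer and hand them to the LevelDB-backed cache in batches, which is where the BatchInsertEntries method added in the diff below comes in.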
Diffstat (limited to 'weed/filer/leveldb/leveldb_store.go')
-rw-r--r--  weed/filer/leveldb/leveldb_store.go  33
1 file changed, 33 insertions, 0 deletions
diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
index fea6e0a3d..3e001b6df 100644
--- a/weed/filer/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -107,6 +107,39 @@ func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
	return store.InsertEntry(ctx, entry)
}
+// BatchInsertEntries inserts multiple entries in a single LevelDB batch write.
+// This is more efficient than inserting entries one by one as it reduces
+// the number of write operations and syncs to disk.
+func (store *LevelDBStore) BatchInsertEntries(ctx context.Context, entries []*filer.Entry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	batch := new(leveldb.Batch)
+
+	for _, entry := range entries {
+		key := genKey(entry.DirAndName())
+
+		value, err := entry.EncodeAttributesAndChunks()
+		if err != nil {
+			return fmt.Errorf("encoding %s %+v: %w", entry.FullPath, entry.Attr, err)
+		}
+
+		if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
+			value = weed_util.MaybeGzipData(value)
+		}
+
+		batch.Put(key, value)
+	}
+
+	err := store.db.Write(batch, nil)
+	if err != nil {
+		return fmt.Errorf("batch write: %w", err)
+	}
+
+	return nil
+}
+
func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
	key := genKey(fullpath.DirAndName())
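For context on how the new method might be driven from the mount side, below is a hedged caller-side sketch of the collect-then-flush loop described in the commit message: flush when the batch reaches the 100-entry threshold inside the callback, then flush whatever remains once listing finishes. The cacheDirectory function and its list callback are illustrative stand-ins for doEnsureVisited and ReadDirAllEntries, the import paths assume the current module layout, and none of this is code from the patch.

// Illustrative sketch only; not part of this patch.
package metacache_sketch

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/filer/leveldb"
)

// batchInsertSize mirrors the 100-entry threshold from the commit message.
const batchInsertSize = 100

// cacheDirectory streams entries from a listing callback (a stand-in for
// ReadDirAllEntries) into the LevelDB-backed store, flushing every
// batchInsertSize entries and once more after the listing completes.
func cacheDirectory(ctx context.Context, store *leveldb.LevelDBStore,
	list func(eachEntry func(*filer.Entry) bool) error) error {

	batch := make([]*filer.Entry, 0, batchInsertSize)

	var insertErr error
	err := list(func(entry *filer.Entry) bool {
		batch = append(batch, entry)
		if len(batch) >= batchInsertSize {
			if insertErr = store.BatchInsertEntries(ctx, batch); insertErr != nil {
				return false // stop listing once an insert fails
			}
			// fresh slice so the flushed entries can be garbage collected
			batch = make([]*filer.Entry, 0, batchInsertSize)
		}
		return true
	})
	if insertErr != nil {
		return fmt.Errorf("batch insert: %w", insertErr)
	}
	if err != nil {
		return fmt.Errorf("list directory: %w", err)
	}

	// final flush for anything still buffered (in the real fix, this is what
	// catches a batch stranded by a trailing hidden entry)
	if err := store.BatchInsertEntries(ctx, batch); err != nil {
		return fmt.Errorf("flush remaining %d entries: %w", len(batch), err)
	}
	return nil
}

Allocating a fresh slice after each flush, rather than reusing it with batch = batch[:0], lets the already-written entries be garbage collected, matching point 4 of the flush fix in the commit message above.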