path: root/pkg
author     chrislusf <chris.lu@gmail.com>                  2025-12-03 19:42:31 -0800
committer  Chris Lu <chrislusf@users.noreply.github.com>   2025-12-03 20:52:27 -0800
commit     d432e63322e0237f2f5ced3580f6233f401e9e48 (patch)
tree       4287add8705e443840cddd7bb9b7ca24fae5605a /pkg
parent     dfad8b9747b114c863838cf48223fcebe3049890 (diff)
fix: preserve healthy mounts in NodeStageVolume instead of re-staging
Address CodeRabbit review - when a healthy staging path exists after driver restart, rebuild the cache using rebuildVolumeFromStaging() instead of cleaning up and re-staging. This:
- Maintains consistency with NodePublishVolume behavior
- Avoids disrupting existing published volumes that are bind-mounted
- Makes NodeStageVolume idempotent as per CSI spec
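The idea in the diff below is to keep the live FUSE mount left behind by a previous driver instance and rebuild only the in-memory bookkeeping. A minimal sketch of what rebuildVolumeFromStaging could look like, assuming a Volume cache entry keyed by volume ID; the Volume type and its fields here are illustrative, not the driver's actual definitions in pkg/driver:

package driver

// Volume is a hypothetical cache entry; the driver's real type has more
// fields (mount options, unmounter handles, etc.).
type Volume struct {
	VolumeId          string
	StagingTargetPath string
}

// rebuildVolumeFromStaging (sketch): reuse the healthy FUSE mount that is
// already present at stagingTargetPath and rebuild only the in-memory state,
// instead of unmounting and staging again.
func (ns *NodeServer) rebuildVolumeFromStaging(volumeID, stagingTargetPath string) *Volume {
	return &Volume{
		VolumeId:          volumeID,
		StagingTargetPath: stagingTargetPath,
		// No unmounter reference is recovered here: the FUSE process was
		// started by an earlier driver instance, so this entry only records
		// that the path is already staged.
	}
}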
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/driver/nodeserver.go  26
1 file changed, 13 insertions, 13 deletions
diff --git a/pkg/driver/nodeserver.go b/pkg/driver/nodeserver.go
index 95b556f..d861ddc 100644
--- a/pkg/driver/nodeserver.go
+++ b/pkg/driver/nodeserver.go
@@ -62,20 +62,20 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
// 1. The CSI driver restarted and lost its in-memory state
// 2. The FUSE process died leaving a stale mount
if isStagingPathHealthy(stagingTargetPath) {
- // The staging path is healthy - this means the FUSE mount is still active
- // (possibly from before driver restart). We need to clean it up and re-stage
- // because we don't have the unmounter reference to properly manage it.
- glog.Infof("volume %s has existing healthy mount at %s, will re-stage to get proper control", volumeID, stagingTargetPath)
+ // The staging path is healthy - rebuild the cache from the existing mount
+ // This preserves the existing FUSE mount and avoids disrupting any published volumes
+ glog.Infof("volume %s has existing healthy mount at %s, rebuilding cache", volumeID, stagingTargetPath)
+ volume := ns.rebuildVolumeFromStaging(volumeID, stagingTargetPath)
+ ns.volumes.Store(volumeID, volume)
+ glog.Infof("volume %s cache rebuilt from existing staging at %s", volumeID, stagingTargetPath)
+ return &csi.NodeStageVolumeResponse{}, nil
+ }
+
+ // Check if there's a stale/corrupted mount that needs cleanup
+ if _, err := os.Stat(stagingTargetPath); err == nil || mount.IsCorruptedMnt(err) {
+ glog.Infof("volume %s has stale staging path at %s, cleaning up", volumeID, stagingTargetPath)
if err := cleanupStaleStagingPath(stagingTargetPath); err != nil {
- glog.Warningf("failed to cleanup existing healthy staging path %s: %v, will try to stage anyway", stagingTargetPath, err)
- }
- } else {
- // Check if there's a stale/corrupted mount that needs cleanup
- if _, err := os.Stat(stagingTargetPath); err == nil || mount.IsCorruptedMnt(err) {
- glog.Infof("volume %s has stale staging path at %s, cleaning up", volumeID, stagingTargetPath)
- if err := cleanupStaleStagingPath(stagingTargetPath); err != nil {
- glog.Warningf("failed to cleanup stale staging path %s: %v, will try to stage anyway", stagingTargetPath, err)
- }
+ glog.Warningf("failed to cleanup stale staging path %s: %v, will try to stage anyway", stagingTargetPath, err)
}
}
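For context, the healthy-versus-stale distinction above hinges on isStagingPathHealthy and mount.IsCorruptedMnt. A rough sketch of how such a probe can be written with k8s.io/mount-utils, assuming that library is in use; the driver's real isStagingPathHealthy may check differently:

package driver

import (
	"os"

	mount "k8s.io/mount-utils"
)

// isStagingPathHealthy (sketch) reports whether the staging path is backed by
// a live mount rather than a dead FUSE daemon or a plain directory.
func isStagingPathHealthy(path string) bool {
	// A dead FUSE daemon typically surfaces from os.Stat as ENOTCONN
	// ("transport endpoint is not connected"), the same class of error that
	// mount.IsCorruptedMnt recognizes in the cleanup branch above.
	if _, err := os.Stat(path); err != nil {
		return false
	}
	// Confirm the path is actually a mount point, not just an empty directory.
	notMnt, err := mount.New("").IsLikelyNotMountPoint(path)
	return err == nil && !notMnt
}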