-rw-r--r--  LICENSE                                                                |   2
-rw-r--r--  README.md                                                              |   2
-rw-r--r--  other/java/client/pom.xml                                              |   2
-rw-r--r--  other/java/client/pom_debug.xml                                        |   2
-rw-r--r--  other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java     |  34
-rw-r--r--  other/java/hdfs2/dependency-reduced-pom.xml                            |   2
-rw-r--r--  other/java/hdfs2/pom.xml                                               |   2
-rw-r--r--  other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java   |   3
-rw-r--r--  other/java/hdfs3/dependency-reduced-pom.xml                            |   2
-rw-r--r--  other/java/hdfs3/pom.xml                                               |   2
-rw-r--r--  other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java   |   3
-rw-r--r--  unmaintained/see_dat/see_dat_gzip.go                                   |  83
-rw-r--r--  weed/filesys/unimplemented.go                                          |  20
-rw-r--r--  weed/server/volume_grpc_erasure_coding.go                              |   4
-rw-r--r--  weed/shell/command_ec_encode.go                                        |   6
-rw-r--r--  weed/shell/command_volume_fix_replication.go                           |   5
-rw-r--r--  weed/storage/erasure_coding/ec_decoder.go                              |   6
-rw-r--r--  weed/storage/needle/needle_read_write.go                               |   4
-rw-r--r--  weed/util/bounded_tree/bounded_tree.go                                 | 163
-rw-r--r--  weed/util/bounded_tree/bounded_tree_test.go                            | 118
20 files changed, 432 insertions, 33 deletions
diff --git a/LICENSE b/LICENSE
@@ -1,4 +1,4 @@
-                                 Apache License
+g                                 Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
diff --git a/README.md b/README.md
@@ -120,6 +120,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [File TTL][FilerTTL] automatically purge file metadata and actual file data.
+* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver.
 
 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
 [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
@@ -132,6 +133,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 [FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
 [FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
 [VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
+[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
 
 [Back to TOC](#table-of-contents)
diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml
index a8b561251..05061e0f6 100644
--- a/other/java/client/pom.xml
+++ b/other/java/client/pom.xml
@@ -5,7 +5,7 @@
 
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.2.8</version>
+    <version>1.2.9</version>
 
     <parent>
         <groupId>org.sonatype.oss</groupId>
diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml
index 88447f7e7..1d8454bf7 100644
--- a/other/java/client/pom_debug.xml
+++ b/other/java/client/pom_debug.xml
@@ -5,7 +5,7 @@
 
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.2.8</version>
+    <version>1.2.9</version>
 
     <parent>
         <groupId>org.sonatype.oss</groupId>
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
index dc6203e52..18ec77b76 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
@@ -45,28 +45,32 @@ public class SeaweedWrite {
 
         String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
 
+        synchronized (entry) {
+            entry.addChunks(FilerProto.FileChunk.newBuilder()
+                    .setFileId(fileId)
+                    .setOffset(offset)
+                    .setSize(bytesLength)
+                    .setMtime(System.currentTimeMillis() / 10000L)
+                    .setETag(etag)
+                    .setCipherKey(cipherKeyString)
+            );
+        }
+
         // cache fileId ~ bytes
         SeaweedRead.chunkCache.setChunk(fileId, bytes);
 
-        entry.addChunks(FilerProto.FileChunk.newBuilder()
-                .setFileId(fileId)
-                .setOffset(offset)
-                .setSize(bytesLength)
-                .setMtime(System.currentTimeMillis() / 10000L)
-                .setETag(etag)
-                .setCipherKey(cipherKeyString)
-        );
-
     }
 
     public static void writeMeta(final FilerGrpcClient filerGrpcClient,
                                  final String parentDirectory, final FilerProto.Entry.Builder entry) {
-        filerGrpcClient.getBlockingStub().createEntry(
-                FilerProto.CreateEntryRequest.newBuilder()
-                        .setDirectory(parentDirectory)
-                        .setEntry(entry)
-                        .build()
-        );
+        synchronized (entry){
+            filerGrpcClient.getBlockingStub().createEntry(
+                    FilerProto.CreateEntryRequest.newBuilder()
+                            .setDirectory(parentDirectory)
+                            .setEntry(entry)
+                            .build()
+            );
+        }
     }
 
     private static String multipartUpload(String targetUrl,
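
The SeaweedWrite.java hunks above serialize access to the shared FilerProto.Entry.Builder: chunk metadata is appended from upload code while writeMeta hands the same builder to createEntry, so both sides now synchronize on the entry. The sketch below illustrates the same guard-a-shared-collection pattern in Go terms; it is a minimal illustration with invented names (ChunkList, AddChunk, Snapshot), not SeaweedFS code.

    package main

    import (
        "fmt"
        "sync"
    )

    // ChunkList is a hypothetical stand-in for the shared entry builder:
    // several uploader goroutines append chunk metadata while another
    // goroutine snapshots the list to persist it.
    type ChunkList struct {
        mu     sync.Mutex
        chunks []string
    }

    // AddChunk mirrors the synchronized entry.addChunks(...) call.
    func (c *ChunkList) AddChunk(fileId string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.chunks = append(c.chunks, fileId)
    }

    // Snapshot mirrors the synchronized createEntry(...) call: it reads the
    // list under the same lock, so it never observes a half-updated slice.
    func (c *ChunkList) Snapshot() []string {
        c.mu.Lock()
        defer c.mu.Unlock()
        return append([]string(nil), c.chunks...)
    }

    func main() {
        list := &ChunkList{}
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                list.AddChunk(fmt.Sprintf("3,0%d", i)) // concurrent writers
            }(i)
        }
        wg.Wait()
        fmt.Println(len(list.Snapshot()), "chunks recorded")
    }
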
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index bef448f3f..53fb62186 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
+    <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index f3086fab8..0d5b138d5 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <properties>
-        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
index 7b488a5da..e08843caa 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
@@ -69,9 +69,6 @@ public class SeaweedOutputStream extends OutputStream {
     }
 
     private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
-
-        LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);
-
         try {
             SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
         } catch (Exception ex) {
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index f2056b7b1..f5d14acdd 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
+    <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index 6ca210f78..8c88b60df 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <properties>
-        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
index 4f307ff96..96af27fe0 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
@@ -78,9 +78,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
     }
 
     private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
-
-        LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);
-
         try {
             SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
         } catch (Exception ex) {
diff --git a/unmaintained/see_dat/see_dat_gzip.go b/unmaintained/see_dat/see_dat_gzip.go
new file mode 100644
index 000000000..cec073e3f
--- /dev/null
+++ b/unmaintained/see_dat/see_dat_gzip.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+    "bytes"
+    "compress/gzip"
+    "crypto/md5"
+    "flag"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "time"
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/storage"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
+    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type VolumeFileScanner4SeeDat struct {
+    version needle.Version
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+    scanner.version = superBlock.Version
+    return nil
+}
+
+func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
+    return true
+}
+
+var (
+    files     = int64(0)
+    filebytes = int64(0)
+    diffbytes = int64(0)
+)
+
+func Compresssion(data []byte) float64 {
+    if len(data) <= 128 {
+        return 100.0
+    }
+    compressed, _ := util.GzipData(data[0:128])
+    return float64(len(compressed)*10) / 1280.0
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+    t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
+    glog.V(0).Info("----------------------------------------------------------------------------------")
+    glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v hasmime[%t] mime[%s] (len: %d)",
+        *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.HasMime(), string(n.Mime), len(n.Mime))
+    r, err := gzip.NewReader(bytes.NewReader(n.Data))
+    if err == nil {
+        buf := bytes.Buffer{}
+        h := md5.New()
+        c, _ := io.Copy(&buf, r)
+        d := buf.Bytes()
+        io.Copy(h, bytes.NewReader(d))
+        diff := (int64(n.DataSize) - int64(c))
+        diffbytes += diff
+        glog.V(0).Infof("was gzip! stored_size: %d orig_size: %d diff: %d(%d) mime:%s compression-of-128: %.2f md5: %x", n.DataSize, c, diff, diffbytes, http.DetectContentType(d), Compresssion(d), h.Sum(nil))
+    } else {
+        glog.V(0).Infof("no gzip!")
+    }
+    return nil
+}
+
+var (
+    _                = ioutil.ReadAll
+    volumePath       = flag.String("dir", "/tmp", "data directory to store files")
+    volumeCollection = flag.String("collection", "", "the volume collection name")
+    volumeId         = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
+)
+
+func main() {
+    flag.Parse()
+    vid := needle.VolumeId(*volumeId)
+    glog.V(0).Info("Starting")
+    scanner := &VolumeFileScanner4SeeDat{}
+    err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
+    if err != nil {
+        glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+    }
+}
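
In see_dat_gzip.go above, the Compresssion helper estimates compressibility by gzipping only the first 128 bytes and returning len(compressed)*10/1280.0, which simplifies to compressedLength/128; values near 1.0 mean the sample barely shrinks, and inputs of 128 bytes or less get a 100.0 sentinel. Below is a standalone sketch of the same heuristic using only the standard library; the function and variable names are mine, not part of the commit.

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "strings"
    )

    // sampleRatio gzips the first 128 bytes of data and returns
    // compressedLen/128; a value close to 1.0 suggests the data is already
    // compressed or random, a small value suggests it would shrink well.
    // Inputs of 128 bytes or less return the 100.0 sentinel, mirroring the
    // helper in see_dat_gzip.go.
    func sampleRatio(data []byte) float64 {
        if len(data) <= 128 {
            return 100.0
        }
        var buf bytes.Buffer
        w := gzip.NewWriter(&buf)
        w.Write(data[:128])
        w.Close()
        return float64(buf.Len()) / 128.0
    }

    func main() {
        text := []byte(strings.Repeat("seaweedfs ", 100)) // highly repetitive
        fmt.Printf("repetitive text: %.2f\n", sampleRatio(text))

        var pseudoRandom []byte // deterministic but poorly compressible bytes
        for i := 0; i < 1024; i++ {
            pseudoRandom = append(pseudoRandom, byte(i*131+17))
        }
        fmt.Printf("pseudo-random bytes: %.2f\n", sampleRatio(pseudoRandom))
    }
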
diff --git a/weed/filesys/unimplemented.go b/weed/filesys/unimplemented.go
new file mode 100644
index 000000000..1f4fe554d
--- /dev/null
+++ b/weed/filesys/unimplemented.go
@@ -0,0 +1,20 @@
+package filesys
+
+import (
+    "context"
+
+    "github.com/seaweedfs/fuse"
+    "github.com/seaweedfs/fuse/fs"
+)
+
+// https://github.com/bazil/fuse/issues/130
+
+var _ = fs.NodeAccesser(&Dir{})
+func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error {
+    return fuse.ENOSYS
+}
+
+var _ = fs.NodeAccesser(&File{})
+func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error {
+    return fuse.ENOSYS
+}
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 66dd5bf8d..3d11cff29 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -201,9 +201,7 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
         if err := os.Remove(baseFilename + ".ecx"); err != nil {
             return nil, err
         }
-        if err := os.Remove(baseFilename + ".ecj"); err != nil {
-            return nil, err
-        }
+        os.Remove(baseFilename + ".ecj")
     }
     if !hasIdxFile {
         // .vif is used for ec volumes and normal volumes
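
The VolumeEcShardsDelete hunk above stops failing when the .ecj file is absent: the deletion journal only exists if deletes were recorded against the EC volume, and the ec_decoder.go hunk further down applies the same tolerance when reading. The commit simply discards the os.Remove error; the sketch below shows an alternative that still surfaces unexpected failures, with the helper name (removeIfPresent) being mine.

    package main

    import (
        "fmt"
        "os"
    )

    // removeIfPresent deletes path but treats "file does not exist" as
    // success, which is the property the optional .ecj cleanup needs.
    // Unlike the commit, it still reports other failures (for example
    // permission errors).
    func removeIfPresent(path string) error {
        if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
            return err
        }
        return nil
    }

    func main() {
        // Removing a file that does not exist is not an error here.
        if err := removeIfPresent("/tmp/does-not-exist.ecj"); err != nil {
            fmt.Println("unexpected failure:", err)
            return
        }
        fmt.Println("cleanup ok")
    }
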
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 165809d05..5a8146954 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -123,6 +123,8 @@ func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId
 
     for _, location := range locations {
 
+        fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url)
+
         err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
             _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
                 VolumeId: uint32(volumeId),
@@ -141,6 +143,8 @@
 
 func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
 
+    fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)
+
     err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
         _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
             VolumeId: uint32(volumeId),
@@ -204,6 +208,8 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 
 func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
 
+    fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
+
     // parallelize
     shardIdChan := make(chan []uint32, len(targetServers))
     var wg sync.WaitGroup
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index 19da89b67..6b5e4e735 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -121,7 +121,10 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
                 VolumeId:       volumeInfo.Id,
                 SourceDataNode: sourceNode.dataNode.Id,
             })
-            return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
+            if replicateErr != nil {
+                return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
+            }
+            return nil
         })
 
         if err != nil {
diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go
index ae77cee3f..7b42d02e7 100644
--- a/weed/storage/erasure_coding/ec_decoder.go
+++ b/weed/storage/erasure_coding/ec_decoder.go
@@ -11,6 +11,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
     "github.com/chrislusf/seaweedfs/weed/storage/super_block"
     "github.com/chrislusf/seaweedfs/weed/storage/types"
+    "github.com/chrislusf/seaweedfs/weed/util"
 )
 
 // write .idx file from .ecx and .ecj files
@@ -118,9 +119,12 @@ func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId
 }
 
 func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
+    if !util.FileExists(baseFileName + ".ecj") {
+        return nil
+    }
     ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
     if openErr != nil {
-        return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
+        return fmt.Errorf("cannot open ec index %s.ecj: %v", baseFileName, openErr)
     }
     defer ecjFile.Close()
diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go
index 7f8aa4823..e89e253cd 100644
--- a/weed/storage/needle/needle_read_write.go
+++ b/weed/storage/needle/needle_read_write.go
@@ -140,6 +140,10 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
         err = fmt.Errorf("Cannot Read Current Volume Position: %v", e)
         return
     }
+    if offset >= MaxPossibleVolumeSize {
+        err = fmt.Errorf("Volume Size %d Exeededs %d", offset, MaxPossibleVolumeSize)
+        return
+    }
 
     bytesToWrite, size, actualSize, err := n.prepareWriteBuffer(version)
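
The command_volume_fix_replication.go change above fixes an error-handling bug: fmt.Errorf always returns a non-nil error, so wrapping replicateErr unconditionally turned every successful copy into a reported failure. A minimal sketch of the broken and corrected patterns, with hypothetical helper names:

    package main

    import (
        "errors"
        "fmt"
    )

    // wrapAlways reproduces the old behavior: even when err is nil, fmt.Errorf
    // produces a non-nil error, so callers see a failure after a successful copy.
    func wrapAlways(err error) error {
        return fmt.Errorf("copying: %v", err)
    }

    // wrapIfFailed is the corrected pattern from the commit: only wrap when
    // there is actually something to report.
    func wrapIfFailed(err error) error {
        if err != nil {
            return fmt.Errorf("copying: %v", err)
        }
        return nil
    }

    func main() {
        fmt.Println(wrapAlways(nil) == nil)              // false: success reported as failure
        fmt.Println(wrapIfFailed(nil) == nil)            // true
        fmt.Println(wrapIfFailed(errors.New("timeout"))) // copying: timeout
    }
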
diff --git a/weed/util/bounded_tree/bounded_tree.go b/weed/util/bounded_tree/bounded_tree.go
new file mode 100644
index 000000000..5aa31ef74
--- /dev/null
+++ b/weed/util/bounded_tree/bounded_tree.go
@@ -0,0 +1,163 @@
+package bounded_tree
+
+import (
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type Node struct {
+    Parent   *Node
+    Name     string
+    Children map[string]*Node
+}
+
+type BoundedTree struct {
+    root *Node
+}
+
+func NewBoundedTree() *BoundedTree {
+    return &BoundedTree{
+        root: &Node{
+            Name: "/",
+        },
+    }
+}
+
+type VisitNodeFunc func(path util.FullPath) (childDirectories []string, err error)
+
+// If the path is not visited, call the visitFn for each level of directory
+// No action if the directory has been visited before or does not exist.
+// A leaf node, which has no children, represents a directory not visited.
+// A non-leaf node or a non-existing node represents a directory already visited, or does not need to visit.
+func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) {
+
+    // println()
+    // println("EnsureVisited", p)
+
+    if t.root == nil {
+        return
+    }
+    components := p.Split()
+    // fmt.Printf("components %v %d\n", components, len(components))
+    if canDelete := t.ensureVisited(t.root, util.FullPath("/"), components, 0, visitFn); canDelete {
+        t.root = nil
+    }
+}
+
+func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, components []string, i int, visitFn VisitNodeFunc) (canDeleteNode bool) {
+
+    // println("ensureVisited", currentPath, i)
+
+    if n == nil {
+        // fmt.Printf("%s null\n", currentPath)
+        return
+    }
+
+    if n.isVisited() {
+        // fmt.Printf("%s visited %v\n", currentPath, n.Name)
+    } else {
+        // fmt.Printf("ensure %v\n", currentPath)
+
+        children, err := visitFn(currentPath)
+        if err != nil {
+            glog.V(0).Infof("failed to visit %s: %v", currentPath, err)
+            return
+        }
+
+        if len(children) == 0 {
+            // fmt.Printf("  canDelete %v without children\n", currentPath)
+            return true
+        }
+
+        n.Children = make(map[string]*Node)
+        for _, child := range children {
+            // fmt.Printf("  add child %v %v\n", currentPath, child)
+            n.Children[child] = &Node{
+                Name: child,
+            }
+        }
+    }
+
+    if i >= len(components) {
+        return
+    }
+
+    // fmt.Printf("  check child %v %v\n", currentPath, components[i])
+
+    toVisitNode, found := n.Children[components[i]]
+    if !found {
+        // fmt.Printf("  did not find child %v %v\n", currentPath, components[i])
+        return
+    }
+
+    // fmt.Printf("  ensureVisited %v %v\n", currentPath, toVisitNode.Name)
+
+    if canDelete := t.ensureVisited(toVisitNode, currentPath.Child(components[i]), components, i+1, visitFn); canDelete {
+
+        // fmt.Printf("  delete %v %v\n", currentPath, components[i])
+        delete(n.Children, components[i])
+
+        if len(n.Children) == 0 {
+            // fmt.Printf("  canDelete %v\n", currentPath)
+            return true
+        }
+    }
+
+    return false
+
+}
+
+func (n *Node) isVisited() bool {
+    if n == nil {
+        return true
+    }
+    if len(n.Children) > 0 {
+        return true
+    }
+    return false
+}
+
+func (n *Node) getChild(childName string) *Node {
+    if n == nil {
+        return nil
+    }
+    if len(n.Children) > 0 {
+        return n.Children[childName]
+    }
+    return nil
+}
+
+func (t *BoundedTree) HasVisited(p util.FullPath) bool {
+
+    if t.root == nil {
+        return true
+    }
+
+    components := p.Split()
+    // fmt.Printf("components %v %d\n", components, len(components))
+    return t.hasVisited(t.root, util.FullPath("/"), components, 0)
+}
+
+func (t *BoundedTree) hasVisited(n *Node, currentPath util.FullPath, components []string, i int) bool {
+
+    if n == nil {
+        return true
+    }
+
+    if !n.isVisited() {
+        return false
+    }
+
+    // fmt.Printf("  hasVisited child %v %v\n", currentPath, components[i])
+
+    toVisitNode, found := n.Children[components[i]]
+    if !found {
+        return true
+    }
+
+    if i+1 >= len(components) {
+        return false
+    }
+
+    return t.hasVisited(toVisitNode, currentPath.Child(components[i]), components, i+1)
+
+}
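
bounded_tree.go above lets the FUSE mount remember which directories have already been listed from the filer, pruning subtrees once they are fully visited so the tree stays bounded. The test file that follows exercises it in detail; the shorter sketch below shows the intended calling pattern, with the listDirectory callback and the directory layout invented for illustration.

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/util"
        "github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
    )

    // listDirectory stands in for a real filer listing call; it returns the
    // child directories of a path. The layout here is made up for the example.
    func listDirectory(path util.FullPath) (childDirectories []string, err error) {
        fmt.Println("listing", path)
        switch path {
        case "/":
            return []string{"docs"}, nil
        case "/docs":
            return []string{"2020"}, nil
        }
        return nil, nil
    }

    func main() {
        tree := bounded_tree.NewBoundedTree()

        // First access walks every level and calls listDirectory for "/" and "/docs".
        tree.EnsureVisited(util.FullPath("/docs"), listDirectory)

        // A repeated access finds the nodes already marked visited, so the
        // callback is not invoked again for "/" or "/docs".
        tree.EnsureVisited(util.FullPath("/docs"), listDirectory)
    }
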
diff --git a/weed/util/bounded_tree/bounded_tree_test.go b/weed/util/bounded_tree/bounded_tree_test.go
new file mode 100644
index 000000000..18bc2f6d5
--- /dev/null
+++ b/weed/util/bounded_tree/bounded_tree_test.go
@@ -0,0 +1,118 @@
+package bounded_tree
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+    visitFn = func(path util.FullPath) (childDirectories []string, err error) {
+        fmt.Printf("  visit %v ...\n", path)
+        switch path {
+        case "/":
+            return []string{"a", "g", "h"}, nil
+        case "/a":
+            return []string{"b", "f"}, nil
+        case "/a/b":
+            return []string{"c", "e"}, nil
+        case "/a/b/c":
+            return []string{"d"}, nil
+        case "/a/b/c/d":
+            return []string{"i", "j"}, nil
+        case "/a/b/c/d/i":
+            return []string{}, nil
+        case "/a/b/c/d/j":
+            return []string{}, nil
+        case "/a/b/e":
+            return []string{}, nil
+        case "/a/f":
+            return []string{}, nil
+        }
+        return nil, nil
+    }
+
+    printMap = func(m map[string]*Node) {
+        for k := range m {
+            println("  >", k)
+        }
+    }
+)
+
+func TestBoundedTree(t *testing.T) {
+
+    // a/b/c/d/i
+    // a/b/c/d/j
+    // a/b/c/d
+    // a/b/e
+    // a/f
+    // g
+    // h
+
+    tree := NewBoundedTree()
+
+    tree.EnsureVisited(util.FullPath("/a/b/c"), visitFn)
+
+    printMap(tree.root.Children)
+
+    a := tree.root.getChild("a")
+
+    b := a.getChild("b")
+    if !b.isVisited() {
+        t.Errorf("expect visited /a/b")
+    }
+    c := b.getChild("c")
+    if !c.isVisited() {
+        t.Errorf("expect visited /a/b/c")
+    }
+
+    d := c.getChild("d")
+    if d.isVisited() {
+        t.Errorf("expect unvisited /a/b/c/d")
+    }
+
+    tree.EnsureVisited(util.FullPath("/a/b/c/d"), visitFn)
+    tree.EnsureVisited(util.FullPath("/a/b/c/d/i"), visitFn)
+    tree.EnsureVisited(util.FullPath("/a/b/c/d/j"), visitFn)
+    tree.EnsureVisited(util.FullPath("/a/b/e"), visitFn)
+    tree.EnsureVisited(util.FullPath("/a/f"), visitFn)
+
+    printMap(tree.root.Children)
+
+}
+
+func TestEmptyBoundedTree(t *testing.T) {
+
+    // g
+    // h
+
+    tree := NewBoundedTree()
+
+    visitFn := func(path util.FullPath) (childDirectories []string, err error) {
+        fmt.Printf("  visit %v ...\n", path)
+        switch path {
+        case "/":
+            return []string{"g", "h"}, nil
+        }
+        t.Fatalf("expected visit %s", path)
+        return nil, nil
+    }
+
+    tree.EnsureVisited(util.FullPath("/a/b"), visitFn)
+
+    tree.EnsureVisited(util.FullPath("/a/b"), visitFn)
+
+    printMap(tree.root.Children)
+
+    println(tree.HasVisited(util.FullPath("/a/b")))
+    println(tree.HasVisited(util.FullPath("/a")))
+    println(tree.HasVisited(util.FullPath("/g")))
+    println(tree.HasVisited(util.FullPath("/g/x")))
+
+}
