author     dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>  2025-11-19 21:22:18 -0800
committer  GitHub <noreply@github.com>  2025-11-19 21:22:18 -0800
commit     c14e513964ff708b7ace352b7e86198b3ebe6827 (patch)
tree       fc64d74d241faf292d2b37177e266614e15d9d18 /other/java/hdfs3
parent     7dce429e61037529694727134f828d273704f8ec (diff)
chore(deps): bump org.apache.hadoop:hadoop-common from 3.2.4 to 3.4.0 in /other/java/hdfs3 (#7512)
* chore(deps): bump org.apache.hadoop:hadoop-common in /other/java/hdfs3

  Bumps org.apache.hadoop:hadoop-common from 3.2.4 to 3.4.0.

  ---
  updated-dependencies:
  - dependency-name: org.apache.hadoop:hadoop-common
    dependency-version: 3.4.0
    dependency-type: direct:production
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* add java client unit tests
* Update dependency-reduced-pom.xml
* add java integration tests
* fix
* fix buffer

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: chrislu <chris.lu@gmail.com>
Diffstat (limited to 'other/java/hdfs3')
-rw-r--r--  other/java/hdfs3/README.md                                                    | 190
-rw-r--r--  other/java/hdfs3/dependency-reduced-pom.xml                                   | 263
-rw-r--r--  other/java/hdfs3/pom.xml                                                      |  21
-rw-r--r--  other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemConfigTest.java  |  90
-rw-r--r--  other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemTest.java        | 379
5 files changed, 933 insertions, 10 deletions
diff --git a/other/java/hdfs3/README.md b/other/java/hdfs3/README.md
new file mode 100644
index 000000000..f1afee264
--- /dev/null
+++ b/other/java/hdfs3/README.md
@@ -0,0 +1,190 @@
+# SeaweedFS Hadoop3 Client
+
+Hadoop FileSystem implementation for SeaweedFS, compatible with Hadoop 3.x.
+
+## Building
+
+```bash
+mvn clean install
+```
+
+## Testing
+
+This project includes two types of tests:
+
+### 1. Configuration Tests (No SeaweedFS Required)
+
+These tests verify configuration handling and initialization logic without requiring a running SeaweedFS instance:
+
+```bash
+mvn test -Dtest=SeaweedFileSystemConfigTest
+```
+
+### 2. Integration Tests (Requires SeaweedFS)
+
+These tests verify actual FileSystem operations against a running SeaweedFS instance.
+
+#### Prerequisites
+
+1. Start SeaweedFS with default ports:
+ ```bash
+ # Terminal 1: Start master
+ weed master
+
+ # Terminal 2: Start volume server
+ weed volume -mserver=localhost:9333
+
+ # Terminal 3: Start filer
+ weed filer -master=localhost:9333
+ ```
+
+2. Verify services are running:
+ - Master: http://localhost:9333
+ - Filer HTTP: http://localhost:8888
+ - Filer gRPC: localhost:18888
+
+#### Running Integration Tests
+
+```bash
+# Enable integration tests
+export SEAWEEDFS_TEST_ENABLED=true
+
+# Run all tests
+mvn test
+
+# Run specific test
+mvn test -Dtest=SeaweedFileSystemTest
+```
+
+### Test Configuration
+
+Integration tests are configured via the following environment variable and connection defaults:
+
+- `SEAWEEDFS_TEST_ENABLED`: Set to `true` to enable integration tests (default: false)
+- Tests use these default connection settings:
+ - Filer Host: localhost
+ - Filer HTTP Port: 8888
+ - Filer gRPC Port: 18888
+
+### Running Tests with Custom Configuration
+
+To test against a different SeaweedFS instance, modify the test code or use Hadoop configuration:
+
+```java
+conf.set("fs.seaweed.filer.host", "your-host");
+conf.setInt("fs.seaweed.filer.port", 8888);
+conf.setInt("fs.seaweed.filer.port.grpc", 18888);
+```
+
+## Test Coverage
+
+The test suite covers:
+
+- **Configuration & Initialization**
+ - URI parsing and configuration
+ - Default values
+ - Configuration overrides
+ - Working directory management
+
+- **File Operations**
+ - Create files
+ - Read files
+ - Write files
+ - Append to files
+ - Delete files
+
+- **Directory Operations**
+ - Create directories
+ - List directory contents
+ - Delete directories (recursive and non-recursive)
+
+- **Metadata Operations**
+ - Get file status
+ - Set permissions
+ - Set owner/group
+ - Rename files and directories
+
+## Usage in Hadoop
+
+1. Copy the built JAR to your Hadoop classpath:
+ ```bash
+ cp target/seaweedfs-hadoop3-client-*.jar $HADOOP_HOME/share/hadoop/common/lib/
+ ```
+
+2. Configure `core-site.xml`:
+ ```xml
+ <configuration>
+ <property>
+ <name>fs.seaweedfs.impl</name>
+ <value>seaweed.hdfs.SeaweedFileSystem</value>
+ </property>
+ <property>
+ <name>fs.seaweed.filer.host</name>
+ <value>localhost</value>
+ </property>
+ <property>
+ <name>fs.seaweed.filer.port</name>
+ <value>8888</value>
+ </property>
+ <property>
+ <name>fs.seaweed.filer.port.grpc</name>
+ <value>18888</value>
+ </property>
+ </configuration>
+ ```
+
+3. Use SeaweedFS with Hadoop commands:
+ ```bash
+ hadoop fs -ls seaweedfs://localhost:8888/
+ hadoop fs -mkdir seaweedfs://localhost:8888/test
+ hadoop fs -put local.txt seaweedfs://localhost:8888/test/
+ ```
+
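+For programmatic access from a Java application, the same configuration keys can be set on a
+Hadoop `Configuration` and the filesystem resolved through the standard `FileSystem` API. The
+snippet below is only a minimal sketch, not part of this project; the class name, paths, and
+localhost ports are illustrative and assume the local setup described above:
+
+```java
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class SeaweedFsQuickStart {
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        // Same keys as in core-site.xml above; values assume a local default deployment.
+        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
+        conf.set("fs.seaweed.filer.host", "localhost");
+        conf.setInt("fs.seaweed.filer.port", 8888);
+        conf.setInt("fs.seaweed.filer.port.grpc", 18888);
+
+        // Resolve SeaweedFS through the normal Hadoop FileSystem factory.
+        FileSystem fs = FileSystem.get(new URI("seaweedfs://localhost:8888/"), conf);
+
+        Path dir = new Path("/quickstart");
+        fs.mkdirs(dir);
+        try (FSDataOutputStream out = fs.create(new Path(dir, "hello.txt"))) {
+            out.write("Hello, SeaweedFS!".getBytes());
+        }
+        fs.close();
+    }
+}
+```
+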
+## Continuous Integration
+
+For CI environments, tests can be run in two modes:
+
+1. **Configuration Tests Only** (default, no SeaweedFS required):
+ ```bash
+ mvn test -Dtest=SeaweedFileSystemConfigTest
+ ```
+
+2. **Full Integration Tests** (requires SeaweedFS):
+ ```bash
+ # Start SeaweedFS in CI environment
+ # Then run:
+ export SEAWEEDFS_TEST_ENABLED=true
+ mvn test
+ ```
+
+## Troubleshooting
+
+### Tests are skipped
+
+If you see "Skipping test - SEAWEEDFS_TEST_ENABLED not set":
+```bash
+export SEAWEEDFS_TEST_ENABLED=true
+```
+
+### Connection refused errors
+
+Ensure SeaweedFS is running and accessible:
+```bash
+curl http://localhost:8888/
+```
+
+### gRPC errors
+
+Verify the gRPC port is accessible:
+```bash
+# Should show the port is listening
+netstat -an | grep 18888
+```
+
+## Contributing
+
+When adding new features, please include:
+1. Configuration tests (no SeaweedFS required)
+2. Integration tests (with the `SEAWEEDFS_TEST_ENABLED` guard, as sketched below)
+3. Documentation updates
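+
+The `SEAWEEDFS_TEST_ENABLED` guard follows the pattern already used in `SeaweedFileSystemTest`.
+A minimal sketch for a new integration test (class and method names are only placeholders):
+
+```java
+import org.junit.Test;
+
+public class MyFeatureIntegrationTest {
+
+    // Same guard as the existing integration tests: only run when explicitly enabled.
+    private static final boolean TESTS_ENABLED =
+            "true".equalsIgnoreCase(System.getenv("SEAWEEDFS_TEST_ENABLED"));
+
+    @Test
+    public void testMyFeature() throws Exception {
+        if (!TESTS_ENABLED) {
+            System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+            return;
+        }
+        // ... exercise the new feature against the local filer (HTTP 8888, gRPC 18888) ...
+    }
+}
+```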
+
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index decf55a59..d3c2751a5 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -140,7 +140,7 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
- <version>3.2.4</version>
+ <version>3.4.0</version>
<scope>provided</scope>
<exclusions>
<exclusion>
@@ -172,10 +172,18 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
- <version>3.2.4</version>
+ <version>3.4.0</version>
<scope>provided</scope>
<exclusions>
<exclusion>
+ <artifactId>hadoop-shaded-protobuf_3_21</artifactId>
+ <groupId>org.apache.hadoop.thirdparty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>hadoop-shaded-guava</artifactId>
+ <groupId>org.apache.hadoop.thirdparty</groupId>
+ </exclusion>
+ <exclusion>
<artifactId>commons-cli</artifactId>
<groupId>commons-cli</groupId>
</exclusion>
@@ -200,8 +208,8 @@
<groupId>javax.servlet</groupId>
</exclusion>
<exclusion>
- <artifactId>javax.activation-api</artifactId>
- <groupId>javax.activation</groupId>
+ <artifactId>jakarta.activation-api</artifactId>
+ <groupId>jakarta.activation</groupId>
</exclusion>
<exclusion>
<artifactId>jetty-server</artifactId>
@@ -233,7 +241,11 @@
</exclusion>
<exclusion>
<artifactId>jersey-json</artifactId>
- <groupId>com.sun.jersey</groupId>
+ <groupId>com.github.pjfanning</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jettison</artifactId>
+ <groupId>org.codehaus.jettison</groupId>
</exclusion>
<exclusion>
<artifactId>jersey-server</artifactId>
@@ -288,19 +300,248 @@
<groupId>org.apache.curator</groupId>
</exclusion>
<exclusion>
- <artifactId>htrace-core4</artifactId>
- <groupId>org.apache.htrace</groupId>
+ <artifactId>zookeeper</artifactId>
+ <groupId>org.apache.zookeeper</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>netty-handler</artifactId>
+ <groupId>io.netty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>netty-transport-native-epoll</artifactId>
+ <groupId>io.netty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>metrics-core</artifactId>
+ <groupId>io.dropwizard.metrics</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-compress</artifactId>
+ <groupId>org.apache.commons</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <groupId>org.bouncycastle</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>kerb-core</artifactId>
+ <groupId>org.apache.kerby</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jackson-databind</artifactId>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>stax2-api</artifactId>
+ <groupId>org.codehaus.woodstox</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>woodstox-core</artifactId>
+ <groupId>com.fasterxml.woodstox</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>dnsjava</artifactId>
+ <groupId>dnsjava</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>snappy-java</artifactId>
+ <groupId>org.xerial.snappy</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>hadoop-annotations</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.13.1</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <artifactId>hamcrest-core</artifactId>
+ <groupId>org.hamcrest</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>3.12.4</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <artifactId>byte-buddy</artifactId>
+ <groupId>net.bytebuddy</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>byte-buddy-agent</artifactId>
+ <groupId>net.bytebuddy</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>objenesis</artifactId>
+ <groupId>org.objenesis</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>3.4.0</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <artifactId>hadoop-shaded-protobuf_3_21</artifactId>
+ <groupId>org.apache.hadoop.thirdparty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>hadoop-shaded-guava</artifactId>
+ <groupId>org.apache.hadoop.thirdparty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-cli</artifactId>
+ <groupId>commons-cli</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-math3</artifactId>
+ <groupId>org.apache.commons</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-io</artifactId>
+ <groupId>commons-io</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-net</artifactId>
+ <groupId>commons-net</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-collections</artifactId>
+ <groupId>commons-collections</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>javax.servlet-api</artifactId>
+ <groupId>javax.servlet</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jakarta.activation-api</artifactId>
+ <groupId>jakarta.activation</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jetty-server</artifactId>
+ <groupId>org.eclipse.jetty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jetty-util</artifactId>
+ <groupId>org.eclipse.jetty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jetty-servlet</artifactId>
+ <groupId>org.eclipse.jetty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jetty-webapp</artifactId>
+ <groupId>org.eclipse.jetty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jsp-api</artifactId>
+ <groupId>javax.servlet.jsp</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jersey-core</artifactId>
+ <groupId>com.sun.jersey</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jersey-servlet</artifactId>
+ <groupId>com.sun.jersey</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jersey-json</artifactId>
+ <groupId>com.github.pjfanning</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jettison</artifactId>
+ <groupId>org.codehaus.jettison</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jersey-server</artifactId>
+ <groupId>com.sun.jersey</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>reload4j</artifactId>
+ <groupId>ch.qos.reload4j</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-beanutils</artifactId>
+ <groupId>commons-beanutils</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-configuration2</artifactId>
+ <groupId>org.apache.commons</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-lang3</artifactId>
+ <groupId>org.apache.commons</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>commons-text</artifactId>
+ <groupId>org.apache.commons</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>slf4j-reload4j</artifactId>
+ <groupId>org.slf4j</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>avro</artifactId>
+ <groupId>org.apache.avro</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>re2j</artifactId>
+ <groupId>com.google.re2j</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>hadoop-auth</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>jsch</artifactId>
+ <groupId>com.jcraft</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>curator-client</artifactId>
+ <groupId>org.apache.curator</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>curator-recipes</artifactId>
+ <groupId>org.apache.curator</groupId>
</exclusion>
<exclusion>
<artifactId>zookeeper</artifactId>
<groupId>org.apache.zookeeper</groupId>
</exclusion>
<exclusion>
+ <artifactId>netty-handler</artifactId>
+ <groupId>io.netty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>netty-transport-native-epoll</artifactId>
+ <groupId>io.netty</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>metrics-core</artifactId>
+ <groupId>io.dropwizard.metrics</groupId>
+ </exclusion>
+ <exclusion>
<artifactId>commons-compress</artifactId>
<groupId>org.apache.commons</groupId>
</exclusion>
<exclusion>
- <artifactId>kerb-simplekdc</artifactId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <groupId>org.bouncycastle</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>kerb-core</artifactId>
<groupId>org.apache.kerby</groupId>
</exclusion>
<exclusion>
@@ -320,6 +561,10 @@
<groupId>dnsjava</groupId>
</exclusion>
<exclusion>
+ <artifactId>snappy-java</artifactId>
+ <groupId>org.xerial.snappy</groupId>
+ </exclusion>
+ <exclusion>
<artifactId>hadoop-annotations</artifactId>
<groupId>org.apache.hadoop</groupId>
</exclusion>
@@ -328,6 +573,6 @@
</dependencies>
<properties>
<seaweedfs.client.version>3.80</seaweedfs.client.version>
- <hadoop.version>3.2.4</hadoop.version>
+ <hadoop.version>3.4.0</hadoop.version>
</properties>
</project>
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index 3faba03be..061d4d700 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -6,7 +6,7 @@
<properties>
<seaweedfs.client.version>3.80</seaweedfs.client.version>
- <hadoop.version>3.2.4</hadoop.version>
+ <hadoop.version>3.4.0</hadoop.version>
</properties>
<groupId>com.seaweedfs</groupId>
@@ -171,6 +171,25 @@
<version>${hadoop.version}</version>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.13.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>3.12.4</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop.version}</version>
+ <scope>test</scope>
+ <type>test-jar</type>
+ </dependency>
</dependencies>
</project>
diff --git a/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemConfigTest.java b/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemConfigTest.java
new file mode 100644
index 000000000..bcc08b8e2
--- /dev/null
+++ b/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemConfigTest.java
@@ -0,0 +1,90 @@
+package seaweed.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit tests for SeaweedFileSystem configuration that don't require a running SeaweedFS instance.
+ *
+ * These tests verify basic properties and constants.
+ */
+public class SeaweedFileSystemConfigTest {
+
+ private SeaweedFileSystem fs;
+ private Configuration conf;
+
+ @Before
+ public void setUp() {
+ fs = new SeaweedFileSystem();
+ conf = new Configuration();
+ }
+
+ @Test
+ public void testScheme() {
+ assertEquals("seaweedfs", fs.getScheme());
+ }
+
+ @Test
+ public void testConstants() {
+ // Test that constants are defined correctly
+ assertEquals("fs.seaweed.filer.host", SeaweedFileSystem.FS_SEAWEED_FILER_HOST);
+ assertEquals("fs.seaweed.filer.port", SeaweedFileSystem.FS_SEAWEED_FILER_PORT);
+ assertEquals("fs.seaweed.filer.port.grpc", SeaweedFileSystem.FS_SEAWEED_FILER_PORT_GRPC);
+ assertEquals(8888, SeaweedFileSystem.FS_SEAWEED_DEFAULT_PORT);
+ assertEquals("fs.seaweed.buffer.size", SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE);
+ assertEquals(4 * 1024 * 1024, SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE);
+ assertEquals("fs.seaweed.replication", SeaweedFileSystem.FS_SEAWEED_REPLICATION);
+ assertEquals("fs.seaweed.volume.server.access", SeaweedFileSystem.FS_SEAWEED_VOLUME_SERVER_ACCESS);
+ assertEquals("fs.seaweed.filer.cn", SeaweedFileSystem.FS_SEAWEED_FILER_CN);
+ }
+
+ @Test
+ public void testWorkingDirectoryPathOperations() {
+ // Test path operations that don't require initialization
+ Path testPath = new Path("/test/path");
+ assertTrue("Path should be absolute", testPath.isAbsolute());
+ assertEquals("/test/path", testPath.toUri().getPath());
+
+ Path childPath = new Path(testPath, "child");
+ assertEquals("/test/path/child", childPath.toUri().getPath());
+ }
+
+ @Test
+ public void testConfigurationProperties() {
+ // Test that configuration can be set and read
+ conf.set(SeaweedFileSystem.FS_SEAWEED_FILER_HOST, "testhost");
+ assertEquals("testhost", conf.get(SeaweedFileSystem.FS_SEAWEED_FILER_HOST));
+
+ conf.setInt(SeaweedFileSystem.FS_SEAWEED_FILER_PORT, 9999);
+ assertEquals(9999, conf.getInt(SeaweedFileSystem.FS_SEAWEED_FILER_PORT, 0));
+
+ conf.setInt(SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE, 8 * 1024 * 1024);
+ assertEquals(8 * 1024 * 1024, conf.getInt(SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE, 0));
+
+ conf.set(SeaweedFileSystem.FS_SEAWEED_REPLICATION, "001");
+ assertEquals("001", conf.get(SeaweedFileSystem.FS_SEAWEED_REPLICATION));
+
+ conf.set(SeaweedFileSystem.FS_SEAWEED_VOLUME_SERVER_ACCESS, "publicUrl");
+ assertEquals("publicUrl", conf.get(SeaweedFileSystem.FS_SEAWEED_VOLUME_SERVER_ACCESS));
+
+ conf.set(SeaweedFileSystem.FS_SEAWEED_FILER_CN, "test-cn");
+ assertEquals("test-cn", conf.get(SeaweedFileSystem.FS_SEAWEED_FILER_CN));
+ }
+
+ @Test
+ public void testDefaultBufferSize() {
+ // Test default buffer size constant
+ int expected = 4 * 1024 * 1024; // 4MB
+ assertEquals(expected, SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE);
+ }
+
+ @Test
+ public void testDefaultPort() {
+ // Test default port constant
+ assertEquals(8888, SeaweedFileSystem.FS_SEAWEED_DEFAULT_PORT);
+ }
+}
diff --git a/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemTest.java b/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemTest.java
new file mode 100644
index 000000000..4ccb21a56
--- /dev/null
+++ b/other/java/hdfs3/src/test/java/seaweed/hdfs/SeaweedFileSystemTest.java
@@ -0,0 +1,379 @@
+package seaweed.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit tests for SeaweedFileSystem.
+ *
+ * These tests verify basic FileSystem operations against a SeaweedFS backend.
+ * Note: These tests require a running SeaweedFS filer instance.
+ *
+ * To run tests, ensure SeaweedFS is running with default ports:
+ * - Filer HTTP: 8888
+ * - Filer gRPC: 18888
+ *
+ * Set environment variable SEAWEEDFS_TEST_ENABLED=true to enable these tests.
+ */
+public class SeaweedFileSystemTest {
+
+ private SeaweedFileSystem fs;
+ private Configuration conf;
+ private static final String TEST_ROOT = "/test-hdfs3";
+ private static final boolean TESTS_ENABLED =
+ "true".equalsIgnoreCase(System.getenv("SEAWEEDFS_TEST_ENABLED"));
+
+ @Before
+ public void setUp() throws Exception {
+ if (!TESTS_ENABLED) {
+ return;
+ }
+
+ conf = new Configuration();
+ conf.set("fs.seaweed.filer.host", "localhost");
+ conf.setInt("fs.seaweed.filer.port", 8888);
+ conf.setInt("fs.seaweed.filer.port.grpc", 18888);
+
+ fs = new SeaweedFileSystem();
+ URI uri = new URI("seaweedfs://localhost:8888/");
+ fs.initialize(uri, conf);
+
+ // Clean up any existing test directory
+ Path testPath = new Path(TEST_ROOT);
+ if (fs.exists(testPath)) {
+ fs.delete(testPath, true);
+ }
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (!TESTS_ENABLED || fs == null) {
+ return;
+ }
+
+ // Clean up test directory
+ Path testPath = new Path(TEST_ROOT);
+ if (fs.exists(testPath)) {
+ fs.delete(testPath, true);
+ }
+
+ fs.close();
+ }
+
+ @Test
+ public void testInitialization() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ assertNotNull(fs);
+ assertEquals("seaweedfs", fs.getScheme());
+ assertNotNull(fs.getUri());
+ assertEquals("/", fs.getWorkingDirectory().toUri().getPath());
+ }
+
+ @Test
+ public void testMkdirs() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testDir = new Path(TEST_ROOT + "/testdir");
+ assertTrue("Failed to create directory", fs.mkdirs(testDir));
+ assertTrue("Directory should exist", fs.exists(testDir));
+
+ FileStatus status = fs.getFileStatus(testDir);
+ assertTrue("Path should be a directory", status.isDirectory());
+ }
+
+ @Test
+ public void testCreateAndReadFile() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/testfile.txt");
+ String testContent = "Hello, SeaweedFS!";
+
+ // Create and write to file
+ FSDataOutputStream out = fs.create(testFile, FsPermission.getDefault(),
+ false, 4096, (short) 1, 4 * 1024 * 1024, null);
+ assertNotNull("Output stream should not be null", out);
+ out.write(testContent.getBytes());
+ out.close();
+
+ // Verify file exists
+ assertTrue("File should exist", fs.exists(testFile));
+
+ // Read and verify content
+ FSDataInputStream in = fs.open(testFile, 4096);
+ assertNotNull("Input stream should not be null", in);
+ byte[] buffer = new byte[testContent.length()];
+ int bytesRead = in.read(buffer);
+ in.close();
+
+ assertEquals("Should read all bytes", testContent.length(), bytesRead);
+ assertEquals("Content should match", testContent, new String(buffer));
+ }
+
+ @Test
+ public void testFileStatus() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/statustest.txt");
+ String content = "test content";
+
+ FSDataOutputStream out = fs.create(testFile);
+ out.write(content.getBytes());
+ out.close();
+
+ FileStatus status = fs.getFileStatus(testFile);
+ assertNotNull("FileStatus should not be null", status);
+ assertFalse("Should not be a directory", status.isDirectory());
+ assertTrue("Should be a file", status.isFile());
+ assertEquals("File length should match", content.length(), status.getLen());
+ assertNotNull("Path should not be null", status.getPath());
+ }
+
+ @Test
+ public void testListStatus() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testDir = new Path(TEST_ROOT + "/listtest");
+ fs.mkdirs(testDir);
+
+ // Create multiple files
+ for (int i = 0; i < 3; i++) {
+ Path file = new Path(testDir, "file" + i + ".txt");
+ FSDataOutputStream out = fs.create(file);
+ out.write(("content" + i).getBytes());
+ out.close();
+ }
+
+ FileStatus[] statuses = fs.listStatus(testDir);
+ assertNotNull("List should not be null", statuses);
+ assertEquals("Should have 3 files", 3, statuses.length);
+ }
+
+ @Test
+ public void testRename() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path srcFile = new Path(TEST_ROOT + "/source.txt");
+ Path dstFile = new Path(TEST_ROOT + "/destination.txt");
+ String content = "rename test";
+
+ // Create source file
+ FSDataOutputStream out = fs.create(srcFile);
+ out.write(content.getBytes());
+ out.close();
+
+ assertTrue("Source file should exist", fs.exists(srcFile));
+
+ // Rename
+ assertTrue("Rename should succeed", fs.rename(srcFile, dstFile));
+
+ // Verify
+ assertFalse("Source file should not exist", fs.exists(srcFile));
+ assertTrue("Destination file should exist", fs.exists(dstFile));
+
+ // Verify content preserved
+ FSDataInputStream in = fs.open(dstFile);
+ byte[] buffer = new byte[content.length()];
+ in.read(buffer);
+ in.close();
+ assertEquals("Content should be preserved", content, new String(buffer));
+ }
+
+ @Test
+ public void testDelete() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/deletetest.txt");
+
+ // Create file
+ FSDataOutputStream out = fs.create(testFile);
+ out.write("delete me".getBytes());
+ out.close();
+
+ assertTrue("File should exist before delete", fs.exists(testFile));
+
+ // Delete
+ assertTrue("Delete should succeed", fs.delete(testFile, false));
+ assertFalse("File should not exist after delete", fs.exists(testFile));
+ }
+
+ @Test
+ public void testDeleteDirectory() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testDir = new Path(TEST_ROOT + "/deletedir");
+ Path testFile = new Path(testDir, "file.txt");
+
+ // Create directory with file
+ fs.mkdirs(testDir);
+ FSDataOutputStream out = fs.create(testFile);
+ out.write("content".getBytes());
+ out.close();
+
+ assertTrue("Directory should exist", fs.exists(testDir));
+ assertTrue("File should exist", fs.exists(testFile));
+
+ // Recursive delete
+ assertTrue("Recursive delete should succeed", fs.delete(testDir, true));
+ assertFalse("Directory should not exist after delete", fs.exists(testDir));
+ assertFalse("File should not exist after delete", fs.exists(testFile));
+ }
+
+ @Test
+ public void testAppend() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/appendtest.txt");
+ String initialContent = "initial";
+ String appendContent = " appended";
+
+ // Create initial file
+ FSDataOutputStream out = fs.create(testFile);
+ out.write(initialContent.getBytes());
+ out.close();
+
+ // Append
+ FSDataOutputStream appendOut = fs.append(testFile, 4096, null);
+ assertNotNull("Append stream should not be null", appendOut);
+ appendOut.write(appendContent.getBytes());
+ appendOut.close();
+
+ // Verify combined content
+ FSDataInputStream in = fs.open(testFile);
+ byte[] buffer = new byte[initialContent.length() + appendContent.length()];
+ int bytesRead = in.read(buffer);
+ in.close();
+
+ String expected = initialContent + appendContent;
+ assertEquals("Should read all bytes", expected.length(), bytesRead);
+ assertEquals("Content should match", expected, new String(buffer));
+ }
+
+ @Test
+ public void testSetWorkingDirectory() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path originalWd = fs.getWorkingDirectory();
+ assertEquals("Original working directory should be /", "/", originalWd.toUri().getPath());
+
+ Path newWd = new Path(TEST_ROOT);
+ fs.mkdirs(newWd);
+ fs.setWorkingDirectory(newWd);
+
+ Path currentWd = fs.getWorkingDirectory();
+ assertTrue("Working directory should be updated",
+ currentWd.toUri().getPath().contains(TEST_ROOT));
+ }
+
+ @Test
+ public void testSetPermission() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/permtest.txt");
+
+ // Create file
+ FSDataOutputStream out = fs.create(testFile);
+ out.write("permission test".getBytes());
+ out.close();
+
+ // Set permission
+ FsPermission newPerm = new FsPermission((short) 0644);
+ fs.setPermission(testFile, newPerm);
+
+ FileStatus status = fs.getFileStatus(testFile);
+ assertNotNull("Permission should not be null", status.getPermission());
+ }
+
+ @Test
+ public void testSetOwner() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path testFile = new Path(TEST_ROOT + "/ownertest.txt");
+
+ // Create file
+ FSDataOutputStream out = fs.create(testFile);
+ out.write("owner test".getBytes());
+ out.close();
+
+ // Set owner - this call may succeed silently even if ownership is not fully enforced
+ fs.setOwner(testFile, "testuser", "testgroup");
+
+ // Just verify the call doesn't throw an exception
+ FileStatus status = fs.getFileStatus(testFile);
+ assertNotNull("FileStatus should not be null", status);
+ }
+
+ @Test
+ public void testRenameToExistingDirectory() throws Exception {
+ if (!TESTS_ENABLED) {
+ System.out.println("Skipping test - SEAWEEDFS_TEST_ENABLED not set");
+ return;
+ }
+
+ Path srcFile = new Path(TEST_ROOT + "/movefile.txt");
+ Path dstDir = new Path(TEST_ROOT + "/movedir");
+
+ // Create source file and destination directory
+ FSDataOutputStream out = fs.create(srcFile);
+ out.write("move test".getBytes());
+ out.close();
+ fs.mkdirs(dstDir);
+
+ // Rename file to existing directory (should move file into directory)
+ assertTrue("Rename to directory should succeed", fs.rename(srcFile, dstDir));
+
+ // File should be moved into the directory
+ Path expectedLocation = new Path(dstDir, srcFile.getName());
+ assertTrue("File should exist in destination directory", fs.exists(expectedLocation));
+ assertFalse("Source file should not exist", fs.exists(srcFile));
+ }
+}
+