aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Makefile2
-rw-r--r--other/java/client/pom.xml2
-rw-r--r--other/java/hdfs2/dependency-reduced-pom.xml2
-rw-r--r--other/java/hdfs2/pom.xml2
-rw-r--r--other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java37
-rw-r--r--other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java9
-rw-r--r--other/java/hdfs3/dependency-reduced-pom.xml2
-rw-r--r--other/java/hdfs3/pom.xml2
-rw-r--r--other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java39
-rw-r--r--other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java9
-rw-r--r--weed/server/filer_grpc_server_rename.go4
-rw-r--r--weed/util/constants.go2
12 files changed, 79 insertions(+), 33 deletions(-)
diff --git a/Makefile b/Makefile
index cce9d586d..ce20a482b 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,8 @@ clean:
deps:
go get $(GO_FLAGS) -d $(SOURCE_DIR)
+ rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
+ rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
build: deps
go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml
index dc3f8f2ca..b5c7af29e 100644
--- a/other/java/client/pom.xml
+++ b/other/java/client/pom.xml
@@ -4,7 +4,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
- <version>1.1.5</version>
+ <version>1.1.6</version>
<parent>
<groupId>org.sonatype.oss</groupId>
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index bbec239d5..949616f1c 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -123,7 +123,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
- <seaweedfs.client.version>1.1.5</seaweedfs.client.version>
+ <seaweedfs.client.version>1.1.6</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index 4ad14a7f7..e48bf87b0 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
- <seaweedfs.client.version>1.1.5</seaweedfs.client.version>
+ <seaweedfs.client.version>1.1.6</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 7cf76e5e8..d471d8440 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -1,14 +1,7 @@
package seaweed.hdfs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -87,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
return new FSDataInputStream(inputStream);
} catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -104,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
return null;
}
}
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present.
+ * @throws FileAlreadyExistsException if the parent exists but is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
@@ -118,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 2ddcd41e8..826b74560 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -137,7 +137,7 @@ public class SeaweedFileSystemStore {
if (source.isRoot()) {
return;
}
- LOG.warn("rename lookupEntry source: {}", source);
+ LOG.warn("rename source: {} destination:{}", source, destination);
FilerProto.Entry entry = lookupEntry(source);
if (entry == null) {
LOG.warn("rename non-existing source: {}", source);
@@ -171,10 +171,10 @@ public class SeaweedFileSystemStore {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
}
- LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
- replication = existingEntry.getAttributes().getReplication();
}
if (entry == null) {
entry = FilerProto.Entry.newBuilder()
@@ -266,4 +266,5 @@ public class SeaweedFileSystemStore {
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
}
+
}
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index 71c74b0c8..667713e7c 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -123,7 +123,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
- <seaweedfs.client.version>1.1.5</seaweedfs.client.version>
+ <seaweedfs.client.version>1.1.6</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index f97b0ef5e..078e76757 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
- <seaweedfs.client.version>1.1.5</seaweedfs.client.version>
+ <seaweedfs.client.version>1.1.6</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 7cf76e5e8..c12da8261 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -1,14 +1,7 @@
package seaweed.hdfs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -29,7 +22,7 @@ import java.util.Map;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
+public class SeaweedFileSystem extends FileSystem {
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
@@ -87,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
return new FSDataInputStream(inputStream);
} catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -104,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
return null;
}
}
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present.
+ * @throws FileAlreadyExistsException if the parent exists but is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
@@ -118,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 2ddcd41e8..826b74560 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -137,7 +137,7 @@ public class SeaweedFileSystemStore {
if (source.isRoot()) {
return;
}
- LOG.warn("rename lookupEntry source: {}", source);
+ LOG.warn("rename source: {} destination:{}", source, destination);
FilerProto.Entry entry = lookupEntry(source);
if (entry == null) {
LOG.warn("rename non-existing source: {}", source);
@@ -171,10 +171,10 @@ public class SeaweedFileSystemStore {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
}
- LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
- replication = existingEntry.getAttributes().getReplication();
}
if (entry == null) {
entry = FilerProto.Entry.newBuilder()
@@ -266,4 +266,5 @@ public class SeaweedFileSystemStore {
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
}
+
}
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index b5d2c8163..556e53fd3 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -73,11 +73,11 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer
return err
}
- println("found", len(entries), "entries under", currentDirPath)
+ // println("found", len(entries), "entries under", currentDirPath)
for _, item := range entries {
lastFileName = item.Name()
- println("processing", lastFileName)
+ // println("processing", lastFileName)
err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events)
if err != nil {
return err
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 36e82a480..830a85057 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,5 +5,5 @@ import (
)
var (
- VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 42)
+ VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 43)
)