author     Chris Lu <chris.lu@gmail.com>   2020-10-10 00:51:26 -0700
committer  Chris Lu <chris.lu@gmail.com>   2020-10-10 00:57:57 -0700
commit     e1a3ffcdbf7981473398e9526c6e0d8cb0fb24a0 (patch)
tree       261303171c4b2480ce1dc28b7e1f62e5bf4baf99
parent     4a15e9c830de1b654515308e5be8380ffa34aefa (diff)
Hadoop: add exponential back off for failed reads
-rw-r--r--   other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java   33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
index 045751717..7e5c5cb88 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
@@ -97,8 +97,35 @@ public class SeaweedRead {
     public static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
-        HttpGet request = new HttpGet(
-                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+        byte[] data = null;
+        for (long waitTime = 230L; waitTime < 20 * 1000; waitTime += waitTime / 2) {
+            for (FilerProto.Location location : locations.getLocationsList()) {
+                String url = String.format("http://%s/%s", location.getUrl(), chunkView.fileId);
+                try {
+                    data = doFetchOneFullChunkData(chunkView, url);
+                    break;
+                } catch (IOException ioe) {
+                    LOG.debug("doFetchFullChunkData {} :{}", url, ioe);
+                }
+            }
+            if (data != null) {
+                break;
+            }
+            try {
+                Thread.sleep(waitTime);
+            } catch (InterruptedException e) {
+            }
+        }
+
+        LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);
+
+        return data;
+
+    }
+
+    public static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException {
+
+        HttpGet request = new HttpGet(url);
         request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");
@@ -142,7 +169,7 @@ public class SeaweedRead {
             data = Gzip.decompress(data);
         }
-        LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);
+        LOG.debug("doFetchOneFullChunkData url:{} chunkData.length:{}", url, data.length);
         return data;
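
For reference, the back-off schedule the new loop produces can be worked out from its constants: the delay starts at 230 ms, grows by half of itself after every failed round (integer division), and a further round is attempted only while the next delay is still below 20 s. Below is a minimal sketch, assuming the same constants as the committed loop; the class name and the printing are illustrative only and not part of the commit.

    // Sketch: prints the retry schedule implied by the loop above.
    // Same constants as the committed code: start at 230 ms, grow by
    // waitTime / 2 each round, stop once the next delay reaches 20 s.
    public class BackoffSchedulePreview {
        public static void main(String[] args) {
            long total = 0;
            int round = 0;
            for (long waitTime = 230L; waitTime < 20 * 1000; waitTime += waitTime / 2) {
                round++;
                total += waitTime;
                System.out.printf("round %2d: sleep %5d ms, cumulative %6d ms%n", round, waitTime, total);
            }
            // With these constants: 12 rounds and roughly 59 s of cumulative
            // sleep when every location fails on every round.
        }
    }

Note that each round first tries every replica location before sleeping, so a single healthy replica short-circuits the back-off entirely; the sleep only applies when all locations fail in a round.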