path: root/weed/server/filer_server_handlers_write_autochunk.go
author    Chris Lu <chris.lu@gmail.com>  2021-01-10 23:14:46 -0800
committer Chris Lu <chris.lu@gmail.com>  2021-01-10 23:14:46 -0800
commit    1efb51ba843d809df5617cf9c1c288b7af5d10d4 (patch)
tree      4a109bb66f9ea0fde40dca1d0a82b171d044a028 /weed/server/filer_server_handlers_write_autochunk.go
parent    f0d3b3bf9397fe5bfb64fd0d46c40055a950d9cf (diff)
filer: change to saveToFilerLimit from cacheToFilerLimit
short-circuit the volume server upload for small files and store their content directly in the filer
Diffstat (limited to 'weed/server/filer_server_handlers_write_autochunk.go')
-rw-r--r--  weed/server/filer_server_handlers_write_autochunk.go | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
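
The hunk at line 216 below adds the new decision rule: only the very first chunk (offset 0) is eligible for inlining, and it is kept in the filer when it is smaller than saveToFilerLimit, or smaller than 4 KiB for paths under the filer's /etc configuration tree. A minimal sketch of that rule, with a made-up helper name and parameters (the real code tests fs.option.SaveToFilerLimit and filer.DirectoryEtcRoot inline inside uploadReaderToChunks; DirectoryEtcRoot is assumed here to resolve to the "/etc/" prefix):

package example

import "strings"

// shouldSaveInline is a hypothetical helper mirroring the check added at
// chunkOffset == 0: inline the data when it is under the configured limit,
// or under 4 KiB for entries in the filer's /etc configuration tree.
func shouldSaveInline(chunkOffset int64, dataLen int, urlPath string, saveToFilerLimit int) bool {
	if chunkOffset != 0 {
		return false
	}
	return dataLen < saveToFilerLimit ||
		strings.HasPrefix(urlPath, "/etc/") && dataLen < 4*1024
}

The second clause means that even when saveToFilerLimit is zero, /etc entries under 4 KiB are still inlined.
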
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index fe4e68140..43884bfde 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -207,7 +207,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
chunkOffset := int64(0)
- var smallContent, content []byte
+ var smallContent []byte
for {
limitedReader := io.LimitReader(partReader, int64(chunkSize))
@@ -216,6 +216,13 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
if err != nil {
return nil, nil, 0, err, nil
}
+ if chunkOffset == 0 {
+ if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
+ smallContent = data
+ chunkOffset += int64(len(data))
+ break
+ }
+ }
dataReader := util.NewBytesReader(data)
// retry to assign a different file id
@@ -242,8 +249,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
return nil, nil, 0, uploadErr, nil
}
- content = data
-
// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
break
@@ -263,9 +268,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
}
}
- if chunkOffset < fs.option.CacheToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && chunkOffset < 4*1024 {
- smallContent = content
- }
return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
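
For context, here is a simplified sketch of how the loop behaves after this change, and where the removed cacheToFilerLimit logic used to sit. Chunk assignment, upload, MD5 hashing, and the /etc special case are elided; the function name, signature, and loop-exit conditions are assumptions made for the sketch, and only the short-circuit control flow mirrors the diff.

package example

import (
	"io"
	"io/ioutil"
)

// readToChunks is a hypothetical, stripped-down stand-in for
// uploadReaderToChunks: it returns either inline content (a small first
// chunk) or the byte slices the real code would upload to volume servers
// as separate chunks.
func readToChunks(reader io.Reader, chunkSize int64, saveToFilerLimit int) (smallContent []byte, chunks [][]byte, err error) {
	chunkOffset := int64(0)
	for {
		data, readErr := ioutil.ReadAll(io.LimitReader(reader, chunkSize))
		if readErr != nil {
			return nil, nil, readErr
		}
		if chunkOffset == 0 && len(data) < saveToFilerLimit {
			// New in this commit: keep the small first chunk in the filer
			// and stop before any volume-server upload happens.
			return data, nil, nil
		}
		if len(data) == 0 {
			break // reader exhausted exactly at a chunk border
		}
		chunks = append(chunks, data) // real code: assign a file id and upload here
		chunkOffset += int64(len(data))
		if int64(len(data)) < chunkSize {
			break // short read: this was the last chunk
		}
	}
	// Removed by this commit: after uploading, a file whose total size was
	// under cacheToFilerLimit also had its data copied into smallContent,
	// so small files ended up both on a volume server and in the filer store.
	return nil, chunks, nil
}

The net effect is that files below the limit never touch a volume server at all, instead of being uploaded and then additionally cached in the filer as before.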