diff options
| author | Chris Lu <chris.lu@gmail.com> | 2021-07-15 11:56:28 -0700 |
|---|---|---|
| committer | Chris Lu <chris.lu@gmail.com> | 2021-07-15 11:56:28 -0700 |
| commit | 18c40686d9c9f72f80a420c7a464d13cf33520d8 (patch) | |
| tree | 10f2c75dd4167960c03d88801f53fcadf466793c | |
| parent | 5a838dbe53acc82e853c206ce3d21e4712e16f73 (diff) | |
| download | seaweedfs-18c40686d9c9f72f80a420c7a464d13cf33520d8.tar.xz seaweedfs-18c40686d9c9f72f80a420c7a464d13cf33520d8.zip | |
s3: multipart upload misses data if the file is chunked in 4MB
fix https://github.com/chrislusf/seaweedfs/issues/2195
| -rw-r--r-- | weed/server/filer_server_handlers_write_upload.go | 11 |
1 file changed, 3 insertions, 8 deletions
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index 395852517..2275ff1bc 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -28,19 +28,14 @@ var bufPool = sync.Pool{ }, } -func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) { +func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { - md5Hash := md5.New() + md5Hash = md5.New() var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) - chunkOffset := int64(0) - var smallContent []byte - var uploadErr error - var wg sync.WaitGroup var bytesBufferCounter int64 bytesBufferLimitCond := sync.NewCond(new(sync.Mutex)) - var fileChunks []*filer_pb.FileChunk var fileChunksLock sync.Mutex for { @@ -67,7 +62,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque bufPool.Put(bytesBuffer) atomic.AddInt64(&bytesBufferCounter, -1) bytesBufferLimitCond.Signal() - return nil, md5Hash, 0, err, nil + break } if chunkOffset == 0 && !isAppend(r) { if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) { |
