author    banjiaojuhao <banjiaojuhao@qq.com>  2022-02-22 00:15:00 +0800
committer banjiaojuhao <banjiaojuhao@qq.com>  2022-02-22 00:15:00 +0800
commit    6ab09e9071048684c3ab52db378446b7b1e78a66 (patch)
tree      06245f0d87c3a81c5edc756427e769515a9c84fb
parent    497ebbbd45cb1a095b4d061258871ce63c706e61 (diff)
filer_http: support uploading file with offset
-rw-r--r-- weed/server/filer_server_handlers_write_autochunk.go | 21
-rw-r--r-- weed/server/filer_server_handlers_write_upload.go    | 20
2 files changed, 32 insertions(+), 9 deletions(-)
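This commit lets a client write data at an arbitrary byte offset within an existing file by adding an offset query parameter to the filer upload URL. A minimal client-side sketch of that usage follows; the filer address (localhost:8888), the target path, and the use of PUT are illustrative assumptions — only the offset parameter itself comes from this change.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Write this block starting at byte 4096 of the target file.
	// The filer address and path below are placeholders.
	data := []byte("second 4KiB block")
	url := "http://localhost:8888/buckets/demo/large.bin?offset=4096"

	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}

A negative offset, or a positive offset combined with op=append, is rejected by the validation added in filer_server_handlers_write_upload.go below.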
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 61d30372b..be6e0c652 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -126,10 +126,6 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
return
}
-func isAppend(r *http.Request) bool {
- return r.URL.Query().Get("op") == "append"
-}
-
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
// detect file mode
@@ -161,8 +157,11 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
var entry *filer.Entry
var mergedChunks []*filer_pb.FileChunk
+
+ isAppend := r.URL.Query().Get("op") == "append"
+ isOffsetWrite := fileChunks[0].Offset > 0
// when it is an append
- if isAppend(r) {
+ if isAppend || isOffsetWrite {
existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
if findErr != nil && findErr != filer_pb.ErrNotFound {
glog.V(0).Infof("failing to find %s: %v", path, findErr)
@@ -173,11 +172,13 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
entry.Mtime = time.Now()
entry.Md5 = nil
// adjust chunk offsets
- for _, chunk := range fileChunks {
- chunk.Offset += int64(entry.FileSize)
+ if isAppend {
+ for _, chunk := range fileChunks {
+ chunk.Offset += int64(entry.FileSize)
+ }
+ entry.FileSize += uint64(chunkOffset)
}
mergedChunks = append(entry.Chunks, fileChunks...)
- entry.FileSize += uint64(chunkOffset)
// TODO
if len(entry.Content) > 0 {
@@ -215,6 +216,10 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
return
}
entry.Chunks = mergedChunks
+ if isOffsetWrite {
+ entry.Md5 = nil
+ entry.FileSize = entry.Size()
+ }
filerResult = &FilerPostResult{
Name: fileName,
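The saveMetaData change above distinguishes two merge cases: for op=append the incoming chunks start at offset 0 and are shifted past the current end of the file, while for an offset write the chunks already carry absolute offsets, so they are merged unchanged and the entry's size is recomputed from the merged chunk list (with the stored MD5 cleared). A simplified, standalone sketch of that distinction follows; it is not the actual filer code, and fileChunk and mergeIncomingChunks are made-up stand-ins for filer_pb.FileChunk and the in-handler logic.

package main

import "fmt"

// fileChunk stands in for filer_pb.FileChunk in this sketch.
type fileChunk struct {
	Offset int64
	Size   uint64
}

// mergeIncomingChunks illustrates the two cases handled in saveMetaData;
// existingSize is the entry's FileSize before this write.
func mergeIncomingChunks(existingSize uint64, existing, incoming []*fileChunk, isAppend bool) []*fileChunk {
	if isAppend {
		// Append: incoming chunk offsets are relative to the start of this
		// upload, so shift them to the current end of the file.
		for _, c := range incoming {
			c.Offset += int64(existingSize)
		}
	}
	// Offset write: incoming chunks already hold absolute offsets, so they
	// are merged as-is; the caller then recomputes the entry size from the
	// full chunk list and drops the now-stale MD5.
	return append(existing, incoming...)
}

func main() {
	existing := []*fileChunk{{Offset: 0, Size: 8}}
	incoming := []*fileChunk{{Offset: 0, Size: 4}}
	merged := mergeIncomingChunks(8, existing, incoming, true /* isAppend */)
	// With isAppend=true the new chunk now starts at offset 8.
	fmt.Println(merged[1].Offset)
}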
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index a7716ef02..294a97582 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -3,10 +3,12 @@ package weed_server
import (
"bytes"
"crypto/md5"
+ "fmt"
"hash"
"io"
"net/http"
"sort"
+ "strconv"
"strings"
"sync"
"sync/atomic"
@@ -28,6 +30,22 @@ var bufPool = sync.Pool{
}
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
+ query := r.URL.Query()
+ isAppend := query.Get("op") == "append"
+
+ if query.Has("offset") {
+ offset := query.Get("offset")
+ offsetInt, err := strconv.ParseInt(offset, 10, 64)
+ if err != nil || offsetInt < 0 {
+ err = fmt.Errorf("invalid 'offset': '%s'", offset)
+ return nil, nil, 0, err, nil
+ }
+ if isAppend && offsetInt > 0 {
+ err = fmt.Errorf("cannot set offset when op=append")
+ return nil, nil, 0, err, nil
+ }
+ chunkOffset = offsetInt
+ }
md5Hash = md5.New()
var partReader = io.NopCloser(io.TeeReader(reader, md5Hash))
@@ -63,7 +81,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
bytesBufferLimitCond.Signal()
break
}
- if chunkOffset == 0 && !isAppend(r) {
+ if chunkOffset == 0 && !isAppend {
if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) {
chunkOffset += dataSize
smallContent = make([]byte, dataSize)
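Taken together, the hunks above mean an offset write always produces chunks (the inline small-content path only applies when chunkOffset is 0 and op is not append), and op=append cannot be combined with a positive offset. One use this enables is resuming an interrupted upload from a known byte position; a sketch under the same assumptions as the earlier example (filer address, path, and use of PUT are illustrative, and putAt is a made-up helper):

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

// putAt uploads data to url starting at the given byte offset, using the
// offset query parameter introduced by this commit.
func putAt(url string, offset int64, data []byte) error {
	req, err := http.NewRequest(http.MethodPut,
		fmt.Sprintf("%s?offset=%d", url, offset), bytes.NewReader(data))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}

func main() {
	data, err := os.ReadFile("large.bin")
	if err != nil {
		panic(err)
	}
	url := "http://localhost:8888/buckets/demo/large.bin" // placeholder

	// Suppose a previous attempt stopped after 8 MiB; resend only the
	// remainder instead of the whole file.
	var resumeFrom int64 = 8 << 20
	if resumeFrom > int64(len(data)) {
		resumeFrom = int64(len(data))
	}
	if err := putAt(url, resumeFrom, data[resumeFrom:]); err != nil {
		panic(err)
	}
	fmt.Println("resumed upload complete")
}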