28 files changed, 119 insertions, 102 deletions
diff --git a/go.mod b/go.mod
--- a/go.mod
+++ b/go.mod
@@ -39,7 +39,7 @@ require (
     github.com/google/uuid v1.1.1
     github.com/gorilla/mux v1.7.4
     github.com/gorilla/websocket v1.4.1 // indirect
-    github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
+    github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
     github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
     github.com/jcmturner/gofork v1.0.0 // indirect
     github.com/jinzhu/copier v0.2.8
@@ -90,7 +90,7 @@ require (
     gocloud.dev/pubsub/rabbitpubsub v0.20.0
     golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
     golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
-    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+    golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
     golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
     golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
     google.golang.org/api v0.26.0
diff --git a/go.sum b/go.sum
--- a/go.sum
+++ b/go.sum
@@ -969,8 +969,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
 golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/weed/Makefile b/weed/Makefile
index 8f1257d09..edc0bf544 100644
--- a/weed/Makefile
+++ b/weed/Makefile
@@ -16,7 +16,7 @@ debug_shell:
 
 debug_mount:
     go build -gcflags="all=-N -l"
-    dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
+    dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
 
 debug_server:
     go build -gcflags="all=-N -l"
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
index c4281feba..a46098b04 100644
--- a/weed/command/filer_cat.go
+++ b/weed/command/filer_cat.go
@@ -110,7 +110,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 
         filerCat.filerClient = client
 
-        return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+        return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
     })
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index e7a9b107f..a9b715f90 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -299,7 +299,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
     var assignResult *filer_pb.AssignVolumeResponse
     var assignError error
 
-    if task.fileMode & os.ModeDir == 0 && task.fileSize > 0 {
+    if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {
         mimeType = detectMimeType(f)
         data, err := ioutil.ReadAll(f)
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
index a91faeb24..c9f75a5ca 100644
--- a/weed/filer/filer_on_meta_event.go
+++ b/weed/filer/filer_on_meta_event.go
@@ -52,7 +52,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR
 
 func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
     var buf bytes.Buffer
-    err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
+    err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64)
     if err != nil {
         return nil, err
     }
diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go
index f43312cfa..2ee29be25 100644
--- a/weed/filer/filer_search.go
+++ b/weed/filer/filer_search.go
@@ -58,7 +58,7 @@ func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath,
 
 func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
 
-    if len(restNamePattern) == 0 && len(namePatternExclude) == 0{
+    if len(restNamePattern) == 0 && len(namePatternExclude) == 0 {
         lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
         return 0, lastFileName, err
     }
diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go
index d92d526d5..c4c90fb63 100644
--- a/weed/filer/read_write.go
+++ b/weed/filer/read_write.go
@@ -27,7 +27,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seawed
         return err
     }
 
-    return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+    return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
 }
diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go
index a31319082..f8e4727ce 100644
--- a/weed/filer/reader_at_test.go
+++ b/weed/filer/reader_at_test.go
@@ -21,7 +21,7 @@ func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) {
     return data
 }
 
-func(m *mockChunkCache) GetChunkSlice(fileId string, offset, length uint64) []byte {
+func (m *mockChunkCache) GetChunkSlice(fileId string, offset, length uint64) []byte {
     return nil
 }
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 661a210ea..2c25b8722 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -3,7 +3,6 @@ package filer
 import (
     "bytes"
     "fmt"
-    "golang.org/x/sync/errgroup"
     "io"
     "math"
     "strings"
@@ -14,7 +13,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/wdclient"
 )
 
-func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error {
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
 
     glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
     chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
@@ -34,17 +33,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
         fileId2Url[chunkView.FileId] = urlStrings
     }
 
-    if isCheck {
-        // Pre-check all chunkViews urls
-        gErr := new(errgroup.Group)
-        CheckAllChunkViews(chunkViews, &fileId2Url, gErr)
-        if err := gErr.Wait(); err != nil {
-            glog.Errorf("check all chunks: %v", err)
-            return fmt.Errorf("check all chunks: %v", err)
-        }
-        return nil
-    }
-
     for _, chunkView := range chunkViews {
 
         urlStrings := fileId2Url[chunkView.FileId]
@@ -53,7 +41,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
             glog.Errorf("read chunk: %v", err)
             return fmt.Errorf("read chunk: %v", err)
         }
-
         _, err = w.Write(data)
         if err != nil {
             glog.Errorf("write chunk: %v", err)
@@ -65,17 +52,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
 
 }
 
-func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
-    for _, chunkView := range chunkViews {
-        urlStrings := (*fileId2Url)[chunkView.FileId]
-        glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings)
-        gErr.Go(func() error {
-            _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
-            return err
-        })
-    }
-}
-
 // ---------------- ReadAllReader ----------------------------------
 
 func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
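Note: StreamContent drops the trailing isCheck flag (the pre-check path and CheckAllChunkViews are removed), so every caller now passes exactly five arguments. A minimal sketch of the updated call shape, assuming the usual imports (bytes, math, filer, filer_pb, wdclient); the helper name is illustrative, not part of this change:

    func readAllChunks(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
        var buf bytes.Buffer
        // offset 0 with size math.MaxInt64 streams the whole entry; the old
        // trailing isCheck argument is gone from the signature.
        if err := filer.StreamContent(masterClient, &buf, chunks, 0, math.MaxInt64); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil
    }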
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 79fc10442..09d5fd449 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -176,7 +176,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
         },
     }
     file.dirtyMetadata = true
-    fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
+    fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
     return file, fh, nil
 }
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 8888cff96..1719d68e6 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -13,6 +13,7 @@ import (
 type ContinuousDirtyPages struct {
     intervals      *ContinuousIntervals
     f              *File
+    fh             *FileHandle
     writeWaitGroup sync.WaitGroup
     chunkAddLock   sync.Mutex
     lastErr        error
@@ -94,7 +95,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
         defer pages.writeWaitGroup.Done()
 
         reader = io.LimitReader(reader, size)
-        chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
+        chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath(), pages.fh.writeOnly)(reader, pages.f.Name, offset)
         if err != nil {
             glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
             pages.lastErr = err
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index bb57988cd..122aeeef4 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -97,7 +97,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 
     glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
 
-    handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
+    handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
 
     resp.Handle = fuse.HandleID(handle.handle)
 
@@ -267,7 +267,7 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
     file.wfs.handlesLock.Unlock()
     entry = file.entry
     if found {
-        glog.V(4).Infof("maybeLoadEntry found opened file %s/%s: %v %v", file.dir.FullPath(), file.Name, handle.f.entry, entry)
+        glog.V(4).Infof("maybeLoadEntry found opened file %s/%s", file.dir.FullPath(), file.Name)
         entry = handle.f.entry
     }
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 27ffab6e1..8cbaf6fd2 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -32,7 +32,7 @@ type FileHandle struct {
     NodeId fuse.NodeID // file or directory the request is about
     Uid    uint32      // user ID of process making request
     Gid    uint32      // group ID of process making request
-
+    writeOnly bool
 }
 
 func newFileHandle(file *File, uid, gid uint32) *FileHandle {
@@ -42,6 +42,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
         Uid:        uid,
         Gid:        gid,
     }
+    fh.dirtyPages.fh = fh
     entry := fh.f.getEntry()
     if entry != nil {
         entry.Attributes.FileSize = filer.FileSize(entry)
@@ -289,7 +290,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 
     manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
 
     chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks)
-    chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
+    chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath(), fh.writeOnly), chunks)
     if manifestErr != nil {
         // not good, but should be ok
         glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 1ca3b16d5..9af25ae29 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -17,9 +17,9 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
         glog.V(4).Infof("ReadDirAllEntries %s ...", path)
 
         util.Retry("ReadDirAllEntries", func() error {
-            err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
-                entry := filer.FromPbEntry(string(dirPath), pbEntry)
-                if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
+            err = filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
+                entry := filer.FromPbEntry(string(path), pbEntry)
+                if IsHiddenSystemEntry(string(path), entry.Name()) {
                     return nil
                 }
                 if err := mc.doInsertEntry(context.Background(), entry); err != nil {
@@ -35,7 +35,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
         })
 
         if err != nil {
-            err = fmt.Errorf("list %s: %v", dirPath, err)
+            err = fmt.Errorf("list %s: %v", path, err)
         }
 
         return
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 832925bc1..fcce7d9cc 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -138,7 +138,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
     return wfs.root, nil
 }
 
-func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
+func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32, writeOnly bool) (fileHandle *FileHandle) {
 
     fullpath := file.fullpath()
     glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
@@ -150,6 +150,9 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
     wfs.handlesLock.Unlock()
     if found && existingHandle != nil {
         existingHandle.f.isOpen++
+        if existingHandle.writeOnly {
+            existingHandle.writeOnly = writeOnly
+        }
         glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen)
         return existingHandle
     }
@@ -157,6 +160,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
     entry, _ := file.maybeLoadEntry(context.Background())
     file.entry = entry
     fileHandle = newFileHandle(file, uid, gid)
+    fileHandle.writeOnly = writeOnly
     file.isOpen++
 
     wfs.handlesLock.Lock()
@@ -266,6 +270,7 @@ func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
 }
 
 type NodeWithId uint64
+
 func (n NodeWithId) Id() uint64 {
     return uint64(n)
 }
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index dbec3bebc..9d2ce26ec 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -13,7 +13,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {
+func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath, writeOnly bool) filer.SaveDataAsChunkFunctionType {
 
     return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
         var fileId, host string
@@ -67,7 +67,9 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
             return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
         }
 
-        wfs.chunkCache.SetChunk(fileId, data)
+        if !writeOnly {
+            wfs.chunkCache.SetChunk(fileId, data)
+        }
 
         chunk = uploadResult.ToPbFileChunk(fileId, offset)
         return chunk, collection, replication, nil
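Note: the write-only flag is derived from the FUSE open flags in Create/Open, stored on the FileHandle, shared with the dirty pages, and finally passed to saveDataAsChunk, which skips the local chunk cache for write-only handles. A rough sketch of the flow, condensed from the hunks above (not a literal excerpt):

    writeOnly := req.Flags&fuse.OpenWriteOnly > 0              // decided at open/create time
    fh := wfs.AcquireHandle(file, req.Uid, req.Gid, writeOnly) // flag stored on the handle

    // at flush time the same flag reaches the uploader, so data that is only
    // being written is not copied into the read-side chunk cache:
    save := wfs.saveDataAsChunk(file.fullpath(), fh.writeOnly)
    chunk, collection, replication, err := save(reader, file.Name, offset)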
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index f1a539ac5..a8dc34b54 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -326,11 +326,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
     for k, v := range proxyResponse.Header {
         w.Header()[k] = v
     }
-    if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
-        w.WriteHeader(http.StatusPartialContent)
-    } else {
-        w.WriteHeader(proxyResponse.StatusCode)
-    }
+    w.WriteHeader(proxyResponse.StatusCode)
     io.Copy(w, proxyResponse.Body)
 }
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 739cdd8f9..66c66d280 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -63,6 +63,14 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
         writeErrorResponse(w, s3err.ErrInternalError, r.URL)
         return
     }
+
+    if len(response.Contents) == 0 {
+        if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
+            writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
+            return
+        }
+    }
+
     responseV2 := &ListBucketResultV2{
         XMLName: response.XMLName,
         Name:    response.Name,
@@ -106,6 +114,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
         return
     }
 
+    if len(response.Contents) == 0 {
+        if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
+            writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
+            return
+        }
+    }
+
     writeSuccessResponseXML(w, encodeResponse(response))
 }
diff --git a/weed/server/common.go b/weed/server/common.go
index 5c5f1b8eb..9001a3b33 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
     }
 }
 
-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
     rangeReq := r.Header.Get("Range")
 
     if rangeReq == "" {
         w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-        if err := writeFn(w, 0, totalSize); err != nil {
+        if err := writeFn(w, 0, totalSize, 0); err != nil {
             http.Error(w, err.Error(), http.StatusInternalServerError)
             return
         }
@@ -279,7 +279,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
         w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
         w.Header().Set("Content-Range", ra.contentRange(totalSize))
 
-        err = writeFn(w, ra.start, ra.length)
+        err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
         if err != nil {
             http.Error(w, err.Error(), http.StatusInternalServerError)
             return
@@ -307,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
                 pw.CloseWithError(e)
                 return
             }
-            if e = writeFn(part, ra.start, ra.length); e != nil {
+            if e = writeFn(part, ra.start, ra.length, 0); e != nil {
                 pw.CloseWithError(e)
                 return
             }
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 6bc09e953..1d90871d8 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -131,9 +131,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 
     if r.Method == "HEAD" {
         w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-        processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
-            return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true)
-        })
         return
     }
 
@@ -153,7 +150,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
         }
     }
 
-    processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+    processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
+        if httpStatusCode != 0 {
+            w.WriteHeader(httpStatusCode)
+        }
         if offset+size <= int64(len(entry.Content)) {
             _, err := writer.Write(entry.Content[offset : offset+size])
             if err != nil {
@@ -161,7 +161,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
             }
             return err
         }
-        return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false)
+        return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
     })
 }
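Note: processRangeRequest now tells the callback which HTTP status to emit, so the serving handler calls WriteHeader itself instead of the S3 gateway rewriting 200 to 206 after the fact. A sketch of the new callback shape, mirroring the filer read handler above:

    processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
        // 0 means "no explicit status" (whole-file and multipart branches);
        // a single-range request passes http.StatusPartialContent.
        if httpStatusCode != 0 {
            w.WriteHeader(httpStatusCode)
        }
        return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
    })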
w.Header().Set("Content-Range", ra.contentRange(totalSize)) - err = writeFn(w, ra.start, ra.length) + err = writeFn(w, ra.start, ra.length, http.StatusPartialContent) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -307,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 pw.CloseWithError(e) return } - if e = writeFn(part, ra.start, ra.length); e != nil { + if e = writeFn(part, ra.start, ra.length, 0); e != nil { pw.CloseWithError(e) return } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 6bc09e953..1d90871d8 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -131,9 +131,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true) - }) return } @@ -153,7 +150,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } } - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error { + if httpStatusCode != 0 { + w.WriteHeader(httpStatusCode) + } if offset+size <= int64(len(entry.Content)) { _, err := writer.Write(entry.Content[offset : offset+size]) if err != nil { @@ -161,7 +161,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } return err } - return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false) + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size) }) } diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go index 50b3a2c06..70b5327d6 100644 --- a/weed/server/filer_server_handlers_tagging.go +++ b/weed/server/filer_server_handlers_tagging.go @@ -78,11 +78,27 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque existingEntry.Extended = make(map[string][]byte) } + // parse out tags to be deleted + toDelete := strings.Split(r.URL.Query().Get("tagging"), ",") + deletions := make(map[string]struct{}) + for _, deletion := range toDelete { + deletions[deletion] = struct{}{} + } + + // delete all tags or specific tags hasDeletion := false for header, _ := range existingEntry.Extended { if strings.HasPrefix(header, needle.PairNamePrefix) { - delete(existingEntry.Extended, header) - hasDeletion = true + if len(deletions) == 0 { + delete(existingEntry.Extended, header) + hasDeletion = true + } else { + tag := header[len(needle.PairNamePrefix):] + if _, found := deletions[tag]; found { + delete(existingEntry.Extended, header) + hasDeletion = true + } + } } } diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index b15deb9d1..540def563 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -72,10 +72,12 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque if uploadResult.Size == 0 { break } - uploadedMd5 := util.Base64Md5ToBytes(uploadResult.ContentMd5) - readedMd5 := 
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index b15deb9d1..540def563 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -72,10 +72,12 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
         if uploadResult.Size == 0 {
             break
         }
-        uploadedMd5 := util.Base64Md5ToBytes(uploadResult.ContentMd5)
-        readedMd5 := md5Hash.Sum(nil)
-        if !bytes.Equal(uploadedMd5, readedMd5) {
-            glog.Errorf("md5 %x does not match %x uploaded chunk %s to the volume server", readedMd5, uploadedMd5, uploadResult.Name)
+        if chunkOffset == 0 {
+            uploadedMd5 := util.Base64Md5ToBytes(uploadResult.ContentMd5)
+            readedMd5 := md5Hash.Sum(nil)
+            if !bytes.Equal(uploadedMd5, readedMd5) {
+                glog.Errorf("md5 %x does not match %x uploaded chunk %s to the volume server", readedMd5, uploadedMd5, uploadResult.Name)
+            }
         }
 
         // Save to chunk manifest structure
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index b87de4b5b..f8d1b7fda 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -44,19 +44,14 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
 
     resp := &volume_server_pb.VacuumVolumeCommitResponse{}
 
-    err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
+    readOnly, err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
 
     if err != nil {
         glog.Errorf("commit volume %d: %v", req.VolumeId, err)
     } else {
         glog.V(1).Infof("commit volume %d", req.VolumeId)
     }
-    if err == nil {
-        if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
-            resp.IsReadOnly = true
-        }
-    }
-
+    resp.IsReadOnly = readOnly
     return resp, err
 }
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 3e977cfd4..2db46ac9b 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -27,7 +27,7 @@ var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
 
 func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
 
-    glog.V(9).Info(r.Method + " " + r.URL.Path + " " + r.Header.Get("Range"))
+    // println(r.Method + " " + r.URL.Path)
 
     stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
     start := time.Now()
@@ -261,10 +261,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
         return nil
     }
 
-    processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+    processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
         if _, e = rs.Seek(offset, 0); e != nil {
             return e
         }
+        if httpStatusCode != 0 {
+            w.WriteHeader(httpStatusCode)
+        }
         _, e = io.CopyN(writer, rs, size)
         return e
     })
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 634cb11e2..8480bab06 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -63,6 +63,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
     collection := encodeCommand.String("collection", "", "the collection name")
     fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
     quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes without no writes for this period")
+    parallelCopy := encodeCommand.Bool("parallelCopy", true, "copy shards in parallel")
     if err = encodeCommand.Parse(args); err != nil {
         return nil
     }
@@ -71,7 +72,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 
     // volumeId is provided
     if vid != 0 {
-        return doEcEncode(commandEnv, *collection, vid)
+        return doEcEncode(commandEnv, *collection, vid, *parallelCopy)
     }
 
     // apply to all volumes in the collection
@@ -81,7 +82,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
     }
     fmt.Printf("ec encode volumes: %v\n", volumeIds)
     for _, vid := range volumeIds {
-        if err = doEcEncode(commandEnv, *collection, vid); err != nil {
+        if err = doEcEncode(commandEnv, *collection, vid, *parallelCopy); err != nil {
             return err
         }
     }
@@ -89,7 +90,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 
     return nil
 }
 
-func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
+func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId, parallelCopy bool) (err error) {
     // find volume location
     locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
     if !found {
@@ -111,7 +112,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId)
     }
 
     // balance the ec shards to current cluster
-    err = spreadEcShards(commandEnv, vid, collection, locations)
+    err = spreadEcShards(commandEnv, vid, collection, locations, parallelCopy)
     if err != nil {
         return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
     }
@@ -157,7 +158,7 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId,
 
 }
 
-func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location, parallelCopy bool) (err error) {
 
     allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
     if err != nil {
@@ -176,7 +177,7 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
     allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
 
     // ask the data nodes to copy from the source volume server
-    copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
+    copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0], parallelCopy)
     if err != nil {
         return err
     }
@@ -206,30 +207,36 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 
 }
 
-func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location, parallelCopy bool) (actuallyCopied []uint32, err error) {
 
     fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
 
-    // parallelize
-    shardIdChan := make(chan []uint32, len(targetServers))
     var wg sync.WaitGroup
+    shardIdChan := make(chan []uint32, len(targetServers))
+    copyFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
+        defer wg.Done()
+        copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
+            allocatedEcShardIds, volumeId, collection, existingLocation.Url)
+        if copyErr != nil {
+            err = copyErr
+        } else {
+            shardIdChan <- copiedShardIds
+            server.addEcVolumeShards(volumeId, collection, copiedShardIds)
+        }
+    }
+
+    // maybe parallelize
     for i, server := range targetServers {
         if len(allocatedEcIds[i]) <= 0 {
             continue
         }
         wg.Add(1)
-        go func(server *EcNode, allocatedEcShardIds []uint32) {
-            defer wg.Done()
-            copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
-                allocatedEcShardIds, volumeId, collection, existingLocation.Url)
-            if copyErr != nil {
-                err = copyErr
-            } else {
-                shardIdChan <- copiedShardIds
-                server.addEcVolumeShards(volumeId, collection, copiedShardIds)
-            }
-        }(server, allocatedEcIds[i])
+        if parallelCopy {
+            go copyFunc(server, allocatedEcIds[i])
+        } else {
+            copyFunc(server, allocatedEcIds[i])
+        }
     }
     wg.Wait()
     close(shardIdChan)
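Note: ec.encode gains a parallelCopy option (default true); setting it to false copies the generated shards to the target servers one at a time instead of in concurrent goroutines. From weed shell, with an illustrative collection name:

    ec.encode -collection=pictures -parallelCopy=false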
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index df43d93dc..3c5e13663 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -52,7 +52,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
             return err
         }
 
-        return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+        return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
     })
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index 32666a417..fe2033070 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -25,11 +25,11 @@ func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compaction
     }
     return fmt.Errorf("volume id %d is not found during compact", vid)
 }
-func (s *Store) CommitCompactVolume(vid needle.VolumeId) error {
+func (s *Store) CommitCompactVolume(vid needle.VolumeId) (bool, error) {
     if v := s.findVolume(vid); v != nil {
-        return v.CommitCompact()
+        return v.IsReadOnly(), v.CommitCompact()
     }
-    return fmt.Errorf("volume id %d is not found during commit compact", vid)
+    return false, fmt.Errorf("volume id %d is not found during commit compact", vid)
 }
 func (s *Store) CommitCleanupVolume(vid needle.VolumeId) error {
     if v := s.findVolume(vid); v != nil {
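Note: Store.CommitCompactVolume now returns the volume's read-only state together with the error, which lets VacuumVolumeCommit (above) drop its extra GetVolume lookup. A minimal caller sketch of the new two-value contract; the read-only flag comes from v.IsReadOnly(), which is evaluated before CommitCompact runs:

    readOnly, err := store.CommitCompactVolume(vid)
    if err != nil {
        return fmt.Errorf("commit compact volume %d: %v", vid, err)
    }
    resp.IsReadOnly = readOnly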
