Diffstat (limited to 'go/weed')
-rw-r--r--   go/weed/download.go                                  |  94
-rw-r--r--   go/weed/weed_server/common.go                        |   2
-rw-r--r--   go/weed/weed_server/volume_server_handlers_read.go   | 146
-rw-r--r--   go/weed/weed_server/volume_server_handlers_write.go  |  64
4 files changed, 221 insertions, 85 deletions
diff --git a/go/weed/download.go b/go/weed/download.go
index 3c55b3a34..dfe4f88b4 100644
--- a/go/weed/download.go
+++ b/go/weed/download.go
@@ -3,9 +3,11 @@ package main
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
+
+	"io/ioutil"
+	"strings"
 
 	"github.com/chrislusf/seaweedfs/go/operation"
@@ -43,50 +45,76 @@ var cmdDownload = &Command{
 func runDownload(cmd *Command, args []string) bool {
 	for _, fid := range args {
-		filename, content, e := fetchFileId(*d.server, fid)
-		if e != nil {
-			fmt.Println("Fetch Error:", e)
-			continue
+		if e := downloadToFile(*d.server, fid, *d.dir); e != nil {
+			fmt.Println("Download Error: ", fid, e)
 		}
-		if filename == "" {
-			filename = fid
+	}
+	return true
+}
+
+func downloadToFile(server, fileId, saveDir string) error {
+	fileUrl, lookupError := operation.LookupFileId(server, fileId)
+	if lookupError != nil {
+		return lookupError
+	}
+	filename, rc, err := util.DownloadUrl(fileUrl)
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+	if filename == "" {
+		filename = fileId
+	}
+	isFileList := false
+	if strings.HasSuffix(filename, "-list") {
+		// old command compatible
+		isFileList = true
+		filename = filename[0 : len(filename)-len("-list")]
+	}
+	f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if isFileList {
+		content, err := ioutil.ReadAll(rc)
+		if err != nil {
+			return err
 		}
-		if strings.HasSuffix(filename, "-list") {
-			filename = filename[0 : len(filename)-len("-list")]
-			fids := strings.Split(string(content), "\n")
-			f, err := os.OpenFile(path.Join(*d.dir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
-			if err != nil {
-				fmt.Println("File Creation Error:", e)
-				continue
+		fids := strings.Split(string(content), "\n")
+		for _, partId := range fids {
+			var n int
+			_, part, err := fetchContent(*d.server, partId)
+			if err == nil {
+				n, err = f.Write(part)
 			}
-			defer f.Close()
-			for _, partId := range fids {
-				var n int
-				_, part, err := fetchFileId(*d.server, partId)
-				if err == nil {
-					n, err = f.Write(part)
-				}
-				if err == nil && n < len(part) {
-					err = io.ErrShortWrite
-				}
-				if err != nil {
-					fmt.Println("File Write Error:", err)
-					break
-				}
+			if err == nil && n < len(part) {
+				err = io.ErrShortWrite
 			}
-		} else {
-			ioutil.WriteFile(path.Join(*d.dir, filename), content, os.ModePerm)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		if _, err = io.Copy(f, rc); err != nil {
+			return err
 		}
 	}
-	return true
+	return nil
 }
 
-func fetchFileId(server string, fileId string) (filename string, content []byte, e error) {
+func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
 	fileUrl, lookupError := operation.LookupFileId(server, fileId)
 	if lookupError != nil {
 		return "", nil, lookupError
 	}
-	filename, content, e = util.DownloadUrl(fileUrl)
+	var rc io.ReadCloser
+	if filename, rc, e = util.DownloadUrl(fileUrl); e != nil {
+		return "", nil, e
+	}
+	content, e = ioutil.ReadAll(rc)
+	rc.Close()
 	return
 }
diff --git a/go/weed/weed_server/common.go b/go/weed/weed_server/common.go
index 4ad9824b1..a7fa2de53 100644
--- a/go/weed/weed_server/common.go
+++ b/go/weed/weed_server/common.go
@@ -86,7 +86,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 	}
 	debug("parsing upload file...")
-	fname, data, mimeType, isGzipped, lastModified, _, pe := storage.ParseUpload(r)
+	fname, data, mimeType, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r)
 	if pe != nil {
 		writeJsonError(w, r, http.StatusBadRequest, pe)
 		return
diff --git a/go/weed/weed_server/volume_server_handlers_read.go b/go/weed/weed_server/volume_server_handlers_read.go
index d569f5510..2aa0fc656 100644
--- a/go/weed/weed_server/volume_server_handlers_read.go
+++ b/go/weed/weed_server/volume_server_handlers_read.go
@@ -9,6 +9,10 @@ import (
 	"strings"
 	"time"
 
+	"path"
+
+	"bytes"
+
 	"github.com/chrislusf/seaweedfs/go/glog"
 	"github.com/chrislusf/seaweedfs/go/images"
 	"github.com/chrislusf/seaweedfs/go/operation"
@@ -81,36 +85,32 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 	w.Header().Set("Etag", etag)
+
+	if vs.tryHandleChunkedFile(n, filename, w, r) {
+		return
+	}
+
 	if n.NameSize > 0 && filename == "" {
 		filename = string(n.Name)
-		dotIndex := strings.LastIndex(filename, ".")
-		if dotIndex > 0 {
-			ext = filename[dotIndex:]
+		if ext == "" {
+			ext = path.Ext(filename)
 		}
 	}
 	mtype := ""
-	if ext != "" {
-		mtype = mime.TypeByExtension(ext)
-	}
 	if n.MimeSize > 0 {
 		mt := string(n.Mime)
 		if !strings.HasPrefix(mt, "application/octet-stream") {
 			mtype = mt
 		}
 	}
-	if mtype != "" {
-		w.Header().Set("Content-Type", mtype)
-	}
-	if filename != "" {
-		w.Header().Set("Content-Disposition", "filename=\""+fileNameEscaper.Replace(filename)+"\"")
-	}
+
 	if ext != ".gz" {
 		if n.IsGzipped() {
 			if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
 				w.Header().Set("Content-Encoding", "gzip")
 			} else {
-				if n.Data, err = storage.UnGzipData(n.Data); err != nil {
-					glog.V(0).Infoln("lookup error:", err, r.URL.Path)
+				if n.Data, err = operation.UnGzipData(n.Data); err != nil {
+					glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
 				}
 			}
 		}
@@ -126,38 +126,94 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		n.Data, _, _ = images.Resized(ext, n.Data, width, height)
 	}
+	if e := writeResponseContent(filename, mtype, bytes.NewReader(n.Data), w, r); e != nil {
+		glog.V(2).Infoln("response write error:", e)
+	}
+}
+
+func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
+	if !n.IsChunkedManifest() {
+		return false
+	}
+	raw, _ := strconv.ParseBool(r.FormValue("raw"))
+	if raw {
+		return false
+	}
+	processed = true
+
+	chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+	if e != nil {
+		glog.V(0).Infof("load chunked manifest (%s) error: %s", r.URL.Path, e.Error())
+		return false
+	}
+	if fileName == "" && chunkManifest.Name != "" {
+		fileName = chunkManifest.Name
+	}
+	mType := ""
+	if chunkManifest.Mime != "" {
+		mt := chunkManifest.Mime
+		if !strings.HasPrefix(mt, "application/octet-stream") {
+			mType = mt
+		}
+	}
+
+	w.Header().Set("X-File-Store", "chunked")
+
+	chunkedFileReader := &operation.ChunkedFileReader{
+		Manifest: chunkManifest,
+		Master:   vs.GetMasterNode(),
+	}
+	defer chunkedFileReader.Close()
+	if e := writeResponseContent(fileName, mType, chunkedFileReader, w, r); e != nil {
+		glog.V(2).Infoln("response write error:", e)
+	}
+	return
+}
+
+func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
+	totalSize, e := rs.Seek(0, 2)
+	if mimeType == "" {
+		if ext := path.Ext(filename); ext != "" {
+			mimeType = mime.TypeByExtension(ext)
+		}
+	}
+	if mimeType != "" {
+		w.Header().Set("Content-Type", mimeType)
+	}
+	if filename != "" {
+		w.Header().Set("Content-Disposition", `filename="`+fileNameEscaper.Replace(filename)+`"`)
+	}
 	w.Header().Set("Accept-Ranges", "bytes")
 	if r.Method == "HEAD" {
- w.Header().Set("Content-Length", strconv.Itoa(len(n.Data))) - return + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + return nil } rangeReq := r.Header.Get("Range") if rangeReq == "" { - w.Header().Set("Content-Length", strconv.Itoa(len(n.Data))) - if _, e = w.Write(n.Data); e != nil { - glog.V(0).Infoln("response write error:", e) + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + if _, e = rs.Seek(0, 0); e != nil { + return e } - return + _, e = io.Copy(w, rs) + return e } //the rest is dealing with partial content request //mostly copy from src/pkg/net/http/fs.go - size := int64(len(n.Data)) - ranges, err := parseRange(rangeReq, size) + ranges, err := parseRange(rangeReq, totalSize) if err != nil { http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return + return nil } - if sumRangesSize(ranges) > size { + if sumRangesSize(ranges) > totalSize { // The total number of bytes in all the ranges // is larger than the size of the file by // itself, so this is probably an attack, or a // dumb client. Ignore the range request. - ranges = nil - return + return nil } if len(ranges) == 0 { - return + return nil } if len(ranges) == 1 { // RFC 2616, Section 14.16: @@ -173,21 +229,23 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) // be sent using the multipart/byteranges media type." ra := ranges[0] w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(size)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) w.WriteHeader(http.StatusPartialContent) - if _, e = w.Write(n.Data[ra.start : ra.start+ra.length]); e != nil { - glog.V(0).Infoln("response write error:", e) + if _, e = rs.Seek(ra.start, 0); e != nil { + return e } - return + + _, e = io.CopyN(w, rs, ra.length) + return e } - // process mulitple ranges + // process multiple ranges for _, ra := range ranges { - if ra.start > size { + if ra.start > totalSize { http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return + return nil } } - sendSize := rangesMIMESize(ranges, mtype, size) + sendSize := rangesMIMESize(ranges, mimeType, totalSize) pr, pw := io.Pipe() mw := multipart.NewWriter(pw) w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) @@ -195,13 +253,17 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
 	go func() {
 		for _, ra := range ranges {
-			part, err := mw.CreatePart(ra.mimeHeader(mtype, size))
-			if err != nil {
-				pw.CloseWithError(err)
+			part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
+			if e != nil {
+				pw.CloseWithError(e)
 				return
 			}
-			if _, err = part.Write(n.Data[ra.start : ra.start+ra.length]); err != nil {
-				pw.CloseWithError(err)
+			if _, e = rs.Seek(ra.start, 0); e != nil {
+				pw.CloseWithError(e)
+				return
+			}
+			if _, e = io.CopyN(part, rs, ra.length); e != nil {
+				pw.CloseWithError(e)
 				return
 			}
 		}
@@ -212,6 +274,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
 	}
 	w.WriteHeader(http.StatusPartialContent)
-	io.CopyN(w, sendContent, sendSize)
-
+	_, e = io.CopyN(w, sendContent, sendSize)
+	return e
 }
diff --git a/go/weed/weed_server/volume_server_handlers_write.go b/go/weed/weed_server/volume_server_handlers_write.go
index 2f7e79ce9..1f0fa96dd 100644
--- a/go/weed/weed_server/volume_server_handlers_write.go
+++ b/go/weed/weed_server/volume_server_handlers_write.go
@@ -53,9 +53,8 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	glog.V(2).Infoln("deleting", n)
 	cookie := n.Cookie
-	count, ok := vs.store.ReadVolumeNeedle(volumeId, n)
-	if ok != nil {
+	if _, ok := vs.store.ReadVolumeNeedle(volumeId, n); ok != nil {
 		m := make(map[string]uint32)
 		m["size"] = 0
 		writeJsonQuiet(w, r, http.StatusNotFound, m)
@@ -64,14 +63,31 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	if n.Cookie != cookie {
 		glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+		writeJsonError(w, r, http.StatusBadRequest, errors.New("File Random Cookie does not match."))
 		return
 	}
 
+	count := int64(n.Size)
+
+	if n.IsChunkedManifest() {
+		chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+		if e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Load chunks manifest error: "+e.Error()))
+			return
+		}
+		// make sure all chunks had deleted before delete manifest
+		if e := chunkManifest.DeleteChunks(vs.GetMasterNode()); e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Delete chunks error: "+e.Error()))
+			return
+		}
+		count = chunkManifest.Size
+	}
+
 	ret := topology.ReplicatedDelete(vs.GetMasterNode(), vs.store, volumeId, n, r)
 	if ret != 0 {
-		m := make(map[string]uint32)
-		m["size"] = uint32(count)
+		m := make(map[string]int64)
+		m["size"] = count
 		writeJsonQuiet(w, r, http.StatusAccepted, m)
 	} else {
 		writeJsonError(w, r, http.StatusInternalServerError, errors.New("Deletion Failed."))
@@ -86,7 +102,10 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
 	for _, fid := range r.Form["fid"] {
 		vid, id_cookie, err := operation.ParseFileId(fid)
 		if err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusBadRequest,
+				Error:  err.Error()})
 			continue
 		}
 		n := new(storage.Needle)
@@ -95,18 +114,45 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
 		glog.V(4).Infoln("batch deleting", n)
 		cookie := n.Cookie
 		if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusNotFound,
+				Error:  err.Error(),
+			})
+			continue
+		}
+
+		if n.IsChunkedManifest() {
+			//Don't allow delete manifest in batch delete mode
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusNotAcceptable,
+				Error:  "ChunkManifest: not allow.",
+			})
+			continue
+		}
+
 		if n.Cookie != cookie {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: "File Random Cookie does not match."})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusBadRequest,
+				Error:  "File Random Cookie does not match.",
+			})
 			glog.V(0).Infoln("deleting", fid, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 			return
 		}
 		if size, err := vs.store.Delete(volumeId, n); err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusInternalServerError,
+				Error:  err.Error()},
+			)
 		} else {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Size: int(size)})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusAccepted,
+				Size:   int(size)},
+			)
 		}
 	}
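
For reference, here is a minimal client-side sketch (not part of the commit) of exercising the new read path with a ranged GET against a volume server. The writeResponseContent helper introduced above serves such requests from an io.ReadSeeker, whether the needle holds its data inline (bytes.Reader) or points to a chunk manifest (operation.ChunkedFileReader). The address and file id below are placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Placeholder volume server address and file id; substitute real values.
	req, err := http.NewRequest("GET", "http://127.0.0.1:8080/3,01637037d6", nil)
	if err != nil {
		panic(err)
	}
	// Ask for the first kilobyte only; the handler advertises
	// "Accept-Ranges: bytes" and should answer 206 Partial Content
	// with a Content-Range header.
	req.Header.Set("Range", "bytes=0-1023")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Range"), len(body))
}

A request without a Range header goes down the same path, with Content-Length set to the full size reported by rs.Seek(0, 2).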
