Diffstat (limited to 'weed/server/filer_server_handlers_read.go')
-rw-r--r--  weed/server/filer_server_handlers_read.go  254
1 file changed, 84 insertions(+), 170 deletions(-)
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index ba21298ba..6bc09e953 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -1,19 +1,22 @@
package weed_server
import (
+ "bytes"
"context"
"io"
- "io/ioutil"
"mime"
- "mime/multipart"
"net/http"
"net/url"
- "path"
+ "path/filepath"
"strconv"
"strings"
+ "time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/images"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -26,13 +29,13 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
path = path[:len(path)-1]
}
- entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path))
+ entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path))
if err != nil {
if path == "/" {
fs.listDirectoryHandler(w, r)
return
}
- if err == filer2.ErrNotFound {
+ if err == filer_pb.ErrNotFound {
glog.V(1).Infof("Not found %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc()
w.WriteHeader(http.StatusNotFound)
@@ -58,196 +61,107 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
return
}
- if len(entry.Chunks) == 0 {
- glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
- stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc()
- w.WriteHeader(http.StatusNoContent)
- return
- }
-
w.Header().Set("Accept-Ranges", "bytes")
- if r.Method == "HEAD" {
- w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
- w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
- setEtag(w, filer2.ETag(entry.Chunks))
- return
- }
-
- if len(entry.Chunks) == 1 {
- fs.handleSingleChunk(w, r, entry)
- return
- }
-
- fs.handleMultipleChunks(w, r, entry)
-
-}
-
-func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
-
- fileId := entry.Chunks[0].GetFileIdString()
-
- urlString, err := fs.filer.MasterClient.LookupFileId(fileId)
- if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
- w.WriteHeader(http.StatusNotFound)
- return
- }
-
- if fs.option.RedirectOnRead {
- stats.FilerRequestCounter.WithLabelValues("redirect").Inc()
- http.Redirect(w, r, urlString, http.StatusFound)
- return
- }
-
- u, _ := url.Parse(urlString)
- q := u.Query()
- for key, values := range r.URL.Query() {
- for _, value := range values {
- q.Add(key, value)
- }
- }
- u.RawQuery = q.Encode()
- request := &http.Request{
- Method: r.Method,
- URL: u,
- Proto: r.Proto,
- ProtoMajor: r.ProtoMajor,
- ProtoMinor: r.ProtoMinor,
- Header: r.Header,
- Body: r.Body,
- Host: r.Host,
- ContentLength: r.ContentLength,
- }
- glog.V(3).Infoln("retrieving from", u)
- resp, do_err := util.Do(request)
- if do_err != nil {
- glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, do_err)
- return
- }
- defer func() {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }()
- for k, v := range resp.Header {
- w.Header()[k] = v
- }
- if entry.Attr.Mime != "" {
- w.Header().Set("Content-Type", entry.Attr.Mime)
- }
- w.WriteHeader(resp.StatusCode)
- io.Copy(w, resp.Body)
-}
-
-func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
+ // mime type
mimeType := entry.Attr.Mime
if mimeType == "" {
- if ext := path.Ext(entry.Name()); ext != "" {
+ if ext := filepath.Ext(entry.Name()); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
- setEtag(w, filer2.ETag(entry.Chunks))
- totalSize := int64(filer2.TotalSize(entry.Chunks))
+ // if modified since
+ if !entry.Attr.Mtime.IsZero() {
+ w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
+ if r.Header.Get("If-Modified-Since") != "" {
+ if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
+ if !t.Before(entry.Attr.Mtime) {
+ w.WriteHeader(http.StatusNotModified)
+ return
+ }
+ }
+ }
+ }
- rangeReq := r.Header.Get("Range")
+ // print out the header from extended properties
+ for k, v := range entry.Extended {
+ w.Header().Set(k, string(v))
+ }
- if rangeReq == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ // Seaweed custom headers are not visible to Vue or JavaScript
+ seaweedHeaders := []string{}
+ for header := range w.Header() {
+ if strings.HasPrefix(header, "Seaweed-") {
+ seaweedHeaders = append(seaweedHeaders, header)
}
- return
}
+ seaweedHeaders = append(seaweedHeaders, "Content-Disposition")
+ w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ","))
- //the rest is dealing with partial content request
- //mostly copy from src/pkg/net/http/fs.go
- ranges, err := parseRange(rangeReq, totalSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return
- }
- if sumRangesSize(ranges) > totalSize {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- return
+ // set tag count
+ if r.Method == "GET" {
+ tagCount := 0
+ for k := range entry.Extended {
+ if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") {
+ tagCount++
+ }
+ }
+ if tagCount > 0 {
+ w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
+ }
}
- if len(ranges) == 0 {
+
+ // set etag
+ etag := filer.ETagEntry(entry)
+ if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
+ w.WriteHeader(http.StatusNotModified)
return
}
- if len(ranges) == 1 {
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
- w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
-
- err = fs.writeContent(w, entry, ra.start, int(ra.length))
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
+ setEtag(w, etag)
+
+ filename := entry.Name()
+ filename = url.QueryEscape(filename)
+ adjustHeaderContentDisposition(w, r, filename)
+
+ totalSize := int64(entry.Size())
+
+ if r.Method == "HEAD" {
+ w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true)
+ })
return
}
- // process multiple ranges
- for _, ra := range ranges {
- if ra.start > totalSize {
- http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return
- }
- }
- sendSize := rangesMIMESize(ranges, mimeType, totalSize)
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent := pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
- if e != nil {
- pw.CloseWithError(e)
- return
- }
- if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
- pw.CloseWithError(e)
+ if rangeReq := r.Header.Get("Range"); rangeReq == "" {
+ ext := filepath.Ext(filename)
+ width, height, mode, shouldResize := shouldResizeImages(ext, r)
+ if shouldResize {
+ data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks)
+ if err != nil {
+ glog.Errorf("failed to read %s: %v", path, err)
+ w.WriteHeader(http.StatusNotModified)
return
}
+ rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode)
+ io.Copy(w, rs)
+ return
}
- mw.Close()
- pw.Close()
- }()
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- w.WriteHeader(http.StatusPartialContent)
- if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
- http.Error(w, "Internal Error", http.StatusInternalServerError)
- return
}
-}
-
-func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
-
- return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size)
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ if offset+size <= int64(len(entry.Content)) {
+ _, err := writer.Write(entry.Content[offset : offset+size])
+ if err != nil {
+ glog.Errorf("failed to write entry content: %v", err)
+ }
+ return err
+ }
+ return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false)
+ })
}
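
The rewritten handler serves whole reads, byte ranges, conditional requests (If-Modified-Since / If-None-Match), and HEAD from a single code path built around processRangeRequest. As a rough illustration of the client-visible behavior, the standalone Go sketch below (not part of this change) issues a plain GET, a conditional GET, and a range GET against a filer; the address localhost:8888 and the object path /buckets/test/hello.txt are placeholders and would need to match a real deployment.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholder filer address and file path; adjust to a real deployment.
	url := "http://localhost:8888/buckets/test/hello.txt"

	// Plain GET: the handler streams the content and returns ETag and
	// Last-Modified headers that later requests can reuse.
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	etag := resp.Header.Get("ETag")
	fmt.Printf("GET: %d bytes, ETag=%s, Last-Modified=%s\n",
		len(body), etag, resp.Header.Get("Last-Modified"))

	// Conditional GET: when If-None-Match carries the ETag just returned,
	// the handler is expected to answer 304 Not Modified with no body.
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	req.Header.Set("If-None-Match", etag)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("conditional GET status:", resp.StatusCode)

	// Range GET: the handler advertises Accept-Ranges: bytes, so a
	// bytes=0-9 request should come back as 206 Partial Content.
	req, _ = http.NewRequest(http.MethodGet, url, nil)
	req.Header.Set("Range", "bytes=0-9")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	partial, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("range GET: status=%d, Content-Range=%s, body=%q\n",
		resp.StatusCode, resp.Header.Get("Content-Range"), partial)
}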