aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author霍晓栋 <huoxd@jiedaibao.com>2016-06-08 15:46:14 +0800
committer霍晓栋 <huoxd@jiedaibao.com>2016-06-08 15:46:14 +0800
commit3a25af223f2a1b555cdc7bfdcb99e4aab7220b18 (patch)
tree1aba1800e571f2cc7f91d37300edf0d7f0ab3309
parent26bb9094d4d71184cc5532bcb907f47c98ac4ff9 (diff)
downloadseaweedfs-3a25af223f2a1b555cdc7bfdcb99e4aab7220b18.tar.xz
seaweedfs-3a25af223f2a1b555cdc7bfdcb99e4aab7220b18.zip
refactor work for filer write handler
-rw-r--r--weed/server/filer_server_handlers_write.go268
1 file changed, 146 insertions, 122 deletions
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 963eb13d6..597004f7d 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -70,6 +70,146 @@ func makeFormData(filename, mimeType string, content io.Reader) (formData io.Rea
return
}
+// queryFileInfoByPath looks up path in the filer store and, when a file id is
+// found, resolves its volume-server URL via the master node.
+// On a store error other than leveldb.ErrNotFound it writes a 500 JSON error;
+// on a failed fileId lookup it writes 404 and returns the lookup error.
+// NOTE(review): when FindFile returns leveldb.ErrNotFound, err is returned
+// non-nil with no HTTP response written — callers that test `err == nil &&
+// fileId == ""` to decide "assign a new fid" will never take that branch.
+// Verify FindFile's not-found contract (does it return "", nil instead?).
+func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Request, path string) (fileId, urlLocation string, err error) {
+ if fileId, err = fs.filer.FindFile(path); err != nil && err != leveldb.ErrNotFound {
+ glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ } else if fileId != "" && err == nil {
+ urlLocation, err = operation.LookupFileId(fs.getMasterNode(), fileId)
+ if err != nil {
+ // NOTE(review): Infoln does not interpret %s verbs — Infof is intended here.
+ glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ }
+ return
+}
+
+// assignNewFileInfo asks the master node to assign a fresh file id (honoring
+// the request's "ttl" query parameter) and builds the direct volume-server
+// upload URL for it. On failure it writes a 500 JSON error and returns the
+// assignment error; on success err is nil.
+func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
+ assignResult, ae := operation.Assign(fs.getMasterNode(), 1, replication, collection, r.URL.Query().Get("ttl"))
+ if ae != nil {
+ glog.V(0).Infoln("failing to assign a file id", ae.Error())
+ writeJsonError(w, r, http.StatusInternalServerError, ae)
+ err = ae
+ return
+ }
+ fileId = assignResult.Fid
+ urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
+ return
+}
+
+// multipartUploadAnalyzer resolves the target fileId/urlLocation for a
+// standard multipart/form-data upload. A PUT is treated as an overwrite of an
+// existing path (looked up in the filer store); any other method gets a newly
+// assigned file id from the master.
+func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
+ // Default handling for an http multipart request body.
+ if r.Method == "PUT" {
+ // Buffer the whole body so ParseUpload can consume it and we can still
+ // replay it to the volume server afterwards.
+ // NOTE(review): the ReadAll error is silently discarded — a truncated
+ // body would be parsed/forwarded as-is; consider handling it.
+ buf, _ := ioutil.ReadAll(r.Body)
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+ fileName, _, _, _, _, _, _, pe := storage.ParseUpload(r)
+ if pe != nil {
+ glog.V(0).Infoln("failing to parse post body", pe.Error())
+ writeJsonError(w, r, http.StatusInternalServerError, pe)
+ err = pe
+ return
+ }
+ //reconstruct http request body for following new request to volume server
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+
+ // Directory-style path: append the uploaded file's name to form the key.
+ path := r.URL.Path
+ if strings.HasSuffix(path, "/") {
+ if fileName != "" {
+ path += fileName
+ }
+ }
+ // NOTE(review): if the path is not yet in the store this may return a
+ // non-nil not-found error with no new fid assigned — confirm intended.
+ fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path)
+ } else {
+ fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
+ }
+ return
+}
+
+// multipartHttpBodyBuilder rewraps a raw (S3-style) PUT body as a
+// multipart/form-data body in place on r, so the request can be forwarded to
+// the volume server's standard upload API. It updates r.Body, r.ContentLength
+// and the Content-Type header. On failure it writes a 500 JSON error and
+// returns the conversion error.
+func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) {
+ body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
+ if te != nil {
+ glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
+ writeJsonError(w, r, http.StatusInternalServerError, te)
+ err = te
+ return
+ }
+
+ // Recompute ContentLength for the reader types makeFormData may return;
+ // other reader types leave the original (now stale) length untouched.
+ if body != nil {
+ switch v := body.(type) {
+ case *bytes.Buffer:
+ r.ContentLength = int64(v.Len())
+ case *bytes.Reader:
+ r.ContentLength = int64(v.Len())
+ case *strings.Reader:
+ r.ContentLength = int64(v.Len())
+ }
+ }
+
+ r.Header.Set("Content-Type", contentType)
+ // Wrap plain readers so r.Body satisfies io.ReadCloser.
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ r.Body = rc
+ return
+}
+
+// checkContentMD5 verifies the request body against the Content-MD5 header
+// (base64-encoded MD5, per RFC 1864), when that header is present. On a
+// mismatch it writes a 406 JSON error and returns an error; otherwise it
+// restores r.Body so the body can be re-read by the forwarded request.
+func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) {
+ if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
+ // NOTE(review): ReadAll error is discarded; a short read would fail the
+ // MD5 check with a misleading message.
+ buf, _ := ioutil.ReadAll(r.Body)
+ //checkMD5
+ sum := md5.Sum(buf)
+ fileDataMD5 := base64.StdEncoding.EncodeToString(sum[0:len(sum)])
+ // Case-insensitive compare; strings.EqualFold would be the idiomatic form.
+ if strings.ToLower(fileDataMD5) != strings.ToLower(contentMD5) {
+ glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
+ err = fmt.Errorf("MD5 check failed")
+ writeJsonError(w, r, http.StatusNotAcceptable, err)
+ return
+ }
+ //reconstruct http request body for following new request to volume server
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+ }
+ return
+}
+
+// monolithicUploadAnalyzer handles a non-multipart (S3-style PUT) upload:
+// it validates the URL, checks Content-MD5, rewraps the raw body as
+// multipart, derives the collection from the bucket segment, and resolves an
+// existing or newly assigned fileId/urlLocation.
+func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
+ /*
+ Amazon S3 reference: http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html
+ Full compatibility with the Amazon S3 API is a long way off; this is just
+ a simple data-stream adapter between the S3 PUT API and seaweedfs's volume storage Write API.
+ 1. The request url format should be http://$host:$port/$bucketName/$objectName
+ 2. bucketName will be mapped to seaweedfs's collection name
+ 3. You could customize and make your enhancement.
+ */
+ // Require at least /bucket/object: a trailing "/" or missing segment is invalid.
+ lastPos := strings.LastIndex(r.URL.Path, "/")
+ if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
+ // NOTE(review): Infoln does not interpret %s verbs — Infof is intended here.
+ glog.V(0).Infoln("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
+ err = fmt.Errorf("URL Path is invalid")
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+
+ if err = checkContentMD5(w, r); err != nil {
+ return
+ }
+
+ // Object name is everything after the last "/".
+ fileName := r.URL.Path[lastPos+1:]
+ if err = multipartHttpBodyBuilder(w, r, fileName); err != nil {
+ return
+ }
+
+ // First path segment (the bucket) becomes the collection name.
+ secondPos := strings.Index(r.URL.Path[1:], "/") + 1
+ collection = r.URL.Path[1:secondPos]
+ path := r.URL.Path
+
+ // Reuse an existing fid for the path if present; otherwise assign a new one.
+ // NOTE(review): if queryFileInfoByPath surfaces leveldb.ErrNotFound as a
+ // non-nil err, the assign branch is skipped and no response is written —
+ // confirm FindFile's not-found behavior.
+ if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" {
+ fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
+ }
+ return
+}
+
func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
replication := query.Get("replication")
@@ -81,135 +221,19 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
collection = fs.collection
}
- var fileId string
+ var fileId, urlLocation string
var err error
- var urlLocation string
if strings.HasPrefix(r.Header.Get("Content-Type"), "multipart/form-data; boundary=") {
- //Default handle way for http multipart
- if r.Method == "PUT" {
- buf, _ := ioutil.ReadAll(r.Body)
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- fileName, _, _, _, _, _, _, pe := storage.ParseUpload(r)
- if pe != nil {
- glog.V(0).Infoln("failing to parse post body", pe.Error())
- writeJsonError(w, r, http.StatusInternalServerError, pe)
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
-
- path := r.URL.Path
- if strings.HasSuffix(path, "/") {
- if fileName != "" {
- path += fileName
- }
- }
-
- if fileId, err = fs.filer.FindFile(path); err != nil && err != leveldb.ErrNotFound {
- glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, err)
- return
- } else if fileId != "" && err == nil {
- var le error
- urlLocation, le = operation.LookupFileId(fs.getMasterNode(), fileId)
- if le != nil {
- glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, le.Error())
- w.WriteHeader(http.StatusNotFound)
- return
- }
- }
- } else {
- assignResult, ae := operation.Assign(fs.getMasterNode(), 1, replication, collection, query.Get("ttl"))
- if ae != nil {
- glog.V(0).Infoln("failing to assign a file id", ae.Error())
- writeJsonError(w, r, http.StatusInternalServerError, ae)
- return
- }
- fileId = assignResult.Fid
- urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
- }
- } else {
- /*
- Amazon S3 ref link:[http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]
- There is a long way to provide a completely compatibility against all Amazon S3 API, I just made
- a simple data stream adapter between S3 PUT API and seaweedfs's volume storage Write API
- 1. The request url format should be http://$host:$port/$bucketName/$objectName
- 2. bucketName will be mapped to seaweedfs's collection name
- */
- lastPos := strings.LastIndex(r.URL.Path, "/")
- if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
- glog.V(0).Infoln("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
- writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("URL Path is invalid"))
- return
- }
-
- secondPos := strings.Index(r.URL.Path[1:], "/") + 1
- collection = r.URL.Path[1:secondPos]
- path := r.URL.Path
-
- if fileId, err = fs.filer.FindFile(path); err != nil && err != leveldb.ErrNotFound {
- glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, err)
+ fileId, urlLocation, err = fs.multipartUploadAnalyzer(w, r, replication, collection)
+ if err != nil {
return
- } else if fileId != "" && err == nil {
- var le error
- urlLocation, le = operation.LookupFileId(fs.getMasterNode(), fileId)
- if le != nil {
- glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, le.Error())
- w.WriteHeader(http.StatusNotFound)
- return
- }
- } else {
- assignResult, ae := operation.Assign(fs.getMasterNode(), 1, replication, collection, query.Get("ttl"))
- if ae != nil {
- glog.V(0).Infoln("failing to assign a file id", ae.Error())
- writeJsonError(w, r, http.StatusInternalServerError, ae)
- return
- }
- fileId = assignResult.Fid
- urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
}
-
- if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
- buf, _ := ioutil.ReadAll(r.Body)
- //checkMD5
- sum := md5.Sum(buf)
- fileDataMD5 := base64.StdEncoding.EncodeToString(sum[0:len(sum)])
- if strings.ToLower(fileDataMD5) != strings.ToLower(contentMD5) {
- glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
- writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("MD5 check failed"))
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- }
-
- fileName := r.URL.Path[lastPos+1:]
- body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
- if te != nil {
- glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
- writeJsonError(w, r, http.StatusInternalServerError, te)
+ } else {
+ fileId, urlLocation, err = fs.monolithicUploadAnalyzer(w, r, replication, collection)
+ if err != nil {
return
}
-
- if body != nil {
- switch v := body.(type) {
- case *bytes.Buffer:
- r.ContentLength = int64(v.Len())
- case *bytes.Reader:
- r.ContentLength = int64(v.Len())
- case *strings.Reader:
- r.ContentLength = int64(v.Len())
- }
- }
-
- r.Header.Set("Content-Type", contentType)
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- r.Body = rc
}
u, _ := url.Parse(urlLocation)