author    tnextday <fw2k4@163.com>    2015-12-02 21:27:29 +0800
committer tnextday <fw2k4@163.com>    2015-12-02 21:27:29 +0800
commit    662915e6915d18a9e45b73577f93ef3be6562f4e (patch)
tree      ef44a0237091e1bc2d395679372b87e682ebbb96 /go/operation
parent    520875d45504f0b659de7f4ff656634489100416 (diff)
Delete all chunks when deleting a ChunkManifest

- LoadChunkManifest can now uncompress the buffer
- move compress.go from storage to operation because of an import cycle
- Makefile: add cross-compile command
Diffstat (limited to 'go/operation')

-rw-r--r--  go/operation/chunked_file.go  16
-rw-r--r--  go/operation/compress.go      62

2 files changed, 73 insertions, 5 deletions
diff --git a/go/operation/chunked_file.go b/go/operation/chunked_file.go
index 0e455e93a..33cb25703 100644
--- a/go/operation/chunked_file.go
+++ b/go/operation/chunked_file.go
@@ -30,10 +30,10 @@ type ChunkInfo struct {
type ChunkList []*ChunkInfo
type ChunkManifest struct {
-	Name string `json:"name,omitempty"`
-	Mime string `json:"mime,omitempty"`
-	Size int64 `json:"size,omitempty"`
-	Chunks ChunkList `json:"chunks,omitempty"`
+	Name   string    `json:"name,omitempty"`
+	Mime   string    `json:"mime,omitempty"`
+	Size   int64     `json:"size,omitempty"`
+	Chunks ChunkList `json:"chunks,omitempty"`
}
// seekable chunked file reader
@@ -50,7 +50,13 @@ func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func LoadChunkedManifest(buffer []byte) (*ChunkManifest, error) {
+func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
+	if isGzipped {
+		var err error
+		if buffer, err = UnGzipData(buffer); err != nil {
+			return nil, err
+		}
+	}
 	cm := ChunkManifest{}
 	if e := json.Unmarshal(buffer, &cm); e != nil {
 		return nil, e
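
For context, a minimal sketch of calling the updated loader. The file name and the read step are hypothetical; only LoadChunkManifest and the ChunkManifest fields come from this commit:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	// Hypothetical input: a gzipped chunk manifest fetched earlier.
	buffer, err := ioutil.ReadFile("manifest.json.gz")
	if err != nil {
		panic(err)
	}
	// Pass isGzipped=true so LoadChunkManifest ungzips before unmarshalling.
	cm, err := operation.LoadChunkManifest(buffer, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(cm.Name, cm.Size, len(cm.Chunks))
}
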
diff --git a/go/operation/compress.go b/go/operation/compress.go
new file mode 100644
index 000000000..b1105ba4b
--- /dev/null
+++ b/go/operation/compress.go
@@ -0,0 +1,62 @@
+package operation
+
+import (
+	"bytes"
+	"compress/flate"
+	"compress/gzip"
+	"io/ioutil"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/go/glog"
+)
+
+/*
+* Default mode is not to gzip, since gzip can be done on the client side.
+ */
+func IsGzippable(ext, mtype string) bool {
+	if strings.HasPrefix(mtype, "text/") {
+		return true
+	}
+	switch ext {
+	case ".zip", ".rar", ".gz", ".bz2", ".xz":
+		return false
+	case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json":
+		return true
+	}
+	if strings.HasPrefix(mtype, "application/") {
+		if strings.HasSuffix(mtype, "xml") {
+			return true
+		}
+		if strings.HasSuffix(mtype, "script") {
+			return true
+		}
+	}
+	return false
+}
+
+func GzipData(input []byte) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
+	if _, err := w.Write(input); err != nil {
+		glog.V(2).Infoln("error compressing data:", err)
+		return nil, err
+	}
+	if err := w.Close(); err != nil {
+		glog.V(2).Infoln("error closing compressed data:", err)
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+func UnGzipData(input []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewBuffer(input))
+	if err != nil {
+		// a nil reader would panic below, so return the error instead
+		return nil, err
+	}
+	defer r.Close()
+	output, err := ioutil.ReadAll(r)
+	if err != nil {
+		glog.V(2).Infoln("error uncompressing data:", err)
+	}
+	return output, err
+}