aboutsummaryrefslogtreecommitdiff
path: root/go/operation/chunked_file.go
diff options
context:
space:
mode:
authortnextday <fw2k4@163.com>2015-11-29 23:49:41 +0800
committertnextday <fw2k4@163.com>2015-11-29 23:49:41 +0800
commit1817864a4589e05fa17a27ed1bc954b0cfcf12b4 (patch)
tree15d902b8e0f77efc825b6593ffcaa5ad68ad625c /go/operation/chunked_file.go
parenta4f64c011620e2ebd5dc41415e6f19713be9897a (diff)
downloadseaweedfs-1817864a4589e05fa17a27ed1bc954b0cfcf12b4.tar.xz
seaweedfs-1817864a4589e05fa17a27ed1bc954b0cfcf12b4.zip
add chunk file helper to support large file
Diffstat (limited to 'go/operation/chunked_file.go')
-rw-r--r--go/operation/chunked_file.go126
1 files changed, 126 insertions, 0 deletions
diff --git a/go/operation/chunked_file.go b/go/operation/chunked_file.go
new file mode 100644
index 000000000..a581af574
--- /dev/null
+++ b/go/operation/chunked_file.go
@@ -0,0 +1,126 @@
+package operation
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/go/util"
+)
+
+var ErrOutOfRange = errors.New("Out of Range")
+
+type ChunkInfo struct {
+ Fid string `json:"fid,omitempty"`
+ Offset uint64 `json:"offset,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+}
+
+type ChunkList []*ChunkInfo
+
+type ChunkedFile struct {
+ Name string `json:"name,omitempty"`
+ Mime string `json:"mime,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Chunks ChunkList `json:"chunks,omitempty"`
+
+ master string `json:"-"`
+}
+
+func (s ChunkList) Len() int { return len(s) }
+func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
+func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func NewChunkedNeedle(buffer []byte, master string) (*ChunkedFile, error) {
+ c := ChunkedFile{}
+
+ if e := json.Unmarshal(buffer, c); e != nil {
+ return nil, e
+ }
+ sort.Sort(c.Chunks)
+ c.master = master
+ return &c, nil
+}
+
+func (c *ChunkedFile) Marshal() ([]byte, error) {
+ return json.Marshal(c)
+}
+
+func copyChunk(fileUrl string, w io.Writer, startOffset, size int64) (written int64, e error) {
+ req, err := http.NewRequest("GET", fileUrl, nil)
+ if err != nil {
+ return written, err
+ }
+ if startOffset > 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-", startOffset))
+ }
+
+ resp, err := util.Do(req)
+ if err != nil {
+ return written, err
+ }
+ defer resp.Close()
+ if startOffset > 0 && resp.StatusCode != 206 {
+ return written, fmt.Errorf("Cannot Read Needle Position: %d [%s]", startOffset, fileUrl)
+ }
+
+ if size > 0 {
+ return io.CopyN(w, resp, size)
+ } else {
+ return io.Copy(w, resp)
+ }
+}
+
+func (c *ChunkedFile) WriteBuffer(w io.Writer, offset, size int64) (written int64, e error) {
+ if offset >= c.Size || offset+size > c.Size {
+ return written, ErrOutOfRange
+ }
+ chunkIndex := -1
+ chunkStartOffset := 0
+ for i, ci := range c.Chunks {
+ if offset >= ci.Offset && offset < ci.Offset+ci.Size {
+ chunkIndex = i
+ chunkStartOffset = offset - ci.Offset
+ break
+ }
+ }
+ if chunkIndex < 0 {
+ return written, ErrOutOfRange
+ }
+ for ; chunkIndex < c.Chunks.Len(); chunkIndex++ {
+ ci := c.Chunks[chunkIndex]
+ fileUrl, lookupError := LookupFileId(c.master, ci.Fid)
+ if lookupError != nil {
+ return written, lookupError
+ }
+ rsize := 0
+ if size > 0 {
+ rsize = size - written
+ }
+ if n, e := copyChunk(fileUrl, w, chunkStartOffset, rsize); e != nil {
+ return written, e
+ } else {
+ written += n
+ }
+
+ if size > 0 && written >= size {
+ break
+ }
+ chunkStartOffset = 0
+ }
+
+ return written, nil
+}
+
// DeleteHelper is intended to delete all chunks referenced by this
// manifest from the cluster. Not yet implemented: currently a no-op
// that always returns nil.
func (c *ChunkedFile) DeleteHelper() error {
	//TODO Delete all chunks
	return nil
}
+
// StoredHelper is a placeholder for storing/uploading the chunked
// file's data. Not yet implemented: currently a no-op that always
// returns nil.
func (c *ChunkedFile) StoredHelper() error {
	//TODO
	return nil
}