diff options
| author | Chris Lu <chris.lu@gmail.com> | 2020-03-29 21:07:55 -0700 |
|---|---|---|
| committer | Chris Lu <chris.lu@gmail.com> | 2020-03-29 21:07:55 -0700 |
| commit | 9dc0b1df8f3bb19ce01b2d520436dbdc0f2a883e (patch) | |
| tree | d79bc79a897016e8266bbc4ee8140a87b3dc7129 /weed/filesys | |
| parent | be6c2f85117ce7d78ed4532cc10f84a9459f86a0 (diff) | |
| download | seaweedfs-9dc0b1df8f3bb19ce01b2d520436dbdc0f2a883e.tar.xz seaweedfs-9dc0b1df8f3bb19ce01b2d520436dbdc0f2a883e.zip | |
windows os does not like to work with fuse
Diffstat (limited to 'weed/filesys')
| -rw-r--r-- | weed/filesys/filehandle.go | 2 | ||||
| -rw-r--r-- | weed/filesys/reader_at.go | 147 |
2 files changed, 1 insertion, 148 deletions
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 4897d3a08..83a93c062 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -92,7 +92,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { if fh.f.reader == nil { chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32) - fh.f.reader = NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache) + fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache) } totalRead, err := fh.f.reader.ReadAt(buff, offset) diff --git a/weed/filesys/reader_at.go b/weed/filesys/reader_at.go deleted file mode 100644 index f819a3fa6..000000000 --- a/weed/filesys/reader_at.go +++ /dev/null @@ -1,147 +0,0 @@ -package filesys - -import ( - "bytes" - "context" - "fmt" - "io" - "sync" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/pb/pb_cache" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/chrislusf/seaweedfs/weed/wdclient" -) - -type ChunkReadAt struct { - masterClient *wdclient.MasterClient - chunkViews []*filer2.ChunkView - buffer []byte - bufferOffset int64 - lookupFileId func(fileId string) (targetUrl string, err error) - readerLock sync.Mutex - - chunkCache *pb_cache.ChunkCache -} - -// var _ = io.ReaderAt(&ChunkReadAt{}) - -func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*filer2.ChunkView, chunkCache *pb_cache.ChunkCache) *ChunkReadAt { - - return &ChunkReadAt{ - chunkViews: chunkViews, - lookupFileId: func(fileId string) (targetUrl string, err error) { - err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - vid := filer2.VolumeId(fileId) - resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ - VolumeIds: []string{vid}, - }) - if err != nil { - 
return err - } - - locations := resp.LocationsMap[vid] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", fileId) - return fmt.Errorf("failed to locate %s", fileId) - } - - volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) - - targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) - - return nil - }) - return - }, - bufferOffset: -1, - chunkCache: chunkCache, - } -} - -func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { - - c.readerLock.Lock() - defer c.readerLock.Unlock() - - for n < len(p) && err == nil { - readCount, readErr := c.doReadAt(p[n:], offset+int64(n)) - n += readCount - err = readErr - if readCount == 0 { - return n, nil - } - } - return -} - -func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { - - var found bool - for _, chunk := range c.chunkViews { - if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { - found = true - if c.bufferOffset != chunk.LogicOffset { - c.buffer, err = c.fetchChunkData(chunk) - c.bufferOffset = chunk.LogicOffset - } - break - } - } - if !found { - return 0, io.EOF - } - - n = copy(p, c.buffer[offset-c.bufferOffset:]) - - // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer))) - - return - -} - -func (c *ChunkReadAt) fetchChunkData(chunkView *filer2.ChunkView) (data []byte, err error) { - - // fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) - - chunkData := c.chunkCache.GetChunk(chunkView.FileId) - if chunkData != nil { - glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) - } else { - chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped) - if err != nil { - return nil, err - } - } - - if 
int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) { - return nil, fmt.Errorf("unexpected larger chunkView [%d,%d) than chunk %d", chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData)) - } - - data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)] - - c.chunkCache.SetChunk(chunkView.FileId, chunkData) - - return data, nil -} - -func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) { - - urlString, err := c.lookupFileId(fileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) - return nil, err - } - var buffer bytes.Buffer - err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) { - buffer.Write(data) - }) - if err != nil { - glog.V(1).Infof("read %s failed, err: %v", fileId, err) - return nil, err - } - - return buffer.Bytes(), nil -} |
