author     Chris Lu <chris.lu@gmail.com>    2018-05-24 01:22:37 -0700
committer  Chris Lu <chris.lu@gmail.com>    2018-05-24 01:22:37 -0700
commit     d773e11c7a65903b3ee1adea801a20f91cb0c7aa (patch)
tree       9e40f834e929d826c9ce5dacd9fa57ca0de57bc6 /weed
parent     00d0274fd7c829f5d26c051f5832e0f602929b08 (diff)
file handler directly reads from volume servers
this mostly works fine now! next: need to cache files to local disk
Diffstat (limited to 'weed')
-rw-r--r--  weed/filer2/filechunks.go          18
-rw-r--r--  weed/filer2/filechunks_test.go     44
-rw-r--r--  weed/filesys/filehandle.go         99
-rw-r--r--  weed/pb/filer.proto                30
-rw-r--r--  weed/pb/filer_pb/filer.pb.go      302
-rw-r--r--  weed/server/filer_grpc_server.go   28
-rw-r--r--  weed/util/http_util.go             30
7 files changed, 383 insertions, 168 deletions
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
index 93cee81de..6bdfbd48e 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer2/filechunks.go
@@ -52,7 +52,14 @@ func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []
return
}
-func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*filer_pb.FileChunk) {
+type ChunkView struct {
+ FileId string
+ Offset int64
+ Size uint64
+ LogicOffset int64
+}
+
+func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
visibles := nonOverlappingVisibleIntervals(chunks)
@@ -60,10 +67,11 @@ func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
for _, chunk := range visibles {
if chunk.start <= offset && offset < chunk.stop {
- views = append(views, &filer_pb.FileChunk{
- FileId: chunk.fileId,
- Offset: offset - chunk.start, // offset is the data starting location in this file id
- Size: uint64(min(chunk.stop, stop) - offset),
+ views = append(views, &ChunkView{
+ FileId: chunk.fileId,
+ Offset: offset - chunk.start, // offset is the data starting location in this file id
+ Size: uint64(min(chunk.stop, stop) - offset),
+ LogicOffset: offset,
})
offset = min(chunk.stop, stop)
}
diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go
index 9e39477be..24897215e 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer2/filechunks_test.go
@@ -151,7 +151,7 @@ func TestChunksReading(t *testing.T) {
Chunks []*filer_pb.FileChunk
Offset int64
Size int
- Expected []*filer_pb.FileChunk
+ Expected []*ChunkView
}{
// case 0: normal
{
@@ -162,10 +162,10 @@ func TestChunksReading(t *testing.T) {
},
Offset: 0,
Size: 250,
- Expected: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc"},
- {Offset: 0, Size: 100, FileId: "asdf"},
- {Offset: 0, Size: 50, FileId: "fsad"},
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 100, FileId: "abc", LogicOffset:0},
+ {Offset: 0, Size: 100, FileId: "asdf", LogicOffset:100},
+ {Offset: 0, Size: 50, FileId: "fsad", LogicOffset:200},
},
},
// case 1: updates overwrite full chunks
@@ -176,8 +176,8 @@ func TestChunksReading(t *testing.T) {
},
Offset: 50,
Size: 100,
- Expected: []*filer_pb.FileChunk{
- {Offset: 50, Size: 100, FileId: "asdf"},
+ Expected: []*ChunkView{
+ {Offset: 50, Size: 100, FileId: "asdf", LogicOffset:50},
},
},
// case 2: updates overwrite part of previous chunks
@@ -188,9 +188,9 @@ func TestChunksReading(t *testing.T) {
},
Offset: 25,
Size: 50,
- Expected: []*filer_pb.FileChunk{
- {Offset: 25, Size: 25, FileId: "asdf"},
- {Offset: 0, Size: 25, FileId: "abc"},
+ Expected: []*ChunkView{
+ {Offset: 25, Size: 25, FileId: "asdf", LogicOffset:25},
+ {Offset: 0, Size: 25, FileId: "abc", LogicOffset:50},
},
},
// case 3: updates overwrite full chunks
@@ -202,9 +202,9 @@ func TestChunksReading(t *testing.T) {
},
Offset: 0,
Size: 200,
- Expected: []*filer_pb.FileChunk{
- {Offset: 0, Size: 50, FileId: "asdf"},
- {Offset: 0, Size: 150, FileId: "xxxx"},
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 50, FileId: "asdf", LogicOffset:0},
+ {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset:50},
},
},
// case 4: updates far away from prev chunks
@@ -216,8 +216,8 @@ func TestChunksReading(t *testing.T) {
},
Offset: 0,
Size: 400,
- Expected: []*filer_pb.FileChunk{
- {Offset: 0, Size: 200, FileId: "asdf"},
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 200, FileId: "asdf", LogicOffset:0},
// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
},
},
@@ -231,9 +231,9 @@ func TestChunksReading(t *testing.T) {
},
Offset: 0,
Size: 220,
- Expected: []*filer_pb.FileChunk{
- {Offset: 0, Size: 200, FileId: "asdf"},
- {Offset: 0, Size: 20, FileId: "abc"},
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 200, FileId: "asdf", LogicOffset:0},
+ {Offset: 0, Size: 20, FileId: "abc", LogicOffset:200},
},
},
// case 6: same updates
@@ -245,8 +245,8 @@ func TestChunksReading(t *testing.T) {
},
Offset: 0,
Size: 100,
- Expected: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc"},
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 100, FileId: "abc", LogicOffset:0},
},
},
}
@@ -269,6 +269,10 @@ func TestChunksReading(t *testing.T) {
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
i, x, chunk.FileId, testcase.Expected[x].FileId)
}
+ if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
+ t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
+ i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
+ }
}
if len(chunks) != len(testcase.Expected) {
t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 55d574342..c71f1ee36 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -11,6 +11,9 @@ import (
"bytes"
"github.com/chrislusf/seaweedfs/weed/operation"
"time"
+ "strings"
+ "sync"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type FileHandle struct {
@@ -33,46 +36,94 @@ type FileHandle struct {
}
var _ = fs.Handle(&FileHandle{})
-var _ = fs.HandleReadAller(&FileHandle{})
-// var _ = fs.HandleReader(&FileHandle{})
+// var _ = fs.HandleReadAller(&FileHandle{})
+var _ = fs.HandleReader(&FileHandle{})
var _ = fs.HandleFlusher(&FileHandle{})
var _ = fs.HandleWriter(&FileHandle{})
var _ = fs.HandleReleaser(&FileHandle{})
-func (fh *FileHandle) ReadAll(ctx context.Context) (content []byte, err error) {
+func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(3).Infof("%v/%v read all fh ", fh.dirPath, fh.name)
+ glog.V(3).Infof("%v/%v read fh: [%d,%d)", fh.dirPath, fh.name, req.Offset, req.Offset+int64(req.Size))
if len(fh.Chunks) == 0 {
glog.V(0).Infof("empty fh %v/%v", fh.dirPath, fh.name)
- return
+ return fmt.Errorf("empty file %v/%v", fh.dirPath, fh.name)
}
- err = fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ buff := make([]byte, req.Size)
- // FIXME: need to either use Read() or implement differently
- chunks, _ := filer2.CompactFileChunks(fh.Chunks)
- glog.V(1).Infof("read fh %v/%v %d/%d chunks", fh.dirPath, fh.name, len(chunks), len(fh.Chunks))
- for i, chunk := range chunks {
- glog.V(1).Infof("read fh %v/%v %d/%d chunk %s [%d,%d)", fh.dirPath, fh.name, i, len(chunks), chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
- }
- request := &filer_pb.GetFileContentRequest{
- FileId: chunks[0].FileId,
- }
+ chunkViews := filer2.ReadFromChunks(fh.Chunks, req.Offset, req.Size)
+
+ var vids []string
+ for _, chunkView := range chunkViews {
+ vids = append(vids, volumeId(chunkView.FileId))
+ }
+
+ vid2Locations := make(map[string]*filer_pb.Locations)
+
+ err := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- glog.V(1).Infof("read fh content %d chunk %s [%d,%d): %v", len(chunks),
- chunks[0].FileId, chunks[0].Offset, chunks[0].Offset+int64(chunks[0].Size), request)
- resp, err := client.GetFileContent(ctx, request)
+ glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
+ resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+ VolumeIds: vids,
+ })
if err != nil {
return err
}
- content = resp.Content
+ vid2Locations = resp.LocationsMap
return nil
})
- return content, err
+ if err != nil {
+ glog.V(3).Infof("%v/%v read fh lookup volume ids: %v", fh.dirPath, fh.name, err)
+ return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
+ }
+
+ var totalRead int64
+ var wg sync.WaitGroup
+ for _, chunkView := range chunkViews {
+ wg.Add(1)
+ go func(chunkView *filer2.ChunkView) {
+ defer wg.Done()
+
+ glog.V(3).Infof("read fh reading chunk: %+v", chunkView)
+
+ locations := vid2Locations[volumeId(chunkView.FileId)]
+ if locations == nil || len(locations.Locations) == 0 {
+ glog.V(0).Infof("failed to locate %s", chunkView.FileId)
+ err = fmt.Errorf("failed to locate %s", chunkView.FileId)
+ return
+ }
+
+ var n int64
+ n, err = util.ReadUrl(
+ fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
+ chunkView.Offset,
+ int(chunkView.Size),
+ buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)])
+
+ if err != nil {
+
+ glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.dirPath, fh.name, locations.Locations[0].Url, chunkView.FileId, n, err)
+
+ err = fmt.Errorf("failed to read http://%s/%s: %v",
+ locations.Locations[0].Url, chunkView.FileId, err)
+ return
+ }
+
+ glog.V(3).Infof("read fh read %d bytes: %+v", n, chunkView)
+ totalRead += n
+
+ }(chunkView)
+ }
+ wg.Wait()
+
+ resp.Data = buff[:totalRead]
+
+ return err
}
// Write to the file handle
@@ -179,3 +230,11 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
return err
}
+
+func volumeId(fileId string) string {
+ lastCommaIndex := strings.LastIndex(fileId, ",")
+ if lastCommaIndex > 0 {
+ return fileId[:lastCommaIndex]
+ }
+ return fileId
+}
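
The gist of the new Read path, as a self-contained sketch (not part of the commit): the file id is trimmed to its volume id the same way the volumeId helper above does it, and each ChunkView is fetched concurrently into its own non-overlapping window of one shared buffer. fetchChunk and the chunk contents are hypothetical stand-ins for util.ReadUrl and the volume servers.

package main

import (
	"fmt"
	"strings"
	"sync"
)

// same parsing as the volumeId helper above: everything before the last
// comma of a file id like "3,01637037d6" is the volume id.
func volumeId(fileId string) string {
	if i := strings.LastIndex(fileId, ","); i > 0 {
		return fileId[:i]
	}
	return fileId
}

type chunkView struct {
	fileId      string
	data        []byte // stands in for the bytes a volume server would return
	logicOffset int64
}

// fetchChunk stands in for util.ReadUrl: it fills the caller-provided window.
func fetchChunk(cv chunkView, dst []byte) int64 {
	return int64(copy(dst, cv.data))
}

func main() {
	views := []chunkView{
		{fileId: "3,01637037d6", data: []byte("hello "), logicOffset: 0},
		{fileId: "7,0263ff74fa", data: []byte("world"), logicOffset: 6},
	}
	reqOffset := int64(0)
	buff := make([]byte, 11)

	var wg sync.WaitGroup
	for _, cv := range views {
		wg.Add(1)
		go func(cv chunkView) {
			defer wg.Done()
			// each goroutine writes only to its own window, positioned by the
			// chunk's logical offset within the overall read request
			start := cv.logicOffset - reqOffset
			fetchChunk(cv, buff[start:start+int64(len(cv.data))])
			_ = volumeId(cv.fileId) // in the real Read this keys the location map
		}(cv)
	}
	wg.Wait()
	fmt.Printf("%s\n", buff)
}
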
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index b9c3e87f7..455de81d7 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -15,9 +15,6 @@ service SeaweedFiler {
rpc GetEntryAttributes (GetEntryAttributesRequest) returns (GetEntryAttributesResponse) {
}
- rpc GetFileContent (GetFileContentRequest) returns (GetFileContentResponse) {
- }
-
rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
}
@@ -30,6 +27,9 @@ service SeaweedFiler {
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
+ rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -100,6 +100,13 @@ message CreateEntryRequest {
message CreateEntryResponse {
}
+message UpdateEntryRequest {
+ string directory = 1;
+ Entry entry = 2;
+}
+message UpdateEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@@ -122,9 +129,18 @@ message AssignVolumeResponse {
int32 count = 4;
}
-message UpdateEntryRequest {
- string directory = 1;
- Entry entry = 2;
+message LookupVolumeRequest {
+ repeated string volume_ids = 1;
}
-message UpdateEntryResponse {
+
+message Locations {
+ repeated Location locations = 1;
+}
+
+message Location {
+ string url = 1;
+ string public_url = 2;
+}
+message LookupVolumeResponse {
+ map<string, Locations> locations_map = 1;
}
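
A hedged sketch of calling the new LookupVolume RPC from Go, using the regenerated filer_pb stubs shown in the next diff. The filer gRPC address and volume ids are made-up examples, and NewSeaweedFilerClient is assumed to be the standard constructor emitted by the protobuf/gRPC generator (it is not shown in the excerpt below).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
		VolumeIds: []string{"3", "7"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// locations_map is keyed by volume id, each value listing one or more servers
	for vid, locs := range resp.LocationsMap {
		for _, loc := range locs.Locations {
			fmt.Printf("volume %s -> %s (public: %s)\n", vid, loc.Url, loc.PublicUrl)
		}
	}
}
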
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index b1f0a3e0d..fb0388307 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -22,12 +22,16 @@ It has these top-level messages:
GetFileContentResponse
CreateEntryRequest
CreateEntryResponse
+ UpdateEntryRequest
+ UpdateEntryResponse
DeleteEntryRequest
DeleteEntryResponse
AssignVolumeRequest
AssignVolumeResponse
- UpdateEntryRequest
- UpdateEntryResponse
+ LookupVolumeRequest
+ Locations
+ Location
+ LookupVolumeResponse
*/
package filer_pb
@@ -371,6 +375,38 @@ func (m *CreateEntryResponse) String() string { return proto.CompactT
func (*CreateEntryResponse) ProtoMessage() {}
func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+type UpdateEntryRequest struct {
+ Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+}
+
+func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
+func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateEntryRequest) ProtoMessage() {}
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *UpdateEntryRequest) GetDirectory() string {
+ if m != nil {
+ return m.Directory
+ }
+ return ""
+}
+
+func (m *UpdateEntryRequest) GetEntry() *Entry {
+ if m != nil {
+ return m.Entry
+ }
+ return nil
+}
+
+type UpdateEntryResponse struct {
+}
+
+func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
+func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
+func (*UpdateEntryResponse) ProtoMessage() {}
+func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
type DeleteEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
@@ -380,7 +416,7 @@ type DeleteEntryRequest struct {
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryRequest) ProtoMessage() {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
func (m *DeleteEntryRequest) GetDirectory() string {
if m != nil {
@@ -409,7 +445,7 @@ type DeleteEntryResponse struct {
func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryResponse) ProtoMessage() {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
type AssignVolumeRequest struct {
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
@@ -420,7 +456,7 @@ type AssignVolumeRequest struct {
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeRequest) ProtoMessage() {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *AssignVolumeRequest) GetCount() int32 {
if m != nil {
@@ -453,7 +489,7 @@ type AssignVolumeResponse struct {
func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeResponse) ProtoMessage() {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *AssignVolumeResponse) GetFileId() string {
if m != nil {
@@ -483,37 +519,77 @@ func (m *AssignVolumeResponse) GetCount() int32 {
return 0
}
-type UpdateEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+type LookupVolumeRequest struct {
+ VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
}
-func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
-func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryRequest) ProtoMessage() {}
-func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
+func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*LookupVolumeRequest) ProtoMessage() {}
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-func (m *UpdateEntryRequest) GetDirectory() string {
+func (m *LookupVolumeRequest) GetVolumeIds() []string {
if m != nil {
- return m.Directory
+ return m.VolumeIds
}
- return ""
+ return nil
}
-func (m *UpdateEntryRequest) GetEntry() *Entry {
+type Locations struct {
+ Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"`
+}
+
+func (m *Locations) Reset() { *m = Locations{} }
+func (m *Locations) String() string { return proto.CompactTextString(m) }
+func (*Locations) ProtoMessage() {}
+func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *Locations) GetLocations() []*Location {
if m != nil {
- return m.Entry
+ return m.Locations
}
return nil
}
-type UpdateEntryResponse struct {
+type Location struct {
+ Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
}
-func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
-func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryResponse) ProtoMessage() {}
-func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (m *Location) Reset() { *m = Location{} }
+func (m *Location) String() string { return proto.CompactTextString(m) }
+func (*Location) ProtoMessage() {}
+func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (m *Location) GetUrl() string {
+ if m != nil {
+ return m.Url
+ }
+ return ""
+}
+
+func (m *Location) GetPublicUrl() string {
+ if m != nil {
+ return m.PublicUrl
+ }
+ return ""
+}
+
+type LookupVolumeResponse struct {
+ LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
+func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*LookupVolumeResponse) ProtoMessage() {}
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
+ if m != nil {
+ return m.LocationsMap
+ }
+ return nil
+}
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
@@ -529,12 +605,16 @@ func init() {
proto.RegisterType((*GetFileContentResponse)(nil), "filer_pb.GetFileContentResponse")
proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
+ proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
+ proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
- proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
- proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
+ proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
+ proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
+ proto.RegisterType((*Location)(nil), "filer_pb.Location")
+ proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -551,11 +631,11 @@ type SeaweedFilerClient interface {
LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error)
GetEntryAttributes(ctx context.Context, in *GetEntryAttributesRequest, opts ...grpc.CallOption) (*GetEntryAttributesResponse, error)
- GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
+ LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
}
type seaweedFilerClient struct {
@@ -593,15 +673,6 @@ func (c *seaweedFilerClient) GetEntryAttributes(ctx context.Context, in *GetEntr
return out, nil
}
-func (c *seaweedFilerClient) GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error) {
- out := new(GetFileContentResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFileContent", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
out := new(CreateEntryResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
@@ -638,17 +709,26 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR
return out, nil
}
+func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
+ out := new(LookupVolumeResponse)
+ err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error)
GetEntryAttributes(context.Context, *GetEntryAttributesRequest) (*GetEntryAttributesResponse, error)
- GetFileContent(context.Context, *GetFileContentRequest) (*GetFileContentResponse, error)
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
+ LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -709,24 +789,6 @@ func _SeaweedFiler_GetEntryAttributes_Handler(srv interface{}, ctx context.Conte
return interceptor(ctx, in, info, handler)
}
-func _SeaweedFiler_GetFileContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetFileContentRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SeaweedFilerServer).GetFileContent(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/filer_pb.SeaweedFiler/GetFileContent",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SeaweedFilerServer).GetFileContent(ctx, req.(*GetFileContentRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateEntryRequest)
if err := dec(in); err != nil {
@@ -799,6 +861,24 @@ func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LookupVolumeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).LookupVolume(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/LookupVolume",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).LookupVolume(ctx, req.(*LookupVolumeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -816,10 +896,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
Handler: _SeaweedFiler_GetEntryAttributes_Handler,
},
{
- MethodName: "GetFileContent",
- Handler: _SeaweedFiler_GetFileContent_Handler,
- },
- {
MethodName: "CreateEntry",
Handler: _SeaweedFiler_CreateEntry_Handler,
},
@@ -835,6 +911,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "AssignVolume",
Handler: _SeaweedFiler_AssignVolume_Handler,
},
+ {
+ MethodName: "LookupVolume",
+ Handler: _SeaweedFiler_LookupVolume_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
@@ -843,53 +923,61 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 763 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0xd3, 0x4a,
- 0x10, 0xae, 0xe3, 0x24, 0x6d, 0x26, 0x69, 0xcf, 0xd1, 0x26, 0xed, 0xf1, 0x49, 0x7f, 0x08, 0x86,
- 0xa2, 0x22, 0xa4, 0x0a, 0x85, 0x1b, 0x2e, 0xa9, 0xda, 0x52, 0x21, 0x15, 0x55, 0x72, 0x55, 0x24,
- 0xae, 0xa2, 0xc4, 0x9e, 0x84, 0x55, 0x1d, 0x3b, 0x78, 0xd7, 0xa0, 0x72, 0x0b, 0xaf, 0xc2, 0x4b,
- 0xf0, 0x74, 0x68, 0x7f, 0xe2, 0xac, 0xb1, 0xd3, 0x9f, 0x0b, 0xee, 0x76, 0x67, 0x76, 0xbe, 0xf9,
- 0x76, 0x66, 0xbe, 0xb5, 0xa1, 0x39, 0xa6, 0x21, 0x26, 0x87, 0xb3, 0x24, 0xe6, 0x31, 0x59, 0x93,
- 0x9b, 0xc1, 0x6c, 0xe4, 0x5e, 0xc0, 0xf6, 0x79, 0x1c, 0x5f, 0xa7, 0xb3, 0x13, 0x9a, 0xa0, 0xcf,
- 0xe3, 0xe4, 0xe6, 0x34, 0xe2, 0xc9, 0x8d, 0x87, 0x9f, 0x53, 0x64, 0x9c, 0xec, 0x40, 0x23, 0x98,
- 0x3b, 0x1c, 0xab, 0x67, 0x1d, 0x34, 0xbc, 0x85, 0x81, 0x10, 0xa8, 0x46, 0xc3, 0x29, 0x3a, 0x15,
- 0xe9, 0x90, 0x6b, 0xf7, 0x14, 0x76, 0xca, 0x01, 0xd9, 0x2c, 0x8e, 0x18, 0x92, 0x7d, 0xa8, 0xa1,
- 0x30, 0x48, 0xb4, 0x66, 0xff, 0x9f, 0xc3, 0x39, 0x95, 0x43, 0x75, 0x4e, 0x79, 0xdd, 0x3e, 0x90,
- 0x73, 0xca, 0xb8, 0xb0, 0x51, 0x64, 0xf7, 0xa2, 0xe3, 0xbe, 0x81, 0x76, 0x2e, 0x46, 0x67, 0x7c,
- 0x0e, 0xab, 0xa8, 0x4c, 0x8e, 0xd5, 0xb3, 0xcb, 0x72, 0xce, 0xfd, 0xee, 0x4f, 0x0b, 0x6a, 0xd2,
- 0x94, 0x5d, 0xcd, 0x5a, 0x5c, 0x8d, 0x3c, 0x86, 0x16, 0x65, 0x83, 0x05, 0x01, 0x71, 0xed, 0x35,
- 0xaf, 0x49, 0x59, 0x76, 0x55, 0xf2, 0x02, 0xea, 0xfe, 0xa7, 0x34, 0xba, 0x66, 0x8e, 0x2d, 0x53,
- 0xb5, 0x17, 0xa9, 0xde, 0xd2, 0x10, 0x8f, 0x85, 0xcf, 0xd3, 0x47, 0xc8, 0x6b, 0x80, 0x21, 0xe7,
- 0x09, 0x1d, 0xa5, 0x1c, 0x99, 0x53, 0x95, 0xf5, 0x70, 0x8c, 0x80, 0x94, 0xe1, 0x51, 0xe6, 0xf7,
- 0x8c, 0xb3, 0xee, 0x18, 0x1a, 0x19, 0x1c, 0xf9, 0x0f, 0x56, 0x45, 0xcc, 0x80, 0x06, 0x9a, 0x6d,
- 0x5d, 0x6c, 0xdf, 0x05, 0x64, 0x0b, 0xea, 0xf1, 0x78, 0xcc, 0x90, 0x4b, 0xa6, 0xb6, 0xa7, 0x77,
- 0xe2, 0x6e, 0x8c, 0x7e, 0x43, 0xc7, 0xee, 0x59, 0x07, 0x55, 0x4f, 0xae, 0x49, 0x07, 0x6a, 0x53,
- 0x4e, 0xa7, 0x28, 0x69, 0xd8, 0x9e, 0xda, 0xb8, 0x3f, 0x2c, 0xd8, 0xc8, 0xd3, 0x20, 0xdb, 0xd0,
- 0x90, 0xd9, 0x24, 0x82, 0x25, 0x11, 0xe4, 0x34, 0x5d, 0xe6, 0x50, 0x2a, 0x06, 0x4a, 0x16, 0x32,
- 0x8d, 0x03, 0x95, 0x74, 0x5d, 0x85, 0xbc, 0x8f, 0x03, 0x24, 0xff, 0x82, 0x9d, 0xd2, 0x40, 0xa6,
- 0x5d, 0xf7, 0xc4, 0x52, 0x58, 0x26, 0x34, 0x70, 0x6a, 0xca, 0x32, 0xa1, 0x81, 0x3b, 0x81, 0xff,
- 0xcf, 0x50, 0xf6, 0xf5, 0xc6, 0x28, 0x88, 0x9e, 0x89, 0xb2, 0x4e, 0xed, 0x02, 0xcc, 0x86, 0x09,
- 0x46, 0x5c, 0x74, 0x4b, 0x8f, 0x67, 0x43, 0x59, 0x4e, 0x68, 0x62, 0x56, 0xcc, 0x36, 0x2b, 0xe6,
- 0x7e, 0xb7, 0xa0, 0x5b, 0x96, 0x49, 0x4f, 0x52, 0xbe, 0x61, 0xd6, 0xfd, 0x1b, 0x66, 0xcc, 0x45,
- 0xe5, 0xce, 0xb9, 0x70, 0x5f, 0xc2, 0xe6, 0x19, 0x72, 0x69, 0x8f, 0x23, 0x8e, 0x11, 0x9f, 0x5f,
- 0x75, 0x59, 0xa7, 0xdd, 0x3e, 0x6c, 0xfd, 0x19, 0xa1, 0x29, 0x3b, 0xb0, 0xea, 0x2b, 0x93, 0x0c,
- 0x69, 0x79, 0xf3, 0xad, 0xfb, 0x11, 0xc8, 0x71, 0x82, 0x43, 0x8e, 0x0f, 0x10, 0x7c, 0x26, 0xde,
- 0xca, 0xad, 0xe2, 0xdd, 0x84, 0x76, 0x0e, 0x5a, 0x71, 0x71, 0x29, 0x90, 0x13, 0x0c, 0xf1, 0x41,
- 0x19, 0x4b, 0x9e, 0x98, 0x82, 0x0e, 0xed, 0x82, 0x0e, 0x05, 0x83, 0x5c, 0x2a, 0xcd, 0x60, 0x0a,
- 0xed, 0x23, 0xc6, 0xe8, 0x24, 0xfa, 0x10, 0x87, 0xe9, 0x14, 0xe7, 0x14, 0x3a, 0x50, 0xf3, 0xe3,
- 0x54, 0x97, 0xa8, 0xe6, 0xa9, 0x0d, 0xd9, 0x03, 0xf0, 0xe3, 0x30, 0x44, 0x9f, 0xd3, 0x38, 0xd2,
- 0x04, 0x0c, 0x0b, 0xe9, 0x41, 0x33, 0xc1, 0x59, 0x48, 0xfd, 0xa1, 0x3c, 0xa0, 0x26, 0xc9, 0x34,
- 0xb9, 0x5f, 0xa0, 0x93, 0x4f, 0xa7, 0x9b, 0xb2, 0x54, 0xb1, 0x42, 0x0c, 0x49, 0xa8, 0x73, 0x89,
- 0xa5, 0x9c, 0xe4, 0x74, 0x14, 0x52, 0x7f, 0x20, 0x1c, 0xb6, 0x9e, 0x64, 0x69, 0xb9, 0x4a, 0xc2,
- 0x05, 0xf3, 0xaa, 0xc1, 0x5c, 0xb4, 0xf6, 0x6a, 0x16, 0xfc, 0xad, 0xd6, 0xe6, 0xa0, 0xd5, 0x8d,
- 0xfa, 0xbf, 0x6a, 0xd0, 0xba, 0xc4, 0xe1, 0x57, 0xc4, 0x40, 0x4c, 0x61, 0x42, 0x26, 0xd0, 0x29,
- 0xfb, 0x0c, 0x90, 0xfd, 0x05, 0xee, 0x2d, 0xdf, 0x9d, 0xee, 0xb3, 0xbb, 0x8e, 0xe9, 0x86, 0xae,
- 0x90, 0x73, 0x68, 0x1a, 0x8f, 0x3e, 0xd9, 0x31, 0x02, 0x0b, 0xdf, 0x8f, 0xee, 0xee, 0x12, 0x6f,
- 0x86, 0x36, 0x04, 0x52, 0xd4, 0x3f, 0x79, 0xb2, 0x08, 0x5b, 0xfa, 0x0e, 0x75, 0x9f, 0xde, 0x7e,
- 0x28, 0x4b, 0x71, 0x05, 0x1b, 0x79, 0xad, 0x92, 0x47, 0xb9, 0xc8, 0xa2, 0xee, 0xbb, 0xbd, 0xe5,
- 0x07, 0xcc, 0x3a, 0x18, 0x9a, 0x33, 0xeb, 0x50, 0x54, 0xb9, 0x59, 0x87, 0x32, 0xa1, 0x4a, 0x34,
- 0xa3, 0xcd, 0x26, 0x5a, 0x71, 0xb0, 0x4c, 0xb4, 0x92, 0xd9, 0x50, 0x68, 0x86, 0x1a, 0x4d, 0xb4,
- 0xe2, 0x7b, 0x60, 0xa2, 0x95, 0x49, 0x78, 0x85, 0x5c, 0x40, 0xcb, 0x54, 0x15, 0x31, 0x02, 0x4a,
- 0xc4, 0xdd, 0xdd, 0x5b, 0xe6, 0x9e, 0x03, 0x8e, 0xea, 0xf2, 0xa7, 0xe8, 0xd5, 0xef, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xec, 0x38, 0x88, 0xb2, 0x23, 0x09, 0x00, 0x00,
+ // 890 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x6d, 0x6f, 0xdc, 0x44,
+ 0x10, 0x8e, 0xcf, 0x71, 0x12, 0xcf, 0x5d, 0x78, 0xd9, 0x4b, 0x8b, 0xb9, 0x26, 0x55, 0x58, 0x28,
+ 0x6a, 0x85, 0x14, 0x45, 0x81, 0x0f, 0x15, 0x08, 0x89, 0xaa, 0x29, 0x55, 0xa5, 0x54, 0x95, 0x5c,
+ 0x82, 0xc4, 0xa7, 0x93, 0xcf, 0x9e, 0x3b, 0x56, 0xf1, 0xd9, 0xc6, 0xbb, 0x0e, 0x0a, 0x5f, 0xe1,
+ 0xaf, 0xf0, 0x0f, 0xf8, 0x07, 0xfc, 0x31, 0xb4, 0x2f, 0xb6, 0xd7, 0xb1, 0xaf, 0x2f, 0x1f, 0xf8,
+ 0xb6, 0x3b, 0x3b, 0xf3, 0xcc, 0x33, 0xbb, 0x33, 0x8f, 0x0d, 0xe3, 0x25, 0x4b, 0xb1, 0x3c, 0x29,
+ 0xca, 0x5c, 0xe4, 0x64, 0x4f, 0x6d, 0xe6, 0xc5, 0x82, 0xbe, 0x82, 0x7b, 0x17, 0x79, 0x7e, 0x55,
+ 0x15, 0xe7, 0xac, 0xc4, 0x58, 0xe4, 0xe5, 0xcd, 0xb3, 0x4c, 0x94, 0x37, 0x21, 0xfe, 0x56, 0x21,
+ 0x17, 0xe4, 0x10, 0xfc, 0xa4, 0x3e, 0x08, 0x9c, 0x63, 0xe7, 0xa1, 0x1f, 0xb6, 0x06, 0x42, 0x60,
+ 0x3b, 0x8b, 0xd6, 0x18, 0x8c, 0xd4, 0x81, 0x5a, 0xd3, 0x67, 0x70, 0x38, 0x0c, 0xc8, 0x8b, 0x3c,
+ 0xe3, 0x48, 0x1e, 0x80, 0x87, 0xd2, 0xa0, 0xd0, 0xc6, 0x67, 0x1f, 0x9e, 0xd4, 0x54, 0x4e, 0xb4,
+ 0x9f, 0x3e, 0xa5, 0x67, 0x40, 0x2e, 0x18, 0x17, 0xd2, 0xc6, 0x90, 0xbf, 0x13, 0x1d, 0xfa, 0x03,
+ 0x4c, 0x3b, 0x31, 0x26, 0xe3, 0x23, 0xd8, 0x45, 0x6d, 0x0a, 0x9c, 0x63, 0x77, 0x28, 0x67, 0x7d,
+ 0x4e, 0xff, 0x76, 0xc0, 0x53, 0xa6, 0xa6, 0x34, 0xa7, 0x2d, 0x8d, 0x7c, 0x06, 0x13, 0xc6, 0xe7,
+ 0x2d, 0x01, 0x59, 0xf6, 0x5e, 0x38, 0x66, 0xbc, 0x29, 0x95, 0x7c, 0x05, 0x3b, 0xf1, 0xaf, 0x55,
+ 0x76, 0xc5, 0x03, 0x57, 0xa5, 0x9a, 0xb6, 0xa9, 0x7e, 0x64, 0x29, 0x3e, 0x95, 0x67, 0xa1, 0x71,
+ 0x21, 0x8f, 0x01, 0x22, 0x21, 0x4a, 0xb6, 0xa8, 0x04, 0xf2, 0x60, 0x5b, 0xdd, 0x47, 0x60, 0x05,
+ 0x54, 0x1c, 0x9f, 0x34, 0xe7, 0xa1, 0xe5, 0x4b, 0x97, 0xe0, 0x37, 0x70, 0xe4, 0x13, 0xd8, 0x95,
+ 0x31, 0x73, 0x96, 0x18, 0xb6, 0x3b, 0x72, 0xfb, 0x22, 0x21, 0x77, 0x61, 0x27, 0x5f, 0x2e, 0x39,
+ 0x0a, 0xc5, 0xd4, 0x0d, 0xcd, 0x4e, 0xd6, 0xc6, 0xd9, 0x1f, 0x18, 0xb8, 0xc7, 0xce, 0xc3, 0xed,
+ 0x50, 0xad, 0xc9, 0x01, 0x78, 0x6b, 0xc1, 0xd6, 0xa8, 0x68, 0xb8, 0xa1, 0xde, 0xd0, 0xbf, 0x1c,
+ 0xf8, 0xa0, 0x4b, 0x83, 0xdc, 0x03, 0x5f, 0x65, 0x53, 0x08, 0x8e, 0x42, 0x50, 0xdd, 0xf4, 0xba,
+ 0x83, 0x32, 0xb2, 0x50, 0x9a, 0x90, 0x75, 0x9e, 0xe8, 0xa4, 0xfb, 0x3a, 0xe4, 0x65, 0x9e, 0x20,
+ 0xf9, 0x08, 0xdc, 0x8a, 0x25, 0x2a, 0xed, 0x7e, 0x28, 0x97, 0xd2, 0xb2, 0x62, 0x49, 0xe0, 0x69,
+ 0xcb, 0x8a, 0x25, 0x74, 0x05, 0x9f, 0x3e, 0x47, 0xf5, 0xae, 0x37, 0xd6, 0x85, 0x98, 0x9e, 0x18,
+ 0x7a, 0xa9, 0x23, 0x80, 0x22, 0x2a, 0x31, 0x13, 0xf2, 0xb5, 0x4c, 0x7b, 0xfa, 0xda, 0x72, 0xce,
+ 0x4a, 0xfb, 0xc6, 0x5c, 0xfb, 0xc6, 0xe8, 0x9f, 0x0e, 0xcc, 0x86, 0x32, 0x99, 0x4e, 0xea, 0x3e,
+ 0x98, 0xf3, 0xee, 0x0f, 0x66, 0xf5, 0xc5, 0xe8, 0xad, 0x7d, 0x41, 0x4f, 0xe1, 0xce, 0x73, 0x14,
+ 0xca, 0x9e, 0x67, 0x02, 0x33, 0x51, 0x97, 0xba, 0xe9, 0xa5, 0xe9, 0x19, 0xdc, 0xbd, 0x1d, 0x61,
+ 0x28, 0x07, 0xb0, 0x1b, 0x6b, 0x93, 0x0a, 0x99, 0x84, 0xf5, 0x96, 0xfe, 0x02, 0xe4, 0x69, 0x89,
+ 0x91, 0xc0, 0xf7, 0x18, 0xf8, 0x66, 0x78, 0x47, 0x6f, 0x1c, 0xde, 0x3b, 0x30, 0xed, 0x40, 0x6b,
+ 0x2e, 0x32, 0xe3, 0x65, 0x91, 0xfc, 0x5f, 0x19, 0x3b, 0xd0, 0x26, 0x23, 0x03, 0x72, 0x8e, 0x29,
+ 0xbe, 0x57, 0xc6, 0x01, 0x51, 0xeb, 0x4d, 0xbe, 0xdb, 0x9b, 0x7c, 0xc9, 0xa0, 0x93, 0xca, 0x30,
+ 0x58, 0xc3, 0xf4, 0x09, 0xe7, 0x6c, 0x95, 0xfd, 0x9c, 0xa7, 0xd5, 0x1a, 0x6b, 0x0a, 0x07, 0xe0,
+ 0xc5, 0x79, 0x65, 0x1e, 0xc5, 0x0b, 0xf5, 0x86, 0xdc, 0x07, 0x88, 0xf3, 0x34, 0xc5, 0x58, 0xb0,
+ 0x3c, 0x33, 0x04, 0x2c, 0x0b, 0x39, 0x86, 0x71, 0x89, 0x45, 0xca, 0xe2, 0x48, 0x39, 0xe8, 0xde,
+ 0xb5, 0x4d, 0xf4, 0x1a, 0x0e, 0xba, 0xe9, 0x4c, 0x1b, 0x6c, 0xd4, 0x08, 0x39, 0x7e, 0x65, 0x6a,
+ 0x72, 0xc9, 0xa5, 0x9a, 0x9d, 0x6a, 0x91, 0xb2, 0x78, 0x2e, 0x0f, 0x5c, 0x33, 0x3b, 0xca, 0x72,
+ 0x59, 0xa6, 0x2d, 0xf3, 0x6d, 0x8b, 0x39, 0xfd, 0x06, 0xa6, 0x5a, 0xf5, 0xbb, 0x65, 0x1e, 0x01,
+ 0x5c, 0x2b, 0xc3, 0x9c, 0x25, 0x5a, 0x7d, 0xfd, 0xd0, 0xd7, 0x96, 0x17, 0x09, 0xa7, 0xdf, 0x83,
+ 0x7f, 0x91, 0x6b, 0xe6, 0x9c, 0x9c, 0x82, 0x9f, 0xd6, 0x1b, 0x23, 0xd4, 0xa4, 0x7d, 0xed, 0xda,
+ 0x2f, 0x6c, 0x9d, 0xe8, 0x77, 0xb0, 0x57, 0x9b, 0xeb, 0x3a, 0x9c, 0x4d, 0x75, 0x8c, 0x6e, 0xd5,
+ 0x41, 0xff, 0x75, 0xe0, 0xa0, 0x4b, 0xd9, 0x5c, 0xd5, 0x25, 0xec, 0x37, 0x29, 0xe6, 0xeb, 0xa8,
+ 0x30, 0x5c, 0x4e, 0x6d, 0x2e, 0xfd, 0xb0, 0x86, 0x20, 0x7f, 0x19, 0x15, 0xba, 0x05, 0x26, 0xa9,
+ 0x65, 0x9a, 0xfd, 0x04, 0x1f, 0xf7, 0x5c, 0x24, 0xeb, 0x2b, 0xac, 0x7b, 0x50, 0x2e, 0xc9, 0x23,
+ 0xf0, 0xae, 0xa3, 0xb4, 0x42, 0xd3, 0xef, 0xd3, 0xfe, 0x0d, 0xf0, 0x50, 0x7b, 0x7c, 0x3b, 0x7a,
+ 0xec, 0x9c, 0xfd, 0xe3, 0xc1, 0xe4, 0x35, 0x46, 0xbf, 0x23, 0x26, 0x72, 0xfa, 0x4b, 0xb2, 0xaa,
+ 0xab, 0xea, 0x7e, 0x7e, 0xc9, 0x83, 0xdb, 0xf4, 0x07, 0xbf, 0xf7, 0xb3, 0x2f, 0xdf, 0xe6, 0x66,
+ 0xda, 0x7a, 0x8b, 0x5c, 0xc0, 0xd8, 0xfa, 0xd8, 0x92, 0x43, 0x2b, 0xb0, 0xf7, 0xdd, 0x9e, 0x1d,
+ 0x6d, 0x38, 0x6d, 0xd0, 0x22, 0x20, 0x7d, 0xdd, 0x25, 0x9f, 0xb7, 0x61, 0x1b, 0xf5, 0x7f, 0xf6,
+ 0xc5, 0x9b, 0x9d, 0x6c, 0xc2, 0x96, 0x28, 0xd9, 0x84, 0xfb, 0x32, 0x68, 0x13, 0x1e, 0x52, 0x32,
+ 0x85, 0x66, 0x09, 0x8e, 0x8d, 0xd6, 0x97, 0x38, 0x1b, 0x6d, 0x48, 0xa5, 0x14, 0x9a, 0x25, 0x1e,
+ 0x36, 0x5a, 0x5f, 0xbe, 0x6c, 0xb4, 0x21, 0xc5, 0xd9, 0x22, 0xaf, 0x60, 0x62, 0x8b, 0x00, 0xb1,
+ 0x02, 0x06, 0xb4, 0x68, 0x76, 0x7f, 0xd3, 0xb1, 0x0d, 0x68, 0xf7, 0xbc, 0x0d, 0x38, 0x30, 0xf5,
+ 0x36, 0xe0, 0xd0, 0xa8, 0xd0, 0xad, 0xc5, 0x8e, 0xfa, 0x0d, 0xfd, 0xfa, 0xbf, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x01, 0xeb, 0x13, 0xfa, 0x95, 0x0a, 0x00, 0x00,
}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index da02ce169..7fba6a7ce 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -3,7 +3,6 @@ package weed_server
import (
"context"
"github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -88,20 +87,31 @@ func (fs *FilerServer) GetEntryAttributes(ctx context.Context, req *filer_pb.Get
}, nil
}
-func (fs *FilerServer) GetFileContent(ctx context.Context, req *filer_pb.GetFileContentRequest) (*filer_pb.GetFileContentResponse, error) {
+func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {
- server, err := operation.LookupFileId(fs.getMasterNode(), req.FileId)
+ lookupResult, err := operation.LookupVolumeIds(fs.getMasterNode(), req.VolumeIds)
if err != nil {
return nil, err
}
- content, err := util.Get(server)
- if err != nil {
- return nil, err
+
+ resp := &filer_pb.LookupVolumeResponse{
+ LocationsMap: make(map[string]*filer_pb.Locations),
}
- return &filer_pb.GetFileContentResponse{
- Content: content,
- }, nil
+ for vid, locations := range lookupResult {
+ var locs []*filer_pb.Location
+ for _, loc := range locations.Locations {
+ locs = append(locs, &filer_pb.Location{
+ Url: loc.Url,
+ PublicUrl: loc.PublicUrl,
+ })
+ }
+ resp.LocationsMap[vid] = &filer_pb.Locations{
+ Locations: locs,
+ }
+ }
+
+ return resp, nil
}
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
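
For reference, a sketch of the response shape the rewritten handler produces, built by hand with hypothetical server addresses; this is the map the FUSE client above consumes as vid2Locations.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// one entry per requested volume id; Locations lists the volume servers
	resp := &filer_pb.LookupVolumeResponse{
		LocationsMap: map[string]*filer_pb.Locations{
			"3": {Locations: []*filer_pb.Location{
				{Url: "127.0.0.1:8080", PublicUrl: "localhost:8080"},
			}},
		},
	}
	for vid, locs := range resp.LocationsMap {
		fmt.Println("volume", vid, "served from", locs.Locations[0].Url)
	}
}
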
diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index ca9f7c50e..00dbdf90f 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -183,3 +183,33 @@ func NormalizeUrl(url string) string {
}
return "http://" + url
}
+
+func ReadUrl(fileUrl string, offset int64, size int, buf []byte) (n int64, e error) {
+
+ req, _ := http.NewRequest("GET", fileUrl, nil)
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
+
+ r, err := client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+ defer r.Body.Close()
+ if r.StatusCode >= 400 {
+ return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
+ }
+
+ var i, m int
+
+ for {
+ m, err = r.Body.Read(buf[i:cap(buf)])
+ i += m
+ n += int64(m)
+ if err == io.EOF {
+ return n, nil
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+
+}
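
A small usage sketch for the new ReadUrl helper, assuming a hypothetical volume server URL and file id: it issues a ranged GET and fills the caller-supplied buffer.

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	buf := make([]byte, 1024)
	// read 1 KiB starting at byte offset 4096 of the stored blob
	n, err := util.ReadUrl("http://127.0.0.1:8080/3,01637037d6", 4096, len(buf), buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", n)
}
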