path: root/weed/server
author    Konstantin Lebedev <lebedev_k@tochka.com>  2020-12-01 16:03:34 +0500
committer Konstantin Lebedev <lebedev_k@tochka.com>  2020-12-01 16:03:34 +0500
commit    03620776ece3175dac979b05c491d26d14faef0f (patch)
tree      4fce7d0a55c3fa7f5b7c43e9d90d5e194c439239 /weed/server
parent    4e55baf5b109cfe5cf9f65c44cd92c542b4acf5e (diff)
parent    005a6123e98170b2bdf99eb5b8a67ca3cea94190 (diff)
Merge branch 'upstream_master' into store_s3cred
Diffstat (limited to 'weed/server')
-rw-r--r--  weed/server/filer_grpc_server.go                       2
-rw-r--r--  weed/server/filer_grpc_server_rename.go                1
-rw-r--r--  weed/server/filer_server.go                            1
-rw-r--r--  weed/server/filer_server_handlers_read.go              4
-rw-r--r--  weed/server/filer_server_handlers_write.go             1
-rw-r--r--  weed/server/filer_server_handlers_write_autochunk.go  43
-rw-r--r--  weed/server/master_grpc_server_volume.go              13
-rw-r--r--  weed/server/volume_grpc_copy.go                       31
-rw-r--r--  weed/server/volume_grpc_erasure_coding.go             71
-rw-r--r--  weed/server/volume_grpc_tier_download.go               4
-rw-r--r--  weed/server/volume_grpc_tier_upload.go                 2
-rw-r--r--  weed/server/volume_server.go                           3
-rw-r--r--  weed/server/volume_server_handlers_read.go             4
-rw-r--r--  weed/server/webdav_server.go                           1
14 files changed, 109 insertions(+), 72 deletions(-)
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 7b04e4fab..46e5c5957 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -39,6 +39,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
Extended: entry.Extended,
HardLinkId: entry.HardLinkId,
HardLinkCounter: entry.HardLinkCounter,
+ Content: entry.Content,
},
}, nil
}
@@ -84,6 +85,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
Extended: entry.Extended,
HardLinkId: entry.HardLinkId,
HardLinkCounter: entry.HardLinkCounter,
+ Content: entry.Content,
},
}); err != nil {
return err
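
Both hunks above extend the filer.Entry to filer_pb.Entry conversion with the new inline Content field, so lookups and listings no longer drop inline file data. A minimal stand-alone sketch of the pattern (the struct types here are local stand-ins, not the real SeaweedFS types):

package main

import "fmt"

// Local stand-ins for filer.Entry and filer_pb.Entry; only the fields touched
// by this commit are modeled, everything else is illustrative.
type Entry struct {
	Extended        map[string][]byte
	HardLinkId      []byte
	HardLinkCounter int32
	Content         []byte
}

type PbEntry struct {
	Extended        map[string][]byte
	HardLinkId      []byte
	HardLinkCounter int32
	Content         []byte
}

// toPbEntry mirrors the hunks: every conversion site must now copy Content,
// otherwise inline data is silently lost on LookupDirectoryEntry/ListEntries.
func toPbEntry(e *Entry) *PbEntry {
	return &PbEntry{
		Extended:        e.Extended,
		HardLinkId:      e.HardLinkId,
		HardLinkCounter: e.HardLinkCounter,
		Content:         e.Content, // the field added by this commit
	}
}

func main() {
	pb := toPbEntry(&Entry{Content: []byte("inline bytes")})
	fmt.Printf("content survives conversion: %q\n", pb.Content)
}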
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index f9ddeb600..391efb793 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -110,6 +110,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
Attr: entry.Attr,
Chunks: entry.Chunks,
Extended: entry.Extended,
+ Content: entry.Content,
}
createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, nil)
if createErr != nil {
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index b11448db4..461c08aad 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -126,6 +126,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
defaultMux.HandleFunc("/", fs.filerHandler)
}
if defaultMux != readonlyMux {
+ handleStaticResources(readonlyMux)
readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
}
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 69d485e90..f77b7f08d 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -147,6 +147,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ if offset+size <= int64(len(entry.Content)) {
+ _, err := writer.Write(entry.Content[offset:offset+size])
+ return err
+ }
return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
})
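
The read handler gains a fast path: when the requested range lies entirely within the entry's inline Content, it is served from memory and the volume-server round trip is skipped. A minimal sketch of that logic, assuming the same semantics as the hunk:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeRange serves from the inline buffer only when the whole requested range
// fits inside it; otherwise it falls back to the chunk-streaming callback
// (filer.StreamContent in the real handler).
func writeRange(w io.Writer, content []byte, offset, size int64, stream func(io.Writer, int64, int64) error) error {
	if offset+size <= int64(len(content)) {
		_, err := w.Write(content[offset : offset+size])
		return err
	}
	return stream(w, offset, size)
}

func main() {
	var buf bytes.Buffer
	noop := func(io.Writer, int64, int64) error { return nil }
	_ = writeRange(&buf, []byte("hello, inline world"), 7, 6, noop)
	fmt.Printf("%q\n", buf.String()) // "inline"
}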
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 09a8e3626..c2d92f8ba 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -66,6 +66,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
)
fs.autoChunk(ctx, w, r, so)
+ util.CloseRequest(r)
}
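
PostHandler now explicitly closes the request after autoChunk. The body of util.CloseRequest is not part of this diff; a plausible sketch, assuming the usual drain-then-close idiom that lets net/http reuse the connection:

package server

import (
	"io"
	"io/ioutil"
	"net/http"
)

// closeRequest drains whatever the handler left unread before closing, so the
// keep-alive connection can be reused instead of being torn down.
func closeRequest(r *http.Request) {
	io.Copy(ioutil.Discard, r.Body)
	r.Body.Close()
}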
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index fd2db884f..4d8a4d44c 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -87,7 +87,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
contentType = ""
}
- fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so)
+ fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so)
if err != nil {
return nil, nil, err
}
@@ -99,7 +99,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
}
md5bytes = md5Hash.Sum(nil)
- filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset)
+ filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)
return
}
@@ -109,7 +109,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
fileName := ""
contentType := ""
- fileChunks, md5Hash, chunkOffset, err := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
+ fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
if err != nil {
return nil, nil, err
}
@@ -121,12 +121,12 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
}
md5bytes = md5Hash.Sum(nil)
- filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset)
+ filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)
return
}
-func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64) (filerResult *FilerPostResult, replyerr error) {
+func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
// detect file mode
modeStr := r.URL.Query().Get("mode")
@@ -147,19 +147,12 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
}
- // fix the crTime
- existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
- crTime := time.Now()
- if err == nil && existingEntry != nil {
- crTime = existingEntry.Crtime
- }
-
glog.V(4).Infoln("saving", path)
entry := &filer.Entry{
FullPath: util.FullPath(path),
Attr: filer.Attr{
Mtime: time.Now(),
- Crtime: crTime,
+ Crtime: time.Now(),
Mode: os.FileMode(mode),
Uid: OS_UID,
Gid: OS_GID,
@@ -170,7 +163,8 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
Md5: md5bytes,
FileSize: uint64(chunkOffset),
},
- Chunks: fileChunks,
+ Chunks: fileChunks,
+ Content: content,
}
filerResult = &FilerPostResult{
@@ -199,13 +193,14 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
return filerResult, replyerr
}
-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error) {
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
var fileChunks []*filer_pb.FileChunk
md5Hash := md5.New()
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
chunkOffset := int64(0)
+ var smallContent, content []byte
for {
limitedReader := io.LimitReader(partReader, int64(chunkSize))
@@ -213,14 +208,15 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
// assign one file id for one chunk
fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
if assignErr != nil {
- return nil, nil, 0, assignErr
+ return nil, nil, 0, assignErr, nil
}
// upload the chunk to the volume server
- uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
+ uploadResult, uploadErr, data := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
if uploadErr != nil {
- return nil, nil, 0, uploadErr
+ return nil, nil, 0, uploadErr, nil
}
+ content = data
// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
@@ -240,10 +236,13 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
break
}
}
- return fileChunks, md5Hash, chunkOffset, nil
+ if chunkOffset < 2048 {
+ smallContent = content
+ }
+ return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
-func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) {
+func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
start := time.Now()
@@ -251,8 +250,8 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
}()
- uploadResult, err, _ := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
- return uploadResult, err
+ uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
+ return uploadResult, err, data
}
func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType {
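
uploadReaderToChunks and doUpload now also hand back the uploaded bytes, and saveMetaData stores them inline when the whole upload finished inside the first chunk (chunkOffset < 2048). Note that content only ever holds the last chunk's data, which is why the inline copy is kept solely for uploads small enough to fit in one chunk. A sketch of the decision, assuming the semantics of the hunk:

package server

// pickSmallContent mirrors the tail of uploadReaderToChunks: only an upload
// that ended within the first 2048 bytes keeps an inline copy; anything larger
// is represented by its chunks alone.
func pickSmallContent(chunkOffset int64, lastChunkData []byte) []byte {
	if chunkOffset < 2048 {
		return lastChunkData // the whole file fit into a single small chunk
	}
	return nil
}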
diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go
index 03b718291..20a22d126 100644
--- a/weed/server/master_grpc_server_volume.go
+++ b/weed/server/master_grpc_server_volume.go
@@ -177,3 +177,16 @@ func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.Looku
return resp, nil
}
+
+func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumVolumeRequest) (*master_pb.VacuumVolumeResponse, error) {
+
+ if !ms.Topo.IsLeader() {
+ return nil, raft.NotLeaderError
+ }
+
+ resp := &master_pb.VacuumVolumeResponse{}
+
+ ms.Topo.Vacuum(ms.grpcDialOption, float64(req.GarbageThreshold), ms.preallocateSize)
+
+ return resp, nil
+}
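
The master now exposes vacuuming as a gRPC call; followers refuse with raft.NotLeaderError, so clients must talk to the leader. A hedged client-side sketch (the address, dial options, and threshold value are assumptions, not part of the diff):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	// 19333 is the conventional master gRPC port (HTTP port + 10000).
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn)
	_, err = client.VacuumVolume(context.Background(), &master_pb.VacuumVolumeRequest{
		GarbageThreshold: 0.3, // vacuum volumes whose garbage ratio exceeds 30%
	})
	if err != nil {
		log.Fatal(err) // a non-leader master returns raft.NotLeaderError
	}
}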
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 2aecb140f..cfa3710a8 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -48,7 +48,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
// send .dat file
// confirm size and timestamp
var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse
- var volumeFileName, idxFileName, datFileName string
+ var dataBaseFileName, indexBaseFileName, idxFileName, datFileName string
err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
var err error
volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(),
@@ -59,24 +59,25 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
return fmt.Errorf("read volume file status failed, %v", err)
}
- volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
+ dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
+ indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId))
- ioutil.WriteFile(volumeFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755)
+ ioutil.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755)
// println("source:", volFileInfoResp.String())
- if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".dat", false, true); err != nil {
return err
}
- if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, indexBaseFileName, ".idx", false, false); err != nil {
return err
}
- if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".vif", false, true); err != nil {
return err
}
- os.Remove(volumeFileName + ".note")
+ os.Remove(dataBaseFileName + ".note")
return nil
})
@@ -84,18 +85,18 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
if err != nil {
return nil, err
}
- if volumeFileName == "" {
+ if dataBaseFileName == "" {
return nil, fmt.Errorf("not found volume %d file", req.VolumeId)
}
- idxFileName = volumeFileName + ".idx"
- datFileName = volumeFileName + ".dat"
+ idxFileName = indexBaseFileName + ".idx"
+ datFileName = dataBaseFileName + ".dat"
defer func() {
- if err != nil && volumeFileName != "" {
+ if err != nil && dataBaseFileName != "" {
os.Remove(idxFileName)
os.Remove(datFileName)
- os.Remove(volumeFileName + ".vif")
+ os.Remove(dataBaseFileName + ".vif")
}
}()
@@ -223,7 +224,7 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v
if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 {
return fmt.Errorf("volume %d is compacted", req.VolumeId)
}
- fileName = v.FileName() + req.Ext
+ fileName = v.FileName(req.Ext)
} else {
baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext
for _, location := range vs.store.Locations {
@@ -231,6 +232,10 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v
if util.FileExists(tName) {
fileName = tName
}
+ tName = util.Join(location.IdxDirectory, baseFileName)
+ if util.FileExists(tName) {
+ fileName = tName
+ }
}
if fileName == "" {
if req.IgnoreSourceFileNotFound {
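
With index files optionally living under a separate IdxDirectory, CopyFile's erasure-coding branch must probe both directories per location; the later hit wins, matching the hunk. A stand-alone sketch (Location is a local stand-in for the store's location type):

package server

import "path/filepath"

type Location struct{ Directory, IdxDirectory string }

// resolveEcFile mirrors the loop in CopyFile: check the data directory, then
// the index directory, letting the index-directory hit override the first.
func resolveEcFile(locations []Location, baseFileName string, exists func(string) bool) (fileName string) {
	for _, loc := range locations {
		if t := filepath.Join(loc.Directory, baseFileName); exists(t) {
			fileName = t
		}
		if t := filepath.Join(loc.IdxDirectory, baseFileName); exists(t) {
			fileName = t // the index directory wins when both contain the file
		}
	}
	return
}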
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 55e0261c8..2a7076e04 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -8,7 +8,6 @@ import (
"math"
"os"
"path"
- "path/filepath"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -27,7 +26,7 @@ import (
Steps to apply erasure coding to .dat .idx files
0. ensure the volume is readonly
1. client call VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files
-2. client ask master for possible servers to hold the ec files, at least 4 servers
+2. client ask master for possible servers to hold the ec files
3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server
4. target servers report the new ec files to the master
5. master stores vid -> [14]*DataNode
@@ -44,7 +43,7 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
if v == nil {
return nil, fmt.Errorf("volume %d not found", req.VolumeId)
}
- baseFileName := v.FileName()
+ baseFileName := v.DataFileName()
if v.Collection != req.Collection {
return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
@@ -56,8 +55,8 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
}
// write .ecx file
- if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil {
- return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err)
+ if err := erasure_coding.WriteSortedFileFromIdx(v.IndexFileName(), ".ecx"); err != nil {
+ return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", v.IndexFileName(), err)
}
// write .vif files
@@ -78,17 +77,18 @@ func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_s
var rebuiltShardIds []uint32
for _, location := range vs.store.Locations {
- if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) {
+ if util.FileExists(path.Join(location.IdxDirectory, baseFileName+".ecx")) {
// write .ec00 ~ .ec13 files
- baseFileName = path.Join(location.Directory, baseFileName)
- if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil {
- return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err)
+ dataBaseFileName := path.Join(location.Directory, baseFileName)
+ if generatedShardIds, err := erasure_coding.RebuildEcFiles(dataBaseFileName); err != nil {
+ return nil, fmt.Errorf("RebuildEcFiles %s: %v", dataBaseFileName, err)
} else {
rebuiltShardIds = generatedShardIds
}
- if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil {
- return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err)
+ indexBaseFileName := path.Join(location.IdxDirectory, baseFileName)
+ if err := erasure_coding.RebuildEcxFile(indexBaseFileName); err != nil {
+ return nil, fmt.Errorf("RebuildEcxFile %s: %v", dataBaseFileName, err)
}
break
@@ -110,13 +110,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
return nil, fmt.Errorf("no space left")
}
- baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
+ dataBaseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
+ indexBaseFileName := storage.VolumeFileName(location.IdxDirectory, req.Collection, int(req.VolumeId))
err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// copy ec data slices
for _, shardId := range req.ShardIds {
- if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
return err
}
}
@@ -124,7 +125,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
if req.CopyEcxFile {
// copy ecx file
- if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecx", false, false); err != nil {
return err
}
return nil
@@ -132,14 +133,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
if req.CopyEcjFile {
// copy ecj file
- if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecj", true, true); err != nil {
return err
}
}
if req.CopyVifFile {
// copy vif file
- if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, ".vif", false, true); err != nil {
return err
}
}
@@ -157,17 +158,19 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
// the shard should not be mounted before calling this.
func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {
- baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
+ bName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds)
found := false
+ var indexBaseFilename, dataBaseFilename string
for _, location := range vs.store.Locations {
- if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) {
+ if util.FileExists(path.Join(location.IdxDirectory, bName+".ecx")) {
found = true
- baseFilename = path.Join(location.Directory, baseFilename)
+ indexBaseFilename = path.Join(location.IdxDirectory, bName)
+ dataBaseFilename = path.Join(location.Directory, bName)
for _, shardId := range req.ShardIds {
- os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
+ os.Remove(dataBaseFilename + erasure_coding.ToExt(int(shardId)))
}
break
}
@@ -182,12 +185,18 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
hasIdxFile := false
existingShardCount := 0
- bName := filepath.Base(baseFilename)
for _, location := range vs.store.Locations {
fileInfos, err := ioutil.ReadDir(location.Directory)
if err != nil {
continue
}
+ if location.IdxDirectory != location.Directory {
+ idxFileInfos, err := ioutil.ReadDir(location.IdxDirectory)
+ if err != nil {
+ continue
+ }
+ fileInfos = append(fileInfos, idxFileInfos...)
+ }
for _, fileInfo := range fileInfos {
if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" {
hasEcxFile = true
@@ -204,14 +213,14 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
}
if hasEcxFile && existingShardCount == 0 {
- if err := os.Remove(baseFilename + ".ecx"); err != nil {
+ if err := os.Remove(indexBaseFilename + ".ecx"); err != nil {
return nil, err
}
- os.Remove(baseFilename + ".ecj")
+ os.Remove(indexBaseFilename + ".ecj")
}
if !hasIdxFile {
// .vif is used for ec volumes and normal volumes
- os.Remove(baseFilename + ".vif")
+ os.Remove(dataBaseFilename + ".vif")
}
return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
@@ -365,26 +374,26 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_
if !found {
return nil, fmt.Errorf("ec volume %d not found", req.VolumeId)
}
- baseFileName := v.FileName()
if v.Collection != req.Collection {
return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
}
+ dataBaseFileName, indexBaseFileName := v.DataBaseFileName(), v.IndexBaseFileName()
// calculate .dat file size
- datFileSize, err := erasure_coding.FindDatFileSize(baseFileName)
+ datFileSize, err := erasure_coding.FindDatFileSize(dataBaseFileName, indexBaseFileName)
if err != nil {
- return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err)
+ return nil, fmt.Errorf("FindDatFileSize %s: %v", dataBaseFileName, err)
}
// write .dat file from .ec00 ~ .ec09 files
- if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil {
- return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
+ if err := erasure_coding.WriteDatFile(dataBaseFileName, datFileSize); err != nil {
+ return nil, fmt.Errorf("WriteEcFiles %s: %v", dataBaseFileName, err)
}
// write .idx file from .ecx and .ecj files
- if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil {
- return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err)
+ if err := erasure_coding.WriteIdxFileFromEcIndex(indexBaseFileName); err != nil {
+ return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", v.IndexBaseFileName(), err)
}
return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil
diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go
index 7b3982e40..73d8ae7cb 100644
--- a/weed/server/volume_grpc_tier_download.go
+++ b/weed/server/volume_grpc_tier_download.go
@@ -58,9 +58,9 @@ func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.Volume
})
}
// copy the data file
- _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn)
+ _, err := backendStorage.DownloadFile(v.FileName(".dat"), storageKey, fn)
if err != nil {
- return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err)
+ return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName(".dat"), err)
}
if req.KeepRemoteDatFile {
diff --git a/weed/server/volume_grpc_tier_upload.go b/weed/server/volume_grpc_tier_upload.go
index c9694df59..e51de5f1d 100644
--- a/weed/server/volume_grpc_tier_upload.go
+++ b/weed/server/volume_grpc_tier_upload.go
@@ -93,7 +93,7 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTi
}
if !req.KeepLocalDatFile {
- os.Remove(v.FileName() + ".dat")
+ os.Remove(v.FileName(".dat"))
}
return nil
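
Both tier handlers switch from v.FileName() + ".dat" to v.FileName(".dat"): the extension becomes an argument so the volume can choose the directory per file kind. A sketch of what the refactored method plausibly does (the field names here are assumptions):

package server

import "path/filepath"

type Volume struct {
	dir      string // data files (.dat, .vif, ...)
	dirIdx   string // index files, possibly on separate, faster storage
	baseName string
}

// FileName now takes the extension, so index files can be routed to dirIdx
// while everything else stays under dir.
func (v *Volume) FileName(ext string) string {
	if ext == ".idx" {
		return filepath.Join(v.dirIdx, v.baseName+ext)
	}
	return filepath.Join(v.dir, v.baseName+ext)
}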
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 83df32fdd..468f75890 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -38,6 +38,7 @@ type VolumeServer struct {
func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int, minFreeSpacePercents []float32,
+ idxFolder string,
needleMapKind storage.NeedleMapType,
masterNodes []string, pulseSeconds int,
dataCenter string, rack string,
@@ -75,7 +76,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
vs.checkWithMaster()
- vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, vs.needleMapKind)
+ vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind)
vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
handleStaticResources(adminMux)
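
NewVolumeServer and storage.NewStore each gain an idxFolder parameter, threading a dedicated index directory down to every disk location. Illustrative wiring only; the real flag handling lives in the weed command package, and the flag name is an assumption:

package main

import "flag"

var idxFolder = flag.String("dir.idx", "", "directory to store .idx files, if different from the data folders")

func main() {
	flag.Parse()
	// server.NewVolumeServer(adminMux, publicMux, ip, port, publicUrl,
	//     folders, maxCounts, minFreeSpacePercents,
	//     *idxFolder, // new parameter, passed straight through to storage.NewStore
	//     ...)
}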
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 15fd446e7..1c963b39c 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -159,8 +159,8 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if n.Data, err = util.DecompressData(n.Data); err != nil {
glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
}
- } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) {
- w.Header().Set("Content-Encoding", "zstd")
+ // } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) {
+ // w.Header().Set("Content-Encoding", "zstd")
} else if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && util.IsGzippedContent(n.Data) {
w.Header().Set("Content-Encoding", "gzip")
} else {
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 3e9f882e3..f4b6a6f28 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -418,6 +418,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
return 0, fmt.Errorf("upload result: %v", uploadResult.Error)
}
+ f.entry.Content = nil
f.entry.Chunks = append(f.entry.Chunks, uploadResult.ToPbFileChunk(fileId, f.off))
err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
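
The WebDAV write path clears the entry's inline Content before appending the new chunk: an entry must describe its bytes either inline or via Chunks, never both, or readers taking the inline fast path would see stale data. A minimal sketch of that invariant (types are local stand-ins):

package server

type FileChunk struct {
	FileId string
	Offset int64
}

type Entry struct {
	Content []byte
	Chunks  []*FileChunk
}

// appendChunk enforces the invariant from the webdav change: once data spills
// into chunks, any stale inline copy must be dropped so it cannot shadow them.
func appendChunk(entry *Entry, chunk *FileChunk) {
	entry.Content = nil
	entry.Chunks = append(entry.Chunks, chunk)
}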