-rw-r--r--  other/metrics/grafana_seaweedfs.json           | 16
-rw-r--r--  weed/command/filer.go                          |  3
-rw-r--r--  weed/command/server.go                         |  6
-rw-r--r--  weed/command/volume.go                         |  5
-rw-r--r--  weed/s3api/stats.go                            |  6
-rw-r--r--  weed/server/common.go                          |  4
-rw-r--r--  weed/server/filer_server.go                    |  3
-rw-r--r--  weed/server/filer_server_handlers_read_dir.go  |  7
-rw-r--r--  weed/server/filer_ui/filer.html                | 40
-rw-r--r--  weed/server/volume_server.go                   |  6
-rw-r--r--  weed/server/volume_server_handlers.go          | 28
-rw-r--r--  weed/stats/metrics.go                          |  5
-rw-r--r--  weed/wdclient/vid_map.go                       |  2
13 files changed, 86 insertions(+), 45 deletions(-)
diff --git a/other/metrics/grafana_seaweedfs.json b/other/metrics/grafana_seaweedfs.json
index 3b9b222b4..88844b3c3 100644
--- a/other/metrics/grafana_seaweedfs.json
+++ b/other/metrics/grafana_seaweedfs.json
@@ -539,11 +539,12 @@
"step": 60
},
{
- "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
+ "interval": "",
"intervalFactor": 2,
- "legendFormat": "{{type}}",
+ "legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -645,11 +646,12 @@
"step": 60
},
{
- "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
+ "interval": "",
"intervalFactor": 2,
- "legendFormat": "{{type}}",
+ "legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -751,11 +753,11 @@
"step": 60
},
{
- "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
- "legendFormat": "{{type}}",
+ "legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -864,7 +866,7 @@
"expr": "rate(SeaweedFS_s3_request_total[1m])",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "{{type}}",
+ "legendFormat": "{{bucket}} {{type}}",
"refId": "A",
"step": 30
}
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 2c91e6fec..c9f9a1956 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -55,6 +55,7 @@ type FilerOptions struct {
debug *bool
debugPort *int
localSocket *string
+ showUIDirectoryDelete *bool
}
func init() {
@@ -82,6 +83,7 @@ func init() {
f.debug = cmdFiler.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:<debug.port>/debug/pprof/goroutine?debug=2")
f.debugPort = cmdFiler.Flag.Int("debug.port", 6060, "http port for debugging")
f.localSocket = cmdFiler.Flag.String("localSocket", "", "default to /tmp/seaweedfs-filer-<port>.sock")
+ f.showUIDirectoryDelete = cmdFiler.Flag.Bool("ui.deleteDir", true, "enable filer UI show delete directory button")
// start s3 on filer
filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -216,6 +218,7 @@ func (fo *FilerOptions) startFiler() {
Cipher: *fo.cipher,
SaveToFilerLimit: int64(*fo.saveToFilerLimit),
ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
+ ShowUIDirectoryDelete: *fo.showUIDirectoryDelete,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
diff --git a/weed/command/server.go b/weed/command/server.go
index 4b6b6c642..b1812bb9b 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -2,8 +2,6 @@ package command
import (
"fmt"
- "github.com/chrislusf/seaweedfs/weed/pb"
- "github.com/chrislusf/seaweedfs/weed/util/grace"
"net/http"
"os"
"strings"
@@ -12,7 +10,9 @@ import (
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
)
type ServerOptions struct {
@@ -114,6 +114,7 @@ func init() {
filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
filerOptions.localSocket = cmdServer.Flag.String("filer.localSocket", "", "default to /tmp/seaweedfs-filer-<port>.sock")
+ filerOptions.showUIDirectoryDelete = cmdServer.Flag.Bool("filer.ui.deleteDir", true, "enable filer UI show delete directory button")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.portGrpc = cmdServer.Flag.Int("volume.port.grpc", 0, "volume server grpc listen port")
@@ -131,6 +132,7 @@ func init() {
serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, "<experimental> enable tcp port")
+ serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")
diff --git a/weed/command/volume.go b/weed/command/volume.go
index b1455352c..158bdf162 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -65,7 +65,8 @@ type VolumeServerOptions struct {
preStopSeconds *int
metricsHttpPort *int
// pulseSeconds *int
- enableTcp *bool
+ enableTcp *bool
+ inflightUploadDataTimeout *time.Duration
}
func init() {
@@ -96,6 +97,7 @@ func init() {
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
v.enableTcp = cmdVolume.Flag.Bool("tcp", false, "<experimental> enable tcp port")
+ v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
}
var cmdVolume = &Command{
@@ -244,6 +246,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
*v.fileSizeLimitMB,
int64(*v.concurrentUploadLimitMB)*1024*1024,
int64(*v.concurrentDownloadLimitMB)*1024*1024,
+ *v.inflightUploadDataTimeout,
)
// starting grpc server
grpcS := v.startGrpcService(volumeServer)
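
The new inflightUploadDataTimeout option is declared with Flag.Duration, so it accepts Go duration strings on the command line. A minimal stdlib-flag sketch of the same idea (standalone, not the actual cmdVolume flag set):

package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	// e.g. -inflightUploadDataTimeout=30s or =2m; defaults to one minute.
	timeout := flag.Duration("inflightUploadDataTimeout", 60*time.Second,
		"inflight upload data wait timeout of volume servers")
	flag.Parse()
	fmt.Println("waiting up to", *timeout, "for inflight uploads to drain")
}
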
diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go
index 973d8c0eb..003807a25 100644
--- a/weed/s3api/stats.go
+++ b/weed/s3api/stats.go
@@ -1,6 +1,7 @@
package s3api
import (
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"net/http"
"strconv"
@@ -27,11 +28,12 @@ func (r *StatusRecorder) Flush() {
func track(f http.HandlerFunc, action string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
+ bucket, _ := s3_constants.GetBucketAndObject(r)
w.Header().Set("Server", "SeaweedFS S3")
recorder := NewStatusResponseWriter(w)
start := time.Now()
f(recorder, r)
- stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
- stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc()
+ stats_collect.S3RequestHistogram.WithLabelValues(action, bucket).Observe(time.Since(start).Seconds())
+ stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status), bucket).Inc()
}
}
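
The tracking middleware now also labels each observation with the bucket name obtained from s3_constants.GetBucketAndObject. A minimal sketch of that idea, assuming path-style addressing where the bucket is the first path segment; the helper below is an illustrative stand-in, not the actual s3_constants implementation:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// bucketAndObject is a hypothetical stand-in for s3_constants.GetBucketAndObject:
// for a path-style request like /mybucket/dir/key it returns ("mybucket", "dir/key").
func bucketAndObject(r *http.Request) (bucket, object string) {
	parts := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)
	bucket = parts[0]
	if len(parts) > 1 {
		object = parts[1]
	}
	return
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://filer:8333/mybucket/dir/key", nil)
	b, o := bucketAndObject(r)
	fmt.Println(b, o) // mybucket dir/key
}
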
diff --git a/weed/server/common.go b/weed/server/common.go
index 39a8637ac..f02ec67ac 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -284,6 +284,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
if err := writeFn(bufferedWriter, 0, totalSize); err != nil {
+ glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -294,6 +295,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
//mostly copy from src/pkg/net/http/fs.go
ranges, err := parseRange(rangeReq, totalSize)
if err != nil {
+ glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
@@ -326,6 +328,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
w.WriteHeader(http.StatusPartialContent)
err = writeFn(bufferedWriter, ra.start, ra.length)
if err != nil {
+ glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -365,6 +368,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
}
w.WriteHeader(http.StatusPartialContent)
if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil {
+ glog.Errorf("processRangeRequest err: %v", err)
http.Error(w, "Internal Error", http.StatusInternalServerError)
return
}
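
The extra glog.Errorf calls capture the response headers when a range request fails mid-stream. For comparison, when the content is available as an io.ReadSeeker the standard library can handle Range parsing and the 206/416 status codes by itself; processRangeRequest does this manually because it streams through a writeFn. A small illustrative example of the stdlib path:

package main

import (
	"net/http"
	"strings"
	"time"
)

func main() {
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		content := strings.NewReader("hello, range requests")
		// ServeContent parses the Range header itself and answers with
		// 206 Partial Content or 416 Requested Range Not Satisfiable.
		http.ServeContent(w, r, "blob.txt", time.Now(), content)
	})
	http.ListenAndServe(":8080", nil)
}
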
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 8908b5e5f..6bf0261ee 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -3,7 +3,6 @@ package weed_server
import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"net/http"
"os"
"sync"
@@ -17,6 +16,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -67,6 +67,7 @@ type FilerOption struct {
Cipher bool
SaveToFilerLimit int64
ConcurrentUploadLimit int64
+ ShowUIDirectoryDelete bool
}
type FilerServer struct {
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 8382cfc76..eaf17fa18 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -73,7 +73,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
return
}
- ui.StatusTpl.Execute(w, struct {
+ err = ui.StatusTpl.Execute(w, struct {
Path string
Breadcrumbs []ui.Breadcrumb
Entries interface{}
@@ -81,6 +81,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
LastFileName string
ShouldDisplayLoadMore bool
EmptyFolder bool
+ ShowDirectoryDelete bool
}{
path,
ui.ToBreadcrumb(path),
@@ -89,5 +90,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName,
shouldDisplayLoadMore,
emptyFolder,
+ fs.option.ShowUIDirectoryDelete,
})
+ if err != nil {
+ glog.V(0).Infof("Template Execute Error: %v", err)
+ }
}
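
Checking the error from Execute, as the handler now does, surfaces render failures that would otherwise be dropped silently. A minimal, self-contained illustration (the template and data here are hypothetical, not the filer UI template):

package main

import (
	"html/template"
	"log"
	"os"
)

func main() {
	tpl := template.Must(template.New("page").Parse("Path: {{ .Path }}\n"))
	data := struct{ Path string }{Path: "/buckets"}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		// Mirrors the glog.V(0).Infof above: log instead of ignoring the error.
		log.Printf("template execute error: %v", err)
	}
}
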
diff --git a/weed/server/filer_ui/filer.html b/weed/server/filer_ui/filer.html
index f9c35440e..c9d832e8f 100644
--- a/weed/server/filer_ui/filer.html
+++ b/weed/server/filer_ui/filer.html
@@ -109,38 +109,37 @@
<form class="upload-form">
<input type="file" id="fileElem" multiple onchange="handleFiles(this.files)">
- {{if .EmptyFolder}}
+ {{ if .EmptyFolder }}
<div class="row add-files">
+
</div>
- {{else}}
+ {{ else }}
<table width="100%" class="table table-hover">
- {{$path := .Path }}
+ {{ $path := .Path }}
+ {{ $showDirDel := .ShowDirectoryDelete }}
{{ range $entry_index, $entry := .Entries }}
<tr>
<td>
- {{if $entry.IsDirectory}}
+ {{ if $entry.IsDirectory }}
<span class="glyphicon glyphicon-folder-open" aria-hidden="true"></span>&nbsp;
<a href="{{ printpath $path "/" $entry.Name "/"}}" >
{{ $entry.Name }}
</a>
- {{else}}
+ {{ else }}
<a href="{{ printpath $path "/" $entry.Name }}" >
{{ $entry.Name }}
</a>
- {{end}}
+ {{ end }}
</td>
<td align="right" nowrap>
- {{if $entry.IsDirectory}}
- {{else}}
+ {{ if not $entry.IsDirectory }}
{{ $entry.Mime }}&nbsp;
- {{end}}
+ {{ end }}
</td>
<td align="right" nowrap>
- {{if $entry.IsDirectory}}
- {{else}}
+ {{ if not $entry.IsDirectory }}
{{ $entry.Size | humanizeBytes }}&nbsp;
- {{end}}
+ {{ end }}
</td>
<td align="right" nowrap>
{{ $entry.Timestamp.Format "2006-01-02 15:04" }}
@@ -150,31 +149,32 @@
<label class="btn" onclick="handleRename('{{ $entry.Name }}', '{{ printpath $path "/" }}')">
<span class="glyphicon glyphicon-edit" aria-hidden="true"></span>
</label>
- {{if $entry.IsDirectory}}
- <label class="btn" onclick="handleDelete('{{ printpath $path "/" $entry.Name "/" }}')">
+ {{ if and $entry.IsDirectory $showDirDel }}
+ <label class="btn" onclick="handleDelete('{{ printpath $path "/" $entry.Name "/" }}')">
<span class="glyphicon glyphicon-trash" aria-hidden="true"></span>
</label>
- {{else}}
+ {{ end }}
+ {{ if not $entry.IsDirectory }}
<label class="btn" onclick="handleDelete('{{ printpath $path "/" $entry.Name }}')">
<span class="glyphicon glyphicon-trash" aria-hidden="true"></span>
</label>
- {{end}}
+ {{ end }}
</div>
</td>
</tr>
{{ end }}
</table>
- {{end}}
+ {{ end }}
</form>
</div>
- {{if .ShouldDisplayLoadMore}}
+ {{ if .ShouldDisplayLoadMore }}
<div class="row">
- <a href={{ print .Path "?limit=" .Limit "&lastFileName=" .LastFileName}} >
+ <a href={{ print .Path "?limit=" .Limit "&lastFileName=" .LastFileName }} >
Load more
</a>
</div>
- {{end}}
+ {{ end }}
<br/>
<br/>
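
The delete button for directories is now rendered only when both conditions hold, via {{ if and $entry.IsDirectory $showDirDel }}. A small sketch of how that guard behaves, using a stand-in template rather than filer.html itself:

package main

import (
	"html/template"
	"os"
)

func main() {
	// Hypothetical miniature of the filer UI guard: the delete control shows
	// only for directories and only when ShowDirectoryDelete is enabled.
	tpl := template.Must(template.New("row").Parse(
		"{{ if and .IsDirectory .ShowDirectoryDelete }}[delete dir]{{ end }}\n"))
	for _, d := range []struct {
		IsDirectory         bool
		ShowDirectoryDelete bool
	}{{true, true}, {true, false}, {false, true}} {
		tpl.Execute(os.Stdout, d) // prints "[delete dir]" only for the first case
	}
}
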
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 477a3709c..abb30229a 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -3,6 +3,7 @@ package weed_server
import (
"net/http"
"sync"
+ "time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -24,7 +25,9 @@ type VolumeServer struct {
inFlightDownloadDataSize int64
concurrentUploadLimit int64
concurrentDownloadLimit int64
+ inFlightUploadDataLimitCond *sync.Cond
inFlightDownloadDataLimitCond *sync.Cond
+ inflightUploadDataTimeout time.Duration
SeedMasterNodes []pb.ServerAddress
currentMaster pb.ServerAddress
@@ -60,6 +63,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
fileSizeLimitMB int,
concurrentUploadLimit int64,
concurrentDownloadLimit int64,
+ inflightUploadDataTimeout time.Duration,
) *VolumeServer {
v := util.GetViper()
@@ -84,9 +88,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024,
isHeartbeating: true,
stopChan: make(chan bool),
+ inFlightUploadDataLimitCond: sync.NewCond(new(sync.Mutex)),
inFlightDownloadDataLimitCond: sync.NewCond(new(sync.Mutex)),
concurrentUploadLimit: concurrentUploadLimit,
concurrentDownloadLimit: concurrentDownloadLimit,
+ inflightUploadDataTimeout: inflightUploadDataTimeout,
}
vs.SeedMasterNodes = masterNodes
diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go
index 49bc297fb..293f36f14 100644
--- a/weed/server/volume_server_handlers.go
+++ b/weed/server/volume_server_handlers.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"sync/atomic"
+ "time"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -56,20 +57,31 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
vs.guard.WhiteList(vs.DeleteHandler)(w, r)
case "PUT", "POST":
- // wait until in flight data is less than the limit
contentLength := getContentLength(r)
-
// exclude the replication from the concurrentUploadLimitMB
- if vs.concurrentUploadLimit != 0 && r.URL.Query().Get("type") != "replicate" &&
- atomic.LoadInt64(&vs.inFlightUploadDataSize) > vs.concurrentUploadLimit {
- err := fmt.Errorf("reject because inflight upload data %d > %d", vs.inFlightUploadDataSize, vs.concurrentUploadLimit)
- glog.V(1).Infof("too many requests: %v", err)
- writeJsonError(w, r, http.StatusTooManyRequests, err)
- return
+ if r.URL.Query().Get("type") != "replicate" && vs.concurrentUploadLimit != 0 {
+ startTime := time.Now()
+ vs.inFlightUploadDataLimitCond.L.Lock()
+ for vs.inFlightUploadDataSize > vs.concurrentUploadLimit {
+ //wait timeout check
+ if startTime.Add(vs.inflightUploadDataTimeout).Before(time.Now()) {
+ vs.inFlightUploadDataLimitCond.L.Unlock()
+ err := fmt.Errorf("reject because inflight upload data %d > %d, and wait timeout", vs.inFlightUploadDataSize, vs.concurrentUploadLimit)
+ glog.V(1).Infof("too many requests: %v", err)
+ writeJsonError(w, r, http.StatusTooManyRequests, err)
+ return
+ }
+ glog.V(4).Infof("wait because inflight upload data %d > %d", vs.inFlightUploadDataSize, vs.concurrentUploadLimit)
+ vs.inFlightUploadDataLimitCond.Wait()
+ }
+ vs.inFlightUploadDataLimitCond.L.Unlock()
}
atomic.AddInt64(&vs.inFlightUploadDataSize, contentLength)
defer func() {
atomic.AddInt64(&vs.inFlightUploadDataSize, -contentLength)
+ if vs.concurrentUploadLimit != 0 {
+ vs.inFlightUploadDataLimitCond.Signal()
+ }
}()
// process uploads
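
Instead of rejecting immediately, uploads over the limit now block on a sync.Cond until either inflight bytes drop below concurrentUploadLimit or inflightUploadDataTimeout elapses. A self-contained sketch of that pattern with illustrative names, not the actual VolumeServer fields:

package main

import (
	"fmt"
	"sync"
	"time"
)

// limiter mirrors the admission pattern above: writers wait on a sync.Cond
// while inflight bytes exceed the limit, giving up once a deadline has passed.
type limiter struct {
	cond     *sync.Cond
	inflight int64
	limit    int64
	timeout  time.Duration
}

func (l *limiter) admit(size int64) error {
	start := time.Now()
	l.cond.L.Lock()
	for l.inflight > l.limit {
		if time.Since(start) > l.timeout {
			l.cond.L.Unlock()
			return fmt.Errorf("inflight %d > %d after waiting %v", l.inflight, l.limit, l.timeout)
		}
		// Wait releases the lock and re-acquires it when signaled.
		l.cond.Wait()
	}
	l.inflight += size
	l.cond.L.Unlock()
	return nil
}

func (l *limiter) done(size int64) {
	l.cond.L.Lock()
	l.inflight -= size
	l.cond.L.Unlock()
	l.cond.Signal()
}

func main() {
	l := &limiter{cond: sync.NewCond(new(sync.Mutex)), limit: 4 << 20, timeout: time.Second}
	if err := l.admit(1 << 20); err == nil {
		defer l.done(1 << 20)
		fmt.Println("admitted")
	}
}

One property of this shape worth noting: cond.Wait only returns when another goroutine calls Signal or Broadcast, so the deadline is re-checked on each wake-up rather than enforced by a timer.
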
diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go
index 207b37c81..f0b810608 100644
--- a/weed/stats/metrics.go
+++ b/weed/stats/metrics.go
@@ -173,7 +173,8 @@ var (
Subsystem: "s3",
Name: "request_total",
Help: "Counter of s3 requests.",
- }, []string{"type", "code"})
+ }, []string{"type", "code", "bucket"})
+
S3RequestHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "SeaweedFS",
@@ -181,7 +182,7 @@ var (
Name: "request_seconds",
Help: "Bucketed histogram of s3 request processing time.",
Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24),
- }, []string{"type"})
+ }, []string{"type", "bucket"})
)
func init() {
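
Adding bucket to the label set means every (type, code or le, bucket) combination becomes its own time series, which is what lets the Grafana panels above group and legend by bucket. A hedged sketch of recording into such a vector with the prometheus Go client:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var s3RequestHistogram = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_seconds",
		Help:      "Bucketed histogram of s3 request processing time.",
		Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 24),
	}, []string{"type", "bucket"})

func main() {
	prometheus.MustRegister(s3RequestHistogram)
	start := time.Now()
	// ... handle the request ...
	s3RequestHistogram.WithLabelValues("PUT", "mybucket").Observe(time.Since(start).Seconds())
}
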
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index cdd783d91..f7a9a0f1a 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -133,7 +133,7 @@ func (vc *vidMap) GetLocations(vid uint32) (locations []Location, found bool) {
return
}
locations, found = vc.ecVid2Locations[vid]
- return
+ return locations, found && len(locations) > 0
}
func (vc *vidMap) addLocation(vid uint32, location Location) {
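
With this change GetLocations reports found=false when the EC lookup yields an empty slice, so callers no longer treat an empty location list as a hit. The same idea in isolation, with illustrative types rather than the actual vidMap:

package main

import "fmt"

// lookup reports found only when the key exists and maps to at least one value.
func lookup(m map[uint32][]string, vid uint32) ([]string, bool) {
	locations, found := m[vid]
	return locations, found && len(locations) > 0
}

func main() {
	m := map[uint32][]string{7: {}, 9: {"10.0.0.1:8080"}}
	_, ok := lookup(m, 7)
	fmt.Println(ok) // false: present but empty
	_, ok = lookup(m, 9)
	fmt.Println(ok) // true
}
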