aboutsummaryrefslogtreecommitdiff
path: root/weed
diff options
context:
space:
mode:
Diffstat (limited to 'weed')
-rw-r--r--weed/command/filer_copy.go2
-rw-r--r--weed/command/filer_meta_tail.go72
-rw-r--r--weed/command/filer_meta_tail_elastic.go82
-rw-r--r--weed/command/filer_meta_tail_non_elastic.go14
-rw-r--r--weed/command/imports.go1
-rw-r--r--weed/command/scaffold/filer.toml10
-rw-r--r--weed/filer/arangodb/arangodb_store.go347
-rw-r--r--weed/filer/arangodb/arangodb_store_bucket.go40
-rw-r--r--weed/filer/arangodb/arangodb_store_kv.go54
-rw-r--r--weed/filer/arangodb/helpers.go136
-rw-r--r--weed/filer/arangodb/readme.md52
-rw-r--r--weed/filer/elastic/v7/doc.go9
-rw-r--r--weed/filer/elastic/v7/elastic_store.go3
-rw-r--r--weed/filer/elastic/v7/elastic_store_kv.go3
-rw-r--r--weed/filer/filer_delete_entry.go2
-rw-r--r--weed/filer/sqlite/doc.go9
-rw-r--r--weed/filer/sqlite/sqlite_store.go3
-rw-r--r--weed/filer/sqlite/sqlite_store_unsupported.go4
-rw-r--r--weed/iamapi/iamapi_management_handlers.go13
-rw-r--r--weed/notification/gocdk_pub_sub/doc.go9
-rw-r--r--weed/notification/gocdk_pub_sub/gocdk_pub_sub.go3
-rw-r--r--weed/remote_storage/hdfs/doc.go9
-rw-r--r--weed/remote_storage/hdfs/hdfs_kerberos.go3
-rw-r--r--weed/remote_storage/hdfs/hdfs_storage_client.go5
-rw-r--r--weed/replication/sub/notification_gocdk_pub_sub.go3
-rw-r--r--weed/server/filer_server.go1
-rw-r--r--weed/shell/command_volume_fsck.go237
-rw-r--r--weed/shell/command_volume_list.go52
-rw-r--r--weed/storage/store.go10
29 files changed, 1004 insertions, 184 deletions
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 06bb82319..9a41dd933 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -71,7 +71,7 @@ var cmdFilerCopy = &Command{
It can copy one or a list of files or folders.
If copying a whole folder recursively:
- All files under the folder and subfolders will be copyed.
+ All files under the folder and sub folders will be copied.
Optional parameter "-include" allows you to specify the file name patterns.
If "maxMB" is set to a positive number, files larger than it would be split into chunks.
diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go
index 51c4e7128..7dbeee444 100644
--- a/weed/command/filer_meta_tail.go
+++ b/weed/command/filer_meta_tail.go
@@ -1,12 +1,9 @@
package command
import (
- "context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/golang/protobuf/jsonpb"
- jsoniter "github.com/json-iterator/go"
- elastic "github.com/olivere/elastic/v7"
"os"
"path/filepath"
"strings"
@@ -124,72 +121,3 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
return true
}
-
-type EsDocument struct {
- Dir string `json:"dir,omitempty"`
- Name string `json:"name,omitempty"`
- IsDirectory bool `json:"isDir,omitempty"`
- Size uint64 `json:"size,omitempty"`
- Uid uint32 `json:"uid,omitempty"`
- Gid uint32 `json:"gid,omitempty"`
- UserName string `json:"userName,omitempty"`
- Collection string `json:"collection,omitempty"`
- Crtime int64 `json:"crtime,omitempty"`
- Mtime int64 `json:"mtime,omitempty"`
- Mime string `json:"mime,omitempty"`
-}
-
-func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
- entry := event.NewEntry
- dir, name := event.NewParentPath, entry.Name
- id := util.Md5String([]byte(util.NewFullPath(dir, name)))
- esEntry := &EsDocument{
- Dir: dir,
- Name: name,
- IsDirectory: entry.IsDirectory,
- Size: entry.Attributes.FileSize,
- Uid: entry.Attributes.Uid,
- Gid: entry.Attributes.Gid,
- UserName: entry.Attributes.UserName,
- Collection: entry.Attributes.Collection,
- Crtime: entry.Attributes.Crtime,
- Mtime: entry.Attributes.Mtime,
- Mime: entry.Attributes.Mime,
- }
- return esEntry, id
-}
-
-func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
- options := []elastic.ClientOptionFunc{}
- options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
- options = append(options, elastic.SetSniff(false))
- options = append(options, elastic.SetHealthcheck(false))
- client, err := elastic.NewClient(options...)
- if err != nil {
- return nil, err
- }
- return func(resp *filer_pb.SubscribeMetadataResponse) error {
- event := resp.EventNotification
- if event.OldEntry != nil &&
- (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
- // delete or not update the same file
- dir, name := resp.Directory, event.OldEntry.Name
- id := util.Md5String([]byte(util.NewFullPath(dir, name)))
- println("delete", id)
- _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
- return err
- }
- if event.NewEntry != nil {
- // add a new file or update the same file
- esEntry, id := toEsEntry(event)
- value, err := jsoniter.Marshal(esEntry)
- if err != nil {
- return err
- }
- println(string(value))
- _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
- return err
- }
- return nil
- }, nil
-}
diff --git a/weed/command/filer_meta_tail_elastic.go b/weed/command/filer_meta_tail_elastic.go
new file mode 100644
index 000000000..4c5b606a3
--- /dev/null
+++ b/weed/command/filer_meta_tail_elastic.go
@@ -0,0 +1,82 @@
+//go:build elastic
+// +build elastic
+
+package command
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ jsoniter "github.com/json-iterator/go"
+ elastic "github.com/olivere/elastic/v7"
+ "strings"
+)
+
+type EsDocument struct {
+ Dir string `json:"dir,omitempty"`
+ Name string `json:"name,omitempty"`
+ IsDirectory bool `json:"isDir,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Uid uint32 `json:"uid,omitempty"`
+ Gid uint32 `json:"gid,omitempty"`
+ UserName string `json:"userName,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ Crtime int64 `json:"crtime,omitempty"`
+ Mtime int64 `json:"mtime,omitempty"`
+ Mime string `json:"mime,omitempty"`
+}
+
+func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
+ entry := event.NewEntry
+ dir, name := event.NewParentPath, entry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ esEntry := &EsDocument{
+ Dir: dir,
+ Name: name,
+ IsDirectory: entry.IsDirectory,
+ Size: entry.Attributes.FileSize,
+ Uid: entry.Attributes.Uid,
+ Gid: entry.Attributes.Gid,
+ UserName: entry.Attributes.UserName,
+ Collection: entry.Attributes.Collection,
+ Crtime: entry.Attributes.Crtime,
+ Mtime: entry.Attributes.Mtime,
+ Mime: entry.Attributes.Mime,
+ }
+ return esEntry, id
+}
+
+func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
+ options := []elastic.ClientOptionFunc{}
+ options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
+ options = append(options, elastic.SetSniff(false))
+ options = append(options, elastic.SetHealthcheck(false))
+ client, err := elastic.NewClient(options...)
+ if err != nil {
+ return nil, err
+ }
+ return func(resp *filer_pb.SubscribeMetadataResponse) error {
+ event := resp.EventNotification
+ if event.OldEntry != nil &&
+ (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
+ // delete or not update the same file
+ dir, name := resp.Directory, event.OldEntry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ println("delete", id)
+ _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
+ return err
+ }
+ if event.NewEntry != nil {
+ // add a new file or update the same file
+ esEntry, id := toEsEntry(event)
+ value, err := jsoniter.Marshal(esEntry)
+ if err != nil {
+ return err
+ }
+ println(string(value))
+ _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
+ return err
+ }
+ return nil
+ }, nil
+}
diff --git a/weed/command/filer_meta_tail_non_elastic.go b/weed/command/filer_meta_tail_non_elastic.go
new file mode 100644
index 000000000..f78f3ee09
--- /dev/null
+++ b/weed/command/filer_meta_tail_non_elastic.go
@@ -0,0 +1,14 @@
+//go:build !elastic
+// +build !elastic
+
+package command
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
+ return func(resp *filer_pb.SubscribeMetadataResponse) error {
+ return nil
+ }, nil
+}
diff --git a/weed/command/imports.go b/weed/command/imports.go
index 3792c45c4..5b3195907 100644
--- a/weed/command/imports.go
+++ b/weed/command/imports.go
@@ -15,6 +15,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/arangodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml
index 5d4513c36..0a505bbdc 100644
--- a/weed/command/scaffold/filer.toml
+++ b/weed/command/scaffold/filer.toml
@@ -285,6 +285,16 @@ healthcheck_enabled = false
index.max_result_window = 10000
+[arangodb] # in development dont use it
+enabled = false
+db_name = "seaweedfs"
+servers=["http://localhost:8529"] # list of servers to connect to
+# only basic auth supported for now
+username=""
+password=""
+# skip tls cert validation
+insecure_skip_verify = true
+
##########################
##########################
diff --git a/weed/filer/arangodb/arangodb_store.go b/weed/filer/arangodb/arangodb_store.go
new file mode 100644
index 000000000..9fd1fffb3
--- /dev/null
+++ b/weed/filer/arangodb/arangodb_store.go
@@ -0,0 +1,347 @@
+package arangodb
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/arangodb/go-driver"
+ "github.com/arangodb/go-driver/http"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &ArangodbStore{})
+}
+
+var (
+ BUCKET_PREFIX = "/buckets"
+ DEFAULT_COLLECTION = "seaweed_no_bucket"
+ KVMETA_COLLECTION = "seaweed_kvmeta"
+)
+
+type ArangodbStore struct {
+ connect driver.Connection
+ client driver.Client
+ database driver.Database
+ kvCollection driver.Collection
+
+ buckets map[string]driver.Collection
+ mu sync.RWMutex
+
+ databaseName string
+}
+
+type Model struct {
+ Key string `json:"_key"`
+ Directory string `json:"directory,omitempty"`
+ Name string `json:"name,omitempty"`
+ Ttl string `json:"ttl,omitempty"`
+
+ //arangodb does not support binary blobs
+ //we encode byte slice into uint64 slice
+ //see helpers.go
+ Meta []uint64 `json:"meta"`
+}
+
+func (store *ArangodbStore) GetName() string {
+ return "arangodb"
+}
+
+func (store *ArangodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ store.buckets = make(map[string]driver.Collection, 3)
+ store.databaseName = configuration.GetString(prefix + "db_name")
+ return store.connection(configuration.GetStringSlice(prefix+"servers"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"insecure_skip_verify"),
+ )
+}
+
+func (store *ArangodbStore) connection(uris []string, user string, pass string, insecure bool) (err error) {
+ ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
+
+ store.connect, err = http.NewConnection(http.ConnectionConfig{
+ Endpoints: uris,
+ TLSConfig: &tls.Config{
+ InsecureSkipVerify: insecure,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ store.client, err = driver.NewClient(driver.ClientConfig{
+ Connection: store.connect,
+ Authentication: driver.BasicAuthentication(user, pass),
+ })
+ if err != nil {
+ return err
+ }
+ ok, err := store.client.DatabaseExists(ctx, store.databaseName)
+ if err != nil {
+ return err
+ }
+ if ok {
+ store.database, err = store.client.Database(ctx, store.databaseName)
+ } else {
+ store.database, err = store.client.CreateDatabase(ctx, store.databaseName, &driver.CreateDatabaseOptions{})
+ }
+ if err != nil {
+ return err
+ }
+ if store.kvCollection, err = store.ensureCollection(ctx, KVMETA_COLLECTION); err != nil {
+ return err
+ }
+ return err
+}
+
+type key int
+
+const (
+ transactionKey key = 0
+)
+
+func (store *ArangodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ keys := make([]string, 0, len(store.buckets)+1)
+ for k := range store.buckets {
+ keys = append(keys, k)
+ }
+ keys = append(keys, store.kvCollection.Name())
+ txn, err := store.database.BeginTransaction(ctx, driver.TransactionCollections{
+ Exclusive: keys,
+ }, &driver.BeginTransactionOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(ctx, transactionKey, txn), nil
+}
+
+func (store *ArangodbStore) CommitTransaction(ctx context.Context) error {
+ val := ctx.Value(transactionKey)
+ cast, ok := val.(driver.TransactionID)
+ if !ok {
+ return fmt.Errorf("txn cast fail %s:", val)
+ }
+ err := store.database.CommitTransaction(ctx, cast, &driver.CommitTransactionOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (store *ArangodbStore) RollbackTransaction(ctx context.Context) error {
+ val := ctx.Value(transactionKey)
+ cast, ok := val.(driver.TransactionID)
+ if !ok {
+ return fmt.Errorf("txn cast fail %s:", val)
+ }
+ err := store.database.AbortTransaction(ctx, cast, &driver.AbortTransactionOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ dir, name := entry.FullPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ meta = util.MaybeGzipData(meta)
+ }
+ model := &Model{
+ Key: hashString(string(entry.FullPath)),
+ Directory: dir,
+ Name: name,
+ Meta: bytesToArray(meta),
+ }
+ if entry.TtlSec > 0 {
+ model.Ttl = time.Now().Add(time.Second * time.Duration(entry.TtlSec)).Format(time.RFC3339)
+ } else {
+ model.Ttl = ""
+ }
+
+ targetCollection, err := store.extractBucketCollection(ctx, entry.FullPath)
+ if err != nil {
+ return err
+ }
+ _, err = targetCollection.CreateDocument(ctx, model)
+ if driver.IsConflict(err) {
+ return store.UpdateEntry(ctx, entry)
+ }
+
+ if err != nil {
+ return fmt.Errorf("InsertEntry %s: %v", entry.FullPath, err)
+ }
+
+ return nil
+
+}
+
+func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ dir, name := entry.FullPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ meta = util.MaybeGzipData(meta)
+ }
+ model := &Model{
+ Key: hashString(string(entry.FullPath)),
+ Directory: dir,
+ Name: name,
+ Meta: bytesToArray(meta),
+ }
+ if entry.TtlSec > 0 {
+ model.Ttl = time.Now().Add(time.Duration(entry.TtlSec) * time.Second).Format(time.RFC3339)
+ } else {
+ model.Ttl = "none"
+ }
+ targetCollection, err := store.extractBucketCollection(ctx, entry.FullPath)
+ if err != nil {
+ return err
+ }
+ _, err = targetCollection.UpdateDocument(ctx, model.Key, model)
+ if err != nil {
+ return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
+ var data Model
+ targetCollection, err := store.extractBucketCollection(ctx, fullpath)
+ if err != nil {
+ return nil, err
+ }
+ _, err = targetCollection.ReadDocument(ctx, hashString(string(fullpath)), &data)
+ if err != nil {
+ if driver.IsNotFound(err) {
+ return nil, filer_pb.ErrNotFound
+ }
+ glog.Errorf("find %s: %v", fullpath, err)
+ return nil, filer_pb.ErrNotFound
+ }
+ if len(data.Meta) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(arrayToBytes(data.Meta)))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
+ targetCollection, err := store.extractBucketCollection(ctx, fullpath)
+ if err != nil {
+ return err
+ }
+ _, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
+ if err != nil && !driver.IsNotFound(err) {
+ glog.Errorf("find %s: %v", fullpath, err)
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+ return nil
+}
+
+// this runs in log time
+func (store *ArangodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
+ var query string
+ targetCollection, err := store.extractBucketCollection(ctx, fullpath)
+ if err != nil {
+ return err
+ }
+ query = query + fmt.Sprintf(`
+ for d in %s
+ filter starts_with(d.directory, "%s/") || d.directory == "%s"
+ remove d._key in %s`,
+ targetCollection.Name(),
+ strings.Join(strings.Split(string(fullpath), "/"), ","),
+ string(fullpath),
+ targetCollection.Name(),
+ )
+ cur, err := store.database.Query(ctx, query, nil)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+ defer cur.Close()
+ return nil
+}
+
+func (store *ArangodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *ArangodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ targetCollection, err := store.extractBucketCollection(ctx, dirPath+"/")
+ if err != nil {
+ return lastFileName, err
+ }
+ query := "for d in " + targetCollection.Name()
+ if includeStartFile {
+ query = query + " filter d.name >= \"" + startFileName + "\" "
+ } else {
+ query = query + " filter d.name > \"" + startFileName + "\" "
+ }
+ if prefix != "" {
+ query = query + fmt.Sprintf(`&& starts_with(d.name, "%s")`, prefix)
+ }
+ query = query + `
+filter d.directory == @dir
+sort d.name asc
+`
+ if limit > 0 {
+ query = query + "limit " + strconv.Itoa(int(limit))
+ }
+ query = query + "\n return d"
+ cur, err := store.database.Query(ctx, query, map[string]interface{}{"dir": dirPath})
+ if err != nil {
+ return lastFileName, fmt.Errorf("failed to list directory entries: find error: %w", err)
+ }
+ defer cur.Close()
+ for cur.HasMore() {
+ var data Model
+ _, err = cur.ReadDocument(ctx, &data)
+ if err != nil {
+ break
+ }
+ entry := &filer.Entry{
+ FullPath: util.NewFullPath(data.Directory, data.Name),
+ }
+ lastFileName = data.Name
+ converted := arrayToBytes(data.Meta)
+ if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+
+ if !eachEntryFunc(entry) {
+ break
+ }
+
+ }
+ return lastFileName, err
+}
+
+func (store *ArangodbStore) Shutdown() {
+}
diff --git a/weed/filer/arangodb/arangodb_store_bucket.go b/weed/filer/arangodb/arangodb_store_bucket.go
new file mode 100644
index 000000000..810d639a7
--- /dev/null
+++ b/weed/filer/arangodb/arangodb_store_bucket.go
@@ -0,0 +1,40 @@
+package arangodb
+
+import (
+ "context"
+ "github.com/arangodb/go-driver"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+var _ filer.BucketAware = (*ArangodbStore)(nil)
+
+func (store *ArangodbStore) OnBucketCreation(bucket string) {
+ timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ // create the collection && add to cache
+ _, err := store.ensureBucket(timeout, bucket)
+ if err != nil {
+ glog.Errorf("bucket create %s: %v", bucket, err)
+ }
+}
+func (store *ArangodbStore) OnBucketDeletion(bucket string) {
+ timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ collection, err := store.ensureBucket(timeout, bucket)
+ if err != nil {
+ glog.Errorf("bucket delete %s: %v", bucket, err)
+ return
+ }
+ err = collection.Remove(timeout)
+ if err != nil && !driver.IsNotFound(err) {
+ glog.Errorf("bucket delete %s: %v", bucket, err)
+ return
+ }
+}
+func (store *ArangodbStore) CanDropWholeBucket() bool {
+ return true
+}
diff --git a/weed/filer/arangodb/arangodb_store_kv.go b/weed/filer/arangodb/arangodb_store_kv.go
new file mode 100644
index 000000000..c1307e78d
--- /dev/null
+++ b/weed/filer/arangodb/arangodb_store_kv.go
@@ -0,0 +1,54 @@
+package arangodb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/arangodb/go-driver"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ model := &Model{
+ Key: hashString(".kvstore." + string(key)),
+ Directory: ".kvstore." + string(key),
+ Meta: bytesToArray(value),
+ }
+
+ exists, err := store.kvCollection.DocumentExists(ctx, model.Key)
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+ if exists {
+ _, err = store.kvCollection.UpdateDocument(ctx, model.Key, model)
+ } else {
+ _, err = store.kvCollection.CreateDocument(ctx, model)
+ }
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ var model Model
+ _, err = store.kvCollection.ReadDocument(ctx, hashString(".kvstore."+string(key)), &model)
+ if driver.IsNotFound(err) {
+ return nil, filer.ErrKvNotFound
+ }
+ if err != nil {
+ glog.Errorf("kv get: %s %v", string(key), err)
+ return nil, filer.ErrKvNotFound
+ }
+ return arrayToBytes(model.Meta), nil
+}
+
+func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ _, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
+ if err != nil {
+ glog.Errorf("kv del: %v", err)
+ return filer.ErrKvNotFound
+ }
+ return nil
+}
diff --git a/weed/filer/arangodb/helpers.go b/weed/filer/arangodb/helpers.go
new file mode 100644
index 000000000..943189781
--- /dev/null
+++ b/weed/filer/arangodb/helpers.go
@@ -0,0 +1,136 @@
+package arangodb
+
+import (
+ "context"
+ "crypto/md5"
+ "encoding/binary"
+ "encoding/hex"
+ "io"
+ "strings"
+
+ "github.com/arangodb/go-driver"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+//convert a string into arango-key safe hex bytes hash
+func hashString(dir string) string {
+ h := md5.New()
+ io.WriteString(h, dir)
+ b := h.Sum(nil)
+ return hex.EncodeToString(b)
+}
+
+// convert slice of bytes into slice of uint64
+// the first uint64 indicates the length in bytes
+func bytesToArray(bs []byte) []uint64 {
+ out := make([]uint64, 0, 2+len(bs)/8)
+ out = append(out, uint64(len(bs)))
+ for len(bs)%8 != 0 {
+ bs = append(bs, 0)
+ }
+ for i := 0; i < len(bs); i = i + 8 {
+ out = append(out, binary.BigEndian.Uint64(bs[i:]))
+ }
+ return out
+}
+
+// convert from slice of uint64 back to bytes
+// if input length is 0 or 1, will return nil
+func arrayToBytes(xs []uint64) []byte {
+ if len(xs) < 2 {
+ return nil
+ }
+ first := xs[0]
+ out := make([]byte, len(xs)*8) // i think this can actually be len(xs)*8-8, but i dont think an extra 8 bytes hurts...
+ for i := 1; i < len(xs); i = i + 1 {
+ binary.BigEndian.PutUint64(out[((i-1)*8):], xs[i])
+ }
+ return out[:first]
+}
+
+// gets the collection the bucket points to from filepath
+func (store *ArangodbStore) extractBucketCollection(ctx context.Context, fullpath util.FullPath) (c driver.Collection, err error) {
+ bucket, _ := extractBucket(fullpath)
+ if bucket == "" {
+ bucket = DEFAULT_COLLECTION
+ }
+ c, err = store.ensureBucket(ctx, bucket)
+ if err != nil {
+ return nil, err
+ }
+ return c, err
+}
+
+// called by extractBucketCollection
+func extractBucket(fullpath util.FullPath) (string, string) {
+ if !strings.HasPrefix(string(fullpath), BUCKET_PREFIX+"/") {
+ return "", string(fullpath)
+ }
+ if strings.Count(string(fullpath), "/") < 3 {
+ return "", string(fullpath)
+ }
+ bucketAndObjectKey := string(fullpath)[len(BUCKET_PREFIX+"/"):]
+ t := strings.Index(bucketAndObjectKey, "/")
+ bucket := bucketAndObjectKey
+ shortPath := "/"
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ shortPath = string(util.FullPath(bucketAndObjectKey[t:]))
+ }
+ return bucket, shortPath
+}
+
+// get bucket collection from cache. if not exist, creates the buckets collection and grab it
+func (store *ArangodbStore) ensureBucket(ctx context.Context, bucket string) (bc driver.Collection, err error) {
+ var ok bool
+ store.mu.RLock()
+ bc, ok = store.buckets[bucket]
+ store.mu.RUnlock()
+ if ok {
+ return bc, nil
+ }
+ store.mu.Lock()
+ defer store.mu.Unlock()
+ store.buckets[bucket], err = store.ensureCollection(ctx, bucket)
+ if err != nil {
+ return nil, err
+ }
+ return store.buckets[bucket], nil
+}
+
+// creates collection if not exist, ensures indices if not exist
+func (store *ArangodbStore) ensureCollection(ctx context.Context, name string) (c driver.Collection, err error) {
+ ok, err := store.database.CollectionExists(ctx, name)
+ if err != nil {
+ return
+ }
+ if ok {
+ c, err = store.database.Collection(ctx, name)
+ } else {
+ c, err = store.database.CreateCollection(ctx, name, &driver.CreateCollectionOptions{})
+ }
+ if err != nil {
+ return
+ }
+ // ensure indices
+ if _, _, err = c.EnsurePersistentIndex(ctx, []string{"directory", "name"},
+ &driver.EnsurePersistentIndexOptions{
+ Name: "directory_name_multi", Unique: true,
+ }); err != nil {
+ return
+ }
+ if _, _, err = c.EnsurePersistentIndex(ctx, []string{"directory"},
+ &driver.EnsurePersistentIndexOptions{Name: "IDX_directory"}); err != nil {
+ return
+ }
+ if _, _, err = c.EnsureTTLIndex(ctx, "ttl", 1,
+ &driver.EnsureTTLIndexOptions{Name: "IDX_TTL"}); err != nil {
+ return
+ }
+ if _, _, err = c.EnsurePersistentIndex(ctx, []string{"name"}, &driver.EnsurePersistentIndexOptions{
+ Name: "IDX_name",
+ }); err != nil {
+ return
+ }
+ return c, nil
+}
diff --git a/weed/filer/arangodb/readme.md b/weed/filer/arangodb/readme.md
new file mode 100644
index 000000000..e189811fb
--- /dev/null
+++ b/weed/filer/arangodb/readme.md
@@ -0,0 +1,52 @@
+##arangodb
+
+database: https://github.com/arangodb/arangodb
+go driver: https://github.com/arangodb/go-driver
+
+options:
+
+```
+[arangodb]
+enabled=true
+db_name="seaweedfs"
+servers=["http://localhost:8529"]
+#basic auth
+user="root"
+pass="test"
+
+# tls settings
+insecure_skip_verify=true
+```
+
+i test using this dev database:
+`docker run -p 8529:8529 -e ARANGO_ROOT_PASSWORD=test arangodb/arangodb:3.9.0`
+
+
+## features i don't personally need but are missing
+ [ ] provide tls cert to arango
+ [ ] authentication that is not basic auth
+ [ ] synchronise endpoint interval config
+ [ ] automatic creation of custom index
+ [ ] configure default arangodb collection sharding rules
+ [ ] configure default arangodb collection replication rules
+
+
+## complexity
+
+ok, so if https://www.arangodb.com/docs/stable/indexing-index-basics.html#persistent-index is correct
+
+O(1)
+- InsertEntry
+- UpdateEntry
+- FindEntry
+- DeleteEntry
+- KvPut
+- KvGet
+- KvDelete
+
+O(log(BUCKET_SIZE))
+- DeleteFolderChildren
+
+O(log(DIRECTORY_SIZE))
+- ListDirectoryEntries
+- ListDirectoryPrefixedEntries
diff --git a/weed/filer/elastic/v7/doc.go b/weed/filer/elastic/v7/doc.go
new file mode 100644
index 000000000..704bbf6de
--- /dev/null
+++ b/weed/filer/elastic/v7/doc.go
@@ -0,0 +1,9 @@
+/*
+
+Package elastic is for elastic filer store.
+
+The referenced "github.com/olivere/elastic/v7" library is too big when compiled.
+So this is only compiled in "make full_install".
+
+*/
+package elastic
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
index a16e5ebca..cb2c66f5a 100644
--- a/weed/filer/elastic/v7/elastic_store.go
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -1,3 +1,6 @@
+//go:build elastic
+// +build elastic
+
package elastic
import (
diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go
index 99c03314e..43835c153 100644
--- a/weed/filer/elastic/v7/elastic_store_kv.go
+++ b/weed/filer/elastic/v7/elastic_store_kv.go
@@ -1,3 +1,6 @@
+//go:build elastic
+// +build elastic
+
package elastic
import (
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index c774f5d27..27e68433d 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -25,9 +25,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
if findErr != nil {
return findErr
}
-
isDeleteCollection := f.isBucket(entry)
-
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(chunks []*filer_pb.FileChunk) error {
diff --git a/weed/filer/sqlite/doc.go b/weed/filer/sqlite/doc.go
new file mode 100644
index 000000000..833addf54
--- /dev/null
+++ b/weed/filer/sqlite/doc.go
@@ -0,0 +1,9 @@
+/*
+
+Package sqlite is for sqlite filer store.
+
+The referenced "modernc.org/sqlite" library is too big when compiled.
+So this is only compiled in "make full_install".
+
+*/
+package sqlite
diff --git a/weed/filer/sqlite/sqlite_store.go b/weed/filer/sqlite/sqlite_store.go
index ca9d38786..70a4bf390 100644
--- a/weed/filer/sqlite/sqlite_store.go
+++ b/weed/filer/sqlite/sqlite_store.go
@@ -1,5 +1,6 @@
-//go:build linux || darwin || windows
+//go:build (linux || darwin || windows) && sqlite
// +build linux darwin windows
+// +build sqlite
// limited GOOS due to modernc.org/libc/unistd
diff --git a/weed/filer/sqlite/sqlite_store_unsupported.go b/weed/filer/sqlite/sqlite_store_unsupported.go
index 0fba1ea33..351d2e501 100644
--- a/weed/filer/sqlite/sqlite_store_unsupported.go
+++ b/weed/filer/sqlite/sqlite_store_unsupported.go
@@ -1,5 +1,5 @@
-//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64
-// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64
+//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64 && !sqlite
+// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64,!sqlite
// limited GOOS due to modernc.org/libc/unistd
diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
index 3ba2590b0..fedc837d6 100644
--- a/weed/iamapi/iamapi_management_handlers.go
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -385,27 +385,28 @@ func handleImplicitUsername(r *http.Request, values url.Values) {
if len(r.Header["Authorization"]) == 0 || values.Get("UserName") != "" {
return
}
- // get username who signs the request
- // for a typical Authorization:
+ // get username who signs the request. For a typical Authorization:
// "AWS4-HMAC-SHA256 Credential=197FSAQ7HHTA48X64O3A/20220420/test1/iam/aws4_request, SignedHeaders=content-type;
- // host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8"
+ // host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8",
// the "test1" will be extracted as the username
+ glog.V(4).Infof("Authorization field: %v", r.Header["Authorization"][0])
s := strings.Split(r.Header["Authorization"][0], "Credential=")
if len(s) < 2 {
return
}
- glog.V(6).Infof("s: %v\n", s)
+ glog.V(4).Infof("First strip: %v", s)
s = strings.Split(s[1], ",")
if len(s) < 2 {
return
}
- glog.V(6).Infof("s: %v\n", s)
+ glog.V(4).Infof("Second strip: %v", s)
s = strings.Split(s[0], "/")
if len(s) < 5 {
return
}
- glog.V(6).Infof("s: %v\n", s)
+ glog.V(4).Infof("Third strip: %v", s)
userName := s[2]
+ glog.V(4).Infof("UserName: %v", userName)
values.Set("UserName", userName)
}
diff --git a/weed/notification/gocdk_pub_sub/doc.go b/weed/notification/gocdk_pub_sub/doc.go
new file mode 100644
index 000000000..d7fbb9f78
--- /dev/null
+++ b/weed/notification/gocdk_pub_sub/doc.go
@@ -0,0 +1,9 @@
+/*
+
+Package gocdk_pub_sub is for Go CDK PubSub providers such as Azure Service Bus and RabbitMQ.
+
+The referenced "gocloud.dev/pubsub" library is too big when compiled.
+So this is only compiled in "make full_install".
+
+*/
+package gocdk_pub_sub
diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
index 01c4d901f..f31b6997e 100644
--- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
+++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
@@ -1,3 +1,6 @@
+//go:build gocdk
+// +build gocdk
+
// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API,
// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus,
// Google Cloud PubSub, and RabbitMQ.
diff --git a/weed/remote_storage/hdfs/doc.go b/weed/remote_storage/hdfs/doc.go
new file mode 100644
index 000000000..086c9de3f
--- /dev/null
+++ b/weed/remote_storage/hdfs/doc.go
@@ -0,0 +1,9 @@
+/*
+
+Package hdfs is for remote HDFS storage.
+
+The referenced "github.com/colinmarc/hdfs/v2" library is too big when compiled.
+So this is only compiled in "make full_install".
+
+*/
+package hdfs
diff --git a/weed/remote_storage/hdfs/hdfs_kerberos.go b/weed/remote_storage/hdfs/hdfs_kerberos.go
index 50abc0ad5..ba152020a 100644
--- a/weed/remote_storage/hdfs/hdfs_kerberos.go
+++ b/weed/remote_storage/hdfs/hdfs_kerberos.go
@@ -1,3 +1,6 @@
+//go:build hdfs
+// +build hdfs
+
package hdfs
import (
diff --git a/weed/remote_storage/hdfs/hdfs_storage_client.go b/weed/remote_storage/hdfs/hdfs_storage_client.go
index 4d76ac0ba..3b71958fd 100644
--- a/weed/remote_storage/hdfs/hdfs_storage_client.go
+++ b/weed/remote_storage/hdfs/hdfs_storage_client.go
@@ -1,3 +1,6 @@
+//go:build hdfs
+// +build hdfs
+
package hdfs
import (
@@ -7,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/colinmarc/hdfs/v2"
+ hdfs "github.com/colinmarc/hdfs/v2"
"io"
"os"
"path"
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
index b16eec2e1..cb690e3ce 100644
--- a/weed/replication/sub/notification_gocdk_pub_sub.go
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -1,3 +1,6 @@
+//go:build gocdk
+// +build gocdk
+
package sub
import (
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 7edd5870f..8779e9ac0 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -21,6 +21,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/arangodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 7d3aa28a5..1aa33e054 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -5,16 +5,6 @@ import (
"context"
"flag"
"fmt"
- "io"
- "io/ioutil"
- "math"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
@@ -25,6 +15,17 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
)
func init() {
@@ -65,8 +66,11 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
verbose := fsckCommand.Bool("v", false, "verbose mode")
findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
+ findMissingChunksInVolumeId := fsckCommand.Int("findMissingChunksInVolumeId", 0, "used together with findMissingChunksInFiler")
applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer")
purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
+ tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
+
if err = fsckCommand.Parse(args); err != nil {
return nil
}
@@ -78,7 +82,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
c.env = commandEnv
// create a temp folder
- tempFolder, err := os.MkdirTemp("", "sw_fsck")
+ tempFolder, err := os.MkdirTemp(*tempPath, "sw_fsck")
if err != nil {
return fmt.Errorf("failed to create temp folder: %v", err)
}
@@ -88,14 +92,14 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
defer os.RemoveAll(tempFolder)
// collect all volume id locations
- volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
+ dataNodeVolumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
if err != nil {
return fmt.Errorf("failed to collect all volume locations: %v", err)
}
isBucketsPath := false
var fillerBucketsPath string
- if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "" {
+ if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "/" {
fillerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
return fmt.Errorf("read filer buckets path: %v", err)
@@ -108,34 +112,43 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("read filer buckets path: %v", err)
}
+ collectMtime := time.Now().Unix()
// collect each volume file ids
- for volumeId, vinfo := range volumeIdToVInfo {
- if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
- delete(volumeIdToVInfo, volumeId)
- continue
- }
- err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
- if err != nil {
- return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+ for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+ for volumeId, vinfo := range volumeIdToVInfo {
+ if *findMissingChunksInVolumeId > 0 && uint32(*findMissingChunksInVolumeId) != volumeId {
+ delete(volumeIdToVInfo, volumeId)
+ continue
+ }
+ if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
+ delete(volumeIdToVInfo, volumeId)
+ continue
+ }
+ err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+ }
}
}
if *findMissingChunksInFiler {
// collect all filer file ids and paths
- if err = c.collectFilerFileIdAndPaths(volumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
+ if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent, collectMtime); err != nil {
return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
}
- // for each volume, check filer file ids
- if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
- return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+ for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+ // for each volume, check filer file ids
+ if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, dataNodeId, writer, *verbose, *applyPurging); err != nil {
+ return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+ }
}
} else {
// collect all filer file ids
- if err = c.collectFilerFileIds(volumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
+ if err = c.collectFilerFileIds(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
return fmt.Errorf("failed to collect file ids from filer: %v", err)
}
// volume file ids subtract filer file ids
- if err = c.findExtraChunksInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
+ if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
}
}
@@ -143,19 +156,24 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
return nil
}
-func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
+func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool, collectMtime int64) error {
if verbose {
fmt.Fprintf(writer, "checking each file from filer ...\n")
}
files := make(map[uint32]*os.File)
- for vid := range volumeIdToServer {
- dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
- if openErr != nil {
- return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+ for vid := range volumeIdToServer {
+ if _, ok := files[vid]; ok {
+ continue
+ }
+ dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ }
+ files[vid] = dst
}
- files[vid] = dst
}
defer func() {
for _, f := range files {
@@ -179,6 +197,9 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
}
dataChunks = append(dataChunks, manifestChunks...)
for _, chunk := range dataChunks {
+ if chunk.Mtime > collectMtime {
+ continue
+ }
outputChan <- &Item{
vid: chunk.Fid.VolumeId,
fileKey: chunk.Fid.FileKey,
@@ -210,10 +231,10 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
}
-func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, dataNodeId string, writer io.Writer, verbose bool, applyPurging bool) error {
for volumeId, vinfo := range volumeIdToVInfo {
- checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, volumeId, writer, verbose, applyPurging)
+ checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, dataNodeId, volumeId, writer, verbose, applyPurging)
if checkErr != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
}
@@ -221,55 +242,93 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
return nil
}
-func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
+ volumeIdOrphanFileIds := make(map[uint32]map[string]bool)
+ isSeveralReplicas := make(map[uint32]bool)
+ isEcVolumeReplicas := make(map[uint32]bool)
+ isReadOnlyReplicas := make(map[uint32]bool)
+ serverReplicas := make(map[uint32][]pb.ServerAddress)
+ for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+ for volumeId, vinfo := range volumeIdToVInfo {
+ inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, dataNodeId, volumeId, writer, verbose)
+ if checkErr != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+ }
+ isSeveralReplicas[volumeId] = false
+ if _, found := volumeIdOrphanFileIds[volumeId]; !found {
+ volumeIdOrphanFileIds[volumeId] = make(map[string]bool)
+ } else {
+ isSeveralReplicas[volumeId] = true
+ }
+ for _, fid := range orphanFileIds {
+ if isSeveralReplicas[volumeId] {
+ if _, found := volumeIdOrphanFileIds[volumeId][fid]; !found {
+ continue
+ }
+ }
+ volumeIdOrphanFileIds[volumeId][fid] = isSeveralReplicas[volumeId]
+ }
- for volumeId, vinfo := range volumeIdToVInfo {
- inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, verbose)
- if checkErr != nil {
- return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
- }
- totalInUseCount += inUseCount
- totalOrphanChunkCount += uint64(len(orphanFileIds))
- totalOrphanDataSize += orphanDataSize
+ totalInUseCount += inUseCount
+ totalOrphanChunkCount += uint64(len(orphanFileIds))
+ totalOrphanDataSize += orphanDataSize
- if verbose {
- for _, fid := range orphanFileIds {
- fmt.Fprintf(writer, "%s\n", fid)
+ if verbose {
+ for _, fid := range orphanFileIds {
+ fmt.Fprintf(writer, "%s\n", fid)
+ }
+ }
+ isEcVolumeReplicas[volumeId] = vinfo.isEcVolume
+ if isReadOnly, found := isReadOnlyReplicas[volumeId]; !(found && isReadOnly) {
+ isReadOnlyReplicas[volumeId] = vinfo.isReadOnly
}
+ serverReplicas[volumeId] = append(serverReplicas[volumeId], vinfo.server)
}
- if applyPurging && len(orphanFileIds) > 0 {
+ for volumeId, orphanReplicaFileIds := range volumeIdOrphanFileIds {
+ if !(applyPurging && len(orphanReplicaFileIds) > 0) {
+ continue
+ }
+ orphanFileIds := []string{}
+ for fid, foundInAllReplicas := range orphanReplicaFileIds {
+ if !isSeveralReplicas[volumeId] || (isSeveralReplicas[volumeId] && foundInAllReplicas) {
+ orphanFileIds = append(orphanFileIds, fid)
+ }
+ }
+ if !(len(orphanFileIds) > 0) {
+ continue
+ }
if verbose {
fmt.Fprintf(writer, "purging process for volume %d", volumeId)
}
- if vinfo.isEcVolume {
+ if isEcVolumeReplicas[volumeId] {
fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
continue
}
+ for _, server := range serverReplicas[volumeId] {
+ needleVID := needle.VolumeId(volumeId)
- needleVID := needle.VolumeId(volumeId)
-
- if vinfo.isReadOnly {
- err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
- if err != nil {
- return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
- }
-
- fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
- defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
- }
+ if isReadOnlyReplicas[volumeId] {
+ err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, true)
+ if err != nil {
+ return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
+ }
- fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
+ fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, server)
+ defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, false)
- if verbose {
- fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
- }
+ fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, server)
+ }
+ if verbose {
+ fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
+ }
- if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
- return fmt.Errorf("purging volume %d: %v", volumeId, err)
+ if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+ return fmt.Errorf("purging volume %d: %v", volumeId, err)
+ }
}
}
}
@@ -290,7 +349,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[u
return nil
}
-func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
if verbose {
fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -316,7 +375,7 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
}
- err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+ err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, dataNodeId, volumeId))
if err != nil {
return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
}
@@ -327,19 +386,21 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
}
-func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
+func (c *commandVolumeFsck) collectFilerFileIds(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
if verbose {
fmt.Fprintf(writer, "collecting file ids from filer ...\n")
}
files := make(map[uint32]*os.File)
- for vid := range volumeIdToServer {
- dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
- if openErr != nil {
- return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+ for vid := range volumeIdToServer {
+ dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ }
+ files[vid] = dst
}
- files[vid] = dst
}
defer func() {
for _, f := range files {
@@ -377,16 +438,16 @@ func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInf
})
}
-func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
if verbose {
- fmt.Fprintf(writer, "find missing file chunks in volume %d ...\n", volumeId)
+ fmt.Fprintf(writer, "find missing file chunks in dataNodeId %s volume %d ...\n", dataNodeId, volumeId)
}
db := needle_map.NewMemDb()
defer db.Close()
- if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+ if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
return
}
@@ -473,12 +534,12 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath, verbose bool) {
}
}
-func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
db := needle_map.NewMemDb()
defer db.Close()
- if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+ if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
return
}
@@ -509,8 +570,8 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
if orphanFileCount > 0 {
pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
- fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
- volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
+ fmt.Fprintf(writer, "dataNode:%s\tvolume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ dataNodeId, volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
}
return
@@ -524,13 +585,13 @@ type VInfo struct {
isReadOnly bool
}
-func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[string]map[uint32]VInfo, err error) {
if verbose {
fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
}
- volumeIdToServer = make(map[uint32]VInfo)
+ volumeIdToServer = make(map[string]map[uint32]VInfo)
// collect topology information
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
@@ -539,8 +600,10 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
for _, diskInfo := range t.DiskInfos {
+ dataNodeId := t.GetId()
+ volumeIdToServer[dataNodeId] = make(map[uint32]VInfo)
for _, vi := range diskInfo.VolumeInfos {
- volumeIdToServer[vi.Id] = VInfo{
+ volumeIdToServer[dataNodeId][vi.Id] = VInfo{
server: pb.NewServerAddressFromDataNode(t),
collection: vi.Collection,
isEcVolume: false,
@@ -548,7 +611,7 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
}
}
for _, ecShardInfo := range diskInfo.EcShardInfos {
- volumeIdToServer[ecShardInfo.Id] = VInfo{
+ volumeIdToServer[dataNodeId][ecShardInfo.Id] = VInfo{
server: pb.NewServerAddressFromDataNode(t),
collection: ecShardInfo.Collection,
isEcVolume: true,
@@ -600,8 +663,8 @@ func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []
return
}
-func getVolumeFileIdFile(tempFolder string, vid uint32) string {
- return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+func getVolumeFileIdFile(tempFolder string, dataNodeid string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%s_%d.idx", dataNodeid, vid))
}
func getFilerFileIdFile(tempFolder string, vid uint32) string {
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index 9150752d5..3a5633168 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"golang.org/x/exp/slices"
+ "path/filepath"
"io"
)
@@ -16,6 +17,9 @@ func init() {
}
type commandVolumeList struct {
+ collectionPattern *string
+ readonly *bool
+ volumeId *uint64
}
func (c *commandVolumeList) Name() string {
@@ -34,6 +38,10 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
+ c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
+ c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
+ c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
+
if err = volumeListCommand.Parse(args); err != nil {
return nil
}
@@ -44,7 +52,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
return err
}
- writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
+ c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
return nil
}
@@ -65,53 +73,71 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
return buf.String()
}
-func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
+func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
return a.Id < b.Id
})
var s statistics
for _, dc := range t.DataCenterInfos {
- s = s.plus(writeDataCenterInfo(writer, dc, verbosityLevel))
+ s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
}
output(verbosityLevel >= 0, writer, "%+v \n", s)
return s
}
-func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
return a.Id < b.Id
})
for _, r := range t.RackInfos {
- s = s.plus(writeRackInfo(writer, r, verbosityLevel))
+ s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
}
output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
return s
}
-func writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
return a.Id < b.Id
})
for _, dn := range t.DataNodeInfos {
- s = s.plus(writeDataNodeInfo(writer, dn, verbosityLevel))
+ s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
}
output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
return s
}
-func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
for _, diskInfo := range t.DiskInfos {
- s = s.plus(writeDiskInfo(writer, diskInfo, verbosityLevel))
+ s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
}
output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
return s
}
-func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
+func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
+ if *c.readonly && !readOnly {
+ return true
+ }
+ if *c.collectionPattern != "" {
+ if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
+ return true
+ }
+ }
+ if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
+ return true
+ }
+ return false
+}
+
+func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
var s statistics
diskType := t.Type
if diskType == "" {
@@ -122,9 +148,15 @@ func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int)
return a.Id < b.Id
})
for _, vi := range t.VolumeInfos {
+ if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
+ continue
+ }
s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
}
for _, ecShardInfo := range t.EcShardInfos {
+ if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
+ continue
+ }
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
}
output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 30fe63b63..fa2897fbc 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -434,10 +434,13 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
}
for _, location := range s.Locations {
- if err := location.UnloadVolume(i); err == nil || err == ErrVolumeNotFound {
+ err := location.UnloadVolume(i)
+ if err == nil {
glog.V(0).Infof("UnmountVolume %d", i)
s.DeletedVolumesChan <- message
return nil
+ } else if err == ErrVolumeNotFound {
+ continue
}
}
@@ -458,10 +461,13 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
DiskType: string(v.location.DiskType),
}
for _, location := range s.Locations {
- if err := location.DeleteVolume(i); err == nil || err == ErrVolumeNotFound {
+ err := location.DeleteVolume(i)
+ if err == nil {
glog.V(0).Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil
+ } else if err == ErrVolumeNotFound {
+ continue
} else {
glog.Errorf("DeleteVolume %d: %v", i, err)
}