Diffstat (limited to 'weed/filer2')
-rw-r--r--  weed/filer2/abstract_sql/abstract_sql_store.go    98
-rw-r--r--  weed/filer2/abstract_sql/hashing.go                32
-rw-r--r--  weed/filer2/cassandra/cassandra_store.go           55
-rw-r--r--  weed/filer2/configuration.go                        3
-rw-r--r--  weed/filer2/entry.go                               27
-rw-r--r--  weed/filer2/entry_codec.go                         33
-rw-r--r--  weed/filer2/etcd/etcd_store.go                    202
-rw-r--r--  weed/filer2/filechunks.go                          92
-rw-r--r--  weed/filer2/filechunks_test.go                     38
-rw-r--r--  weed/filer2/filer.go                              219
-rw-r--r--  weed/filer2/filer_buckets.go                      121
-rw-r--r--  weed/filer2/filer_delete_entry.go                 128
-rw-r--r--  weed/filer2/filer_deletion.go                      76
-rw-r--r--  weed/filer2/filer_notify.go                       153
-rw-r--r--  weed/filer2/filer_notify_append.go                 73
-rw-r--r--  weed/filer2/filer_notify_test.go                    4
-rw-r--r--  weed/filer2/filerstore.go                         136
-rw-r--r--  weed/filer2/fullpath.go                            31
-rw-r--r--  weed/filer2/leveldb/leveldb_store.go               90
-rw-r--r--  weed/filer2/leveldb/leveldb_store_test.go          25
-rw-r--r--  weed/filer2/leveldb2/leveldb2_store.go            248
-rw-r--r--  weed/filer2/leveldb2/leveldb2_store_test.go        90
-rw-r--r--  weed/filer2/memdb/memdb_store.go                  113
-rw-r--r--  weed/filer2/memdb/memdb_store_test.go             144
-rw-r--r--  weed/filer2/meta_aggregator.go                     91
-rw-r--r--  weed/filer2/mongodb/mongodb_store.go              210
-rw-r--r--  weed/filer2/mysql/mysql_store.go                   25
-rw-r--r--  weed/filer2/postgres/README.txt                     4
-rw-r--r--  weed/filer2/postgres/postgres_store.go             29
-rw-r--r--  weed/filer2/reader_at.go                          162
-rw-r--r--  weed/filer2/redis/redis_cluster_store.go           18
-rw-r--r--  weed/filer2/redis/redis_store.go                    8
-rw-r--r--  weed/filer2/redis/universal_redis_store.go         71
-rw-r--r--  weed/filer2/redis2/redis_cluster_store.go          42
-rw-r--r--  weed/filer2/redis2/redis_store.go                  36
-rw-r--r--  weed/filer2/redis2/universal_redis_store.go       162
-rw-r--r--  weed/filer2/stream.go                             199
-rw-r--r--  weed/filer2/topics.go                               6
38 files changed, 2690 insertions, 604 deletions
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
index 5f2990475..5ade18960 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer2/abstract_sql/abstract_sql_store.go
@@ -1,24 +1,65 @@
package abstract_sql
import (
+ "context"
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type AbstractSqlStore struct {
- DB *sql.DB
- SqlInsert string
- SqlUpdate string
- SqlFind string
- SqlDelete string
- SqlListExclusive string
- SqlListInclusive string
+ DB *sql.DB
+ SqlInsert string
+ SqlUpdate string
+ SqlFind string
+ SqlDelete string
+ SqlDeleteFolderChildren string
+ SqlListExclusive string
+ SqlListInclusive string
}
-func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
+type TxOrDB interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+ QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+}
+
+func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
+ Isolation: sql.LevelReadCommitted,
+ ReadOnly: false,
+ })
+ if err != nil {
+ return ctx, err
+ }
+
+ return context.WithValue(ctx, "tx", tx), nil
+}
+func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Commit()
+ }
+ return nil
+}
+func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Rollback()
+ }
+ return nil
+}
+
+func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx
+ }
+ return store.DB
+}
+
+func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -26,7 +67,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
- res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
if err != nil {
return fmt.Errorf("insert %s: %s", entry.FullPath, err)
}
@@ -38,7 +79,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -46,7 +87,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
- res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err)
}
@@ -58,13 +99,13 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName()
- row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir)
+ row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
entry := &filer2.Entry{
@@ -77,11 +118,11 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
return entry, nil
}
-func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
+func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
- res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err)
}
@@ -94,14 +135,29 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil
}
-func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
sqlText = store.SqlListInclusive
}
- rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit)
+ rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit)
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
@@ -116,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(data); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
@@ -128,3 +184,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st
return entries, nil
}
+
+func (store *AbstractSqlStore) Shutdown() {
+ store.DB.Close()
+}
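
The transaction plumbing above stashes a *sql.Tx in the request context under the string key "tx"; getTxOrDB then routes queries through the transaction when one is present and falls back to the plain *sql.DB otherwise. A minimal usage sketch, with a hypothetical renameEntry helper that is not part of this change:

func renameEntry(ctx context.Context, store *AbstractSqlStore, oldPath util.FullPath, newEntry *filer2.Entry) error {
	ctx, err := store.BeginTransaction(ctx)
	if err != nil {
		return err
	}
	// Both operations run on the same *sql.Tx that getTxOrDB pulls out of ctx.
	if err := store.InsertEntry(ctx, newEntry); err != nil {
		store.RollbackTransaction(ctx)
		return err
	}
	if err := store.DeleteEntry(ctx, oldPath); err != nil {
		store.RollbackTransaction(ctx)
		return err
	}
	return store.CommitTransaction(ctx)
}

(go vet warns about plain string keys in context.WithValue; a private key type would avoid collisions, but the sketch keeps the commit's convention.)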
diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go
deleted file mode 100644
index 5c982c537..000000000
--- a/weed/filer2/abstract_sql/hashing.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package abstract_sql
-
-import (
- "crypto/md5"
- "io"
-)
-
-// returns a 64 bit big int
-func hashToLong(dir string) (v int64) {
- h := md5.New()
- io.WriteString(h, dir)
-
- b := h.Sum(nil)
-
- v += int64(b[0])
- v <<= 8
- v += int64(b[1])
- v <<= 8
- v += int64(b[2])
- v <<= 8
- v += int64(b[3])
- v <<= 8
- v += int64(b[4])
- v <<= 8
- v += int64(b[5])
- v <<= 8
- v += int64(b[6])
- v <<= 8
- v += int64(b[7])
-
- return
-}
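
The deleted helper is replaced by util.HashStringToLong at the call sites above. Assuming the util version keeps the same semantics, the shift-and-add loop is equivalent to reading the first 8 bytes of the MD5 digest as a big-endian integer:

import (
	"crypto/md5"
	"encoding/binary"
)

// Assumed-equivalent form of the removed hashToLong: the first 8 bytes
// of the MD5 digest of dir, interpreted as a big-endian int64.
func hashStringToLong(dir string) int64 {
	sum := md5.Sum([]byte(dir))
	return int64(binary.BigEndian.Uint64(sum[:8]))
}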
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
index 2c1f03182..5dd7d8036 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer2/cassandra/cassandra_store.go
@@ -1,11 +1,15 @@
package cassandra
import (
+ "context"
"fmt"
+
+ "github.com/gocql/gocql"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gocql/gocql"
)
func init() {
@@ -21,10 +25,10 @@ func (store *CassandraStore) GetName() string {
return "cassandra"
}
-func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) {
+func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("keyspace"),
- configuration.GetStringSlice("hosts"),
+ configuration.GetString(prefix+"keyspace"),
+ configuration.GetStringSlice(prefix+"hosts"),
)
}
@@ -39,7 +43,17 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er
return
}
-func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *CassandraStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -56,12 +70,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
var data []byte
@@ -69,12 +83,12 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
}
if len(data) == 0 {
- return nil, fmt.Errorf("not found: %s", fullpath)
+ return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@@ -88,7 +102,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
return entry, nil
}
-func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
+func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@@ -101,7 +115,18 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil
}
-func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ if err := store.session.Query(
+ "DELETE FROM filemeta WHERE directory=?",
+ fullpath).Exec(); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
@@ -114,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
for iter.Scan(&name, &data) {
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
err = decodeErr
@@ -129,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star
return entries, err
}
+
+func (store *CassandraStore) Shutdown() {
+ store.session.Close()
+}
diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go
index 7b05b53dc..a174117ea 100644
--- a/weed/filer2/configuration.go
+++ b/weed/filer2/configuration.go
@@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {
- viperSub := config.Sub(store.GetName())
- if err := store.Initialize(viperSub); err != nil {
+ if err := store.Initialize(config, store.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
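
Instead of handing each store a detached config.Sub(...) subtree, the filer now passes the whole config plus a "<storename>." key prefix; this likely matters because viper's Sub returns a snapshot that does not pick up environment-variable overrides. A sketch of what a store's Initialize now receives, using a hypothetical ExampleStore:

type ExampleStore struct{ address string } // hypothetical

func (store *ExampleStore) Initialize(configuration util.Configuration, prefix string) error {
	// With prefix "example.", this reads the key "example.address".
	store.address = configuration.GetString(prefix + "address")
	if store.address == "" {
		return fmt.Errorf("missing %saddress in filer configuration", prefix)
	}
	return nil
}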
diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go
index f17a11727..00b9b132d 100644
--- a/weed/filer2/entry.go
+++ b/weed/filer2/entry.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Attr struct {
@@ -20,6 +21,7 @@ type Attr struct {
UserName string
GroupNames []string
SymlinkTarget string
+ Md5 []byte
}
func (attr Attr) IsDirectory() bool {
@@ -27,9 +29,10 @@ func (attr Attr) IsDirectory() bool {
}
type Entry struct {
- FullPath
+ util.FullPath
Attr
+ Extended map[string][]byte
// the following is for files
Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
@@ -52,9 +55,29 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
return nil
}
return &filer_pb.Entry{
- Name: string(entry.FullPath),
+ Name: entry.FullPath.Name(),
IsDirectory: entry.IsDirectory(),
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
+ Extended: entry.Extended,
+ }
+}
+
+func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
+ if entry == nil {
+ return nil
+ }
+ dir, _ := entry.FullPath.DirAndName()
+ return &filer_pb.FullEntry{
+ Dir: dir,
+ Entry: entry.ToProtoEntry(),
+ }
+}
+
+func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
+ return &Entry{
+ FullPath: util.NewFullPath(dir, entry.Name),
+ Attr: PbToEntryAttribute(entry.Attributes),
+ Chunks: entry.Chunks,
}
}
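
Entry gains an Extended attribute map and an Md5 checksum, and FromPbEntry/ToProtoFullEntry convert between the in-memory form and the protobuf messages. A round-trip sketch with hypothetical values:

entry := filer2.FromPbEntry("/some/dir", &filer_pb.Entry{
	Name:       "file.txt",
	Attributes: &filer_pb.FuseAttributes{FileMode: 0644},
})
full := entry.ToProtoFullEntry()
// full.Dir == "/some/dir"; full.Entry.Name == "file.txt"

Note the fix in ToProtoEntry: Name now carries only the base name, where it previously stored the whole path.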
diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go
index e50b3fa9a..47c911011 100644
--- a/weed/filer2/entry_codec.go
+++ b/weed/filer2/entry_codec.go
@@ -1,18 +1,21 @@
package filer2
import (
+ "bytes"
+ "fmt"
"os"
"time"
- "fmt"
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gogo/protobuf/proto"
)
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
+ Extended: entry.Extended,
}
return proto.Marshal(message)
}
@@ -27,6 +30,8 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
entry.Attr = PbToEntryAttribute(message.Attributes)
+ entry.Extended = message.Extended
+
entry.Chunks = message.Chunks
return nil
@@ -47,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
+ Md5: entry.Attr.Md5,
}
}
@@ -66,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
+ t.Md5 = attr.Md5
return t
}
@@ -84,6 +91,14 @@ func EqualEntry(a, b *Entry) bool {
return false
}
+ if !eq(a.Extended, b.Extended) {
+ return false
+ }
+
+ if !bytes.Equal(a.Md5, b.Md5) {
+ return false
+ }
+
for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false
@@ -91,3 +106,17 @@ func EqualEntry(a, b *Entry) bool {
}
return true
}
+
+func eq(a, b map[string][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for k, v := range a {
+ if w, ok := b[k]; !ok || !bytes.Equal(v, w) {
+ return false
+ }
+ }
+
+ return true
+}
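
EqualEntry now also compares the Extended map key-by-key with bytes.Equal, plus the new Md5 attribute, so entries that differ only in an extended attribute no longer compare equal. For illustration (hypothetical values):

a := &filer2.Entry{Extended: map[string][]byte{"x-user-tag": []byte("v1")}}
b := &filer2.Entry{Extended: map[string][]byte{"x-user-tag": []byte("v2")}}
// eq(a.Extended, b.Extended) is false: same key, different bytes.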
diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go
new file mode 100644
index 000000000..2ef65b4a0
--- /dev/null
+++ b/weed/filer2/etcd/etcd_store.go
@@ -0,0 +1,202 @@
+package etcd
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/clientv3"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_FILE_SEPARATOR = byte(0x00)
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &EtcdStore{})
+}
+
+type EtcdStore struct {
+ client *clientv3.Client
+}
+
+func (store *EtcdStore) GetName() string {
+ return "etcd"
+}
+
+func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ servers := configuration.GetString(prefix + "servers")
+ if servers == "" {
+ servers = "localhost:2379"
+ }
+
+ timeout := configuration.GetString(prefix + "timeout")
+ if timeout == "" {
+ timeout = "3s"
+ }
+
+ return store.initialize(servers, timeout)
+}
+
+func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
+ glog.Infof("filer store etcd: %s", servers)
+
+ to, err := time.ParseDuration(timeout)
+ if err != nil {
+ return fmt.Errorf("parse timeout %s: %s", timeout, err)
+ }
+
+ store.client, err = clientv3.New(clientv3.Config{
+ Endpoints: strings.Split(servers, ","),
+ DialTimeout: to,
+ })
+ if err != nil {
+ return fmt.Errorf("connect to etcd %s: %s", servers, err)
+ }
+
+ return
+}
+
+func (store *EtcdStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *EtcdStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *EtcdStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ key := genKey(entry.DirAndName())
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if _, err := store.client.Put(ctx, string(key), string(value)); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+ key := genKey(fullpath.DirAndName())
+
+ resp, err := store.client.Get(ctx, string(key))
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ }
+
+ if len(resp.Kvs) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ key := genKey(fullpath.DirAndName())
+
+ if _, err := store.client.Delete(ctx, string(key)); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil {
+ return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) ListDirectoryEntries(
+ ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
+) (entries []*filer2.Entry, err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ resp, err := store.client.Get(ctx, string(directoryPrefix),
+ clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+
+ for _, kv := range resp.Kvs {
+ fileName := getNameFromKey(kv.Key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !inclusive {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ entry := &filer2.Entry{
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ entries = append(entries, entry)
+ }
+
+ return entries, err
+}
+
+func genKey(dirPath, fileName string) (key []byte) {
+ key = []byte(dirPath)
+ key = append(key, DIR_FILE_SEPARATOR)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = []byte(string(fullpath))
+ keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+ sepIndex := len(key) - 1
+ for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR {
+ sepIndex--
+ }
+
+ return string(key[sepIndex+1:])
+}
+
+func (store *EtcdStore) Shutdown() {
+ store.client.Close()
+}
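
The etcd store encodes each entry under "<directory>\x00<name>", so a prefix Get or Delete over "<directory>\x00" covers exactly that directory's children. A usage sketch of the unexported helpers above, values hypothetical:

key := genKey("/buckets/photos", "cat.jpg") // "/buckets/photos\x00cat.jpg"
name := getNameFromKey(key)                 // "cat.jpg"
prefix := genDirectoryKeyPrefix(weed_util.FullPath("/buckets/photos"), "")
// prefix is the scan key that ListDirectoryEntries and DeleteFolderChildren use.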
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
index 6c3157e6c..6832d0f31 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer2/filechunks.go
@@ -3,6 +3,7 @@ package filer2
import (
"fmt"
"hash/fnv"
+ "math"
"sort"
"sync"
@@ -19,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
return
}
-func ETag(chunks []*filer_pb.FileChunk) (etag string) {
+func ETag(entry *filer_pb.Entry) (etag string) {
+ if entry.Attributes == nil || entry.Attributes.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attributes.Md5)
+}
+
+func ETagEntry(entry *Entry) (etag string) {
+ if entry.Attr.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attr.Md5)
+}
+
+func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 {
return chunks[0].ETag
}
@@ -40,7 +55,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
fileIds[interval.fileId] = true
}
for _, chunk := range chunks {
- if found := fileIds[chunk.FileId]; found {
+ if _, found := fileIds[chunk.GetFileIdString()]; found {
compacted = append(compacted, chunk)
} else {
garbage = append(garbage, chunk)
@@ -50,15 +65,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
return
}
-func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) {
+func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
fileIds := make(map[string]bool)
- for _, interval := range newChunks {
- fileIds[interval.FileId] = true
+ for _, interval := range bs {
+ fileIds[interval.GetFileIdString()] = true
}
- for _, chunk := range oldChunks {
- if found := fileIds[chunk.FileId]; !found {
- unused = append(unused, chunk)
+ for _, chunk := range as {
+ if _, found := fileIds[chunk.GetFileIdString()]; !found {
+ delta = append(delta, chunk)
}
}
@@ -70,10 +85,16 @@ type ChunkView struct {
Offset int64
Size uint64
LogicOffset int64
- IsFullChunk bool
+ ChunkSize uint64
+ CipherKey []byte
+ IsGzipped bool
}
-func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
+func (cv *ChunkView) IsFullChunk() bool {
+ return cv.Size == cv.ChunkSize
+}
+
+func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles := NonOverlappingVisibleIntervals(chunks)
@@ -81,19 +102,27 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
}
-func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
- stop := offset + int64(size)
+ stop := offset + size
+ if size == math.MaxInt64 {
+ stop = math.MaxInt64
+ }
+ if stop < offset {
+ stop = math.MaxInt64
+ }
for _, chunk := range visibles {
+
if chunk.start <= offset && offset < chunk.stop && offset < stop {
- isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
views = append(views, &ChunkView{
FileId: chunk.fileId,
Offset: offset - chunk.start, // offset is the data starting location in this file id
Size: uint64(min(chunk.stop, stop) - offset),
LogicOffset: offset,
- IsFullChunk: isFullChunk,
+ ChunkSize: chunk.chunkSize,
+ CipherKey: chunk.cipherKey,
+ IsGzipped: chunk.isGzipped,
})
offset = min(chunk.stop, stop)
}
@@ -120,13 +149,7 @@ var bufPool = sync.Pool{
func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {
- newV := newVisibleInterval(
- chunk.Offset,
- chunk.Offset+int64(chunk.Size),
- chunk.FileId,
- chunk.Mtime,
- true,
- )
+ newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
length := len(visibles)
if length == 0 {
@@ -140,23 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
logPrintf(" before", visibles)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- v.start,
- chunk.Offset,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
chunkStop := chunk.Offset + int64(chunk.Size)
if v.start < chunkStop && chunkStop < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
@@ -187,6 +198,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi
var newVisibles []VisibleInterval
for _, chunk := range chunks {
+
newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
t := visibles[:0]
visibles = newVisibles
@@ -207,16 +219,20 @@ type VisibleInterval struct {
stop int64
modifiedTime int64
fileId string
- isFullChunk bool
+ chunkSize uint64
+ cipherKey []byte
+ isGzipped bool
}
-func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval {
+func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
return VisibleInterval{
start: start,
stop: stop,
fileId: fileId,
modifiedTime: modifiedTime,
- isFullChunk: isFullChunk,
+ chunkSize: chunkSize,
+ cipherKey: cipherKey,
+ isGzipped: isGzipped,
}
}
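
ViewFromChunks first resolves overlapping writes into non-overlapping visible intervals (the chunk with the newer Mtime wins the overlap), then cuts the requested [offset, offset+size) range into ChunkViews. A sketch matching test case 8 added below:

chunks := []*filer_pb.FileChunk{
	{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
	{Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
}
views := filer2.ViewFromChunks(chunks, 0, 290)
// Two views: 90 bytes of "abc" at logical offset 0, then 200 bytes of
// "asdf" at logical offset 90; the newer chunk shadows the overlap.

The size parameter also widens from int to int64, with math.MaxInt64 (or an overflowing offset+size) treated as "read to the end".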
diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go
index e75e60753..7b1133b85 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer2/filechunks_test.go
@@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
- Size int
+ Size int64
Expected []*ChunkView
}{
// case 0: normal
@@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) {
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
},
},
+ // case 8: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+ {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
+ {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353},
+ },
+ Offset: 0,
+ Size: 300,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0},
+ {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90},
+ {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190},
+ },
+ },
+ // case 9: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1},
+ {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2},
+ {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3},
+ {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4},
+ {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5},
+ {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6},
+ },
+ Offset: 0,
+ Size: 153578836,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0},
+ {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936},
+ {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760},
+ {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736},
+ {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168},
+ {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248},
+ },
+ },
}
for i, testcase := range testcases {
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
index 1ee2f5ede..06c997f36 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer2/filer.go
@@ -3,35 +3,53 @@ package filer2
import (
"context"
"fmt"
- "math"
"os"
- "path/filepath"
"strings"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/karlseguin/ccache"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient"
- "github.com/karlseguin/ccache"
)
+const PaginationSize = 1024 * 256
+
var (
OS_UID = uint32(os.Getuid())
OS_GID = uint32(os.Getgid())
)
type Filer struct {
- store FilerStore
- directoryCache *ccache.Cache
- MasterClient *wdclient.MasterClient
- fileIdDeletionChan chan string
+ store *FilerStoreWrapper
+ directoryCache *ccache.Cache
+ MasterClient *wdclient.MasterClient
+ fileIdDeletionQueue *util.UnboundedQueue
+ GrpcDialOption grpc.DialOption
+ DirBucketsPath string
+ FsyncBuckets []string
+ buckets *FilerBuckets
+ Cipher bool
+ LocalMetaLogBuffer *log_buffer.LogBuffer
+ metaLogCollection string
+ metaLogReplication string
}
-func NewFiler(masters []string) *Filer {
+func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
f := &Filer{
- directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
- MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters),
- fileIdDeletionChan: make(chan string, 4096),
+ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
+ fileIdDeletionQueue: util.NewUnboundedQueue(),
+ GrpcDialOption: grpcDialOption,
}
+ f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
+ f.metaLogCollection = collection
+ f.metaLogReplication = replication
go f.loopProcessingDeletion()
@@ -39,7 +57,11 @@ func NewFiler(masters []string) *Filer {
}
func (f *Filer) SetStore(store FilerStore) {
- f.store = store
+ f.store = NewFilerStoreWrapper(store)
+}
+
+func (f *Filer) GetStore() (store FilerStore) {
+ return f.store
}
func (f *Filer) DisableDirectoryCache() {
@@ -54,7 +76,19 @@ func (fs *Filer) KeepConnectedToMaster() {
fs.MasterClient.KeepConnectedToMaster()
}
-func (f *Filer) CreateEntry(entry *Entry) error {
+func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return f.store.BeginTransaction(ctx)
+}
+
+func (f *Filer) CommitTransaction(ctx context.Context) error {
+ return f.store.CommitTransaction(ctx)
+}
+
+func (f *Filer) RollbackTransaction(ctx context.Context) error {
+ return f.store.RollbackTransaction(ctx)
+}
+
+func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool) error {
if string(entry.FullPath) == "/" {
return nil
@@ -67,7 +101,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
var lastDirectoryEntry *Entry
for i := 1; i < len(dirParts); i++ {
- dirPath := "/" + filepath.Join(dirParts[:i]...)
+ dirPath := "/" + util.Join(dirParts[:i]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// first check local cache
@@ -76,9 +110,9 @@ func (f *Filer) CreateEntry(entry *Entry) error {
// not found, check the store directly
if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath)
- dirEntry, _ = f.FindEntry(FullPath(dirPath))
+ dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath))
} else {
- glog.V(4).Infof("found cached directory: %s", dirPath)
+ // glog.V(4).Infof("found cached directory: %s", dirPath)
}
// no such existing directory
@@ -88,27 +122,34 @@ func (f *Filer) CreateEntry(entry *Entry) error {
now := time.Now()
dirEntry = &Entry{
- FullPath: FullPath(dirPath),
+ FullPath: util.FullPath(dirPath),
Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | 0770,
- Uid: entry.Uid,
- Gid: entry.Gid,
+ Mtime: now,
+ Crtime: now,
+ Mode: os.ModeDir | entry.Mode | 0110,
+ Uid: entry.Uid,
+ Gid: entry.Gid,
+ Collection: entry.Collection,
+ Replication: entry.Replication,
+ UserName: entry.UserName,
+ GroupNames: entry.GroupNames,
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
- mkdirErr := f.store.InsertEntry(dirEntry)
+ mkdirErr := f.store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
- if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound {
+ if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
+ glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
- f.NotifyUpdateEvent(nil, dirEntry, false)
+ f.maybeAddBucket(dirEntry)
+ f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster)
}
} else if !dirEntry.IsDirectory() {
+ glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@@ -123,6 +164,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
}
if lastDirectoryEntry == nil {
+ glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
@@ -134,38 +176,50 @@ func (f *Filer) CreateEntry(entry *Entry) error {
}
*/
- oldEntry, _ := f.FindEntry(entry.FullPath)
+ oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
+ glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl)
if oldEntry == nil {
- if err := f.store.InsertEntry(entry); err != nil {
+ if err := f.store.InsertEntry(ctx, entry); err != nil {
+ glog.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
- if err := f.UpdateEntry(oldEntry, entry); err != nil {
+ if o_excl {
+ glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+ return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
+ }
+ if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
+ glog.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
- f.NotifyUpdateEvent(oldEntry, entry, true)
+ f.maybeAddBucket(entry)
+ f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster)
f.deleteChunksIfNotNew(oldEntry, entry)
+ glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+
return nil
}
-func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) {
+func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil {
if oldEntry.IsDirectory() && !entry.IsDirectory() {
+ glog.Errorf("existing %s is a directory", entry.FullPath)
return fmt.Errorf("existing %s is a directory", entry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
+ glog.Errorf("existing %s is a file", entry.FullPath)
return fmt.Errorf("existing %s is a file", entry.FullPath)
}
}
- return f.store.UpdateEntry(entry)
+ return f.store.UpdateEntry(ctx, entry)
}
-func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
+func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
now := time.Now()
@@ -181,74 +235,59 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
},
}, nil
}
- return f.store.FindEntry(p)
-}
-
-func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) {
- entry, err := f.FindEntry(p)
- if err != nil {
- return err
- }
-
- if entry.IsDirectory() {
- limit := int(1)
- if isRecursive {
- limit = math.MaxInt32
- }
- lastFileName := ""
- includeLastFile := false
- for limit > 0 {
- entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024)
- if err != nil {
- return fmt.Errorf("list folder %s: %v", p, err)
- }
- if len(entries) == 0 {
- break
- } else {
- if isRecursive {
- for _, sub := range entries {
- lastFileName = sub.Name()
- f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks)
- limit--
- if limit <= 0 {
- break
- }
- }
- } else {
- if len(entries) > 0 {
- return fmt.Errorf("folder %s is not empty", p)
- }
- }
- f.cacheDelDirectory(string(p))
- if len(entries) < 1024 {
- break
- }
- }
+ entry, err = f.store.FindEntry(ctx, p)
+ if entry != nil && entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ return nil, filer_pb.ErrNotFound
}
}
+ return
- if shouldDeleteChunks {
- f.DeleteChunks(entry.Chunks)
- }
+}
- if p == "/" {
- return nil
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+ if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+ p = p[0 : len(p)-1]
}
- glog.V(3).Infof("deleting entry %v", p)
- f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
+ var makeupEntries []*Entry
+ entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ for expiredCount > 0 && err == nil {
+ makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
+ if err == nil {
+ entries = append(entries, makeupEntries...)
+ }
+ }
- return f.store.DeleteEntry(p)
+ return entries, err
}
-func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
- if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0 : len(p)-1]
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
+ listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ if listErr != nil {
+ return listedEntries, expiredCount, "", listErr
}
- return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit)
+ for _, entry := range listedEntries {
+ lastFileName = entry.Name()
+ if entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ expiredCount++
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ return
}
func (f *Filer) cacheDelDirectory(dirpath string) {
+
+ if dirpath == "/" {
+ return
+ }
+
if f.directoryCache == nil {
return
}
@@ -257,6 +296,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) {
}
func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
+
if f.directoryCache == nil {
return nil
}
@@ -280,3 +320,8 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
+
+func (f *Filer) Shutdown() {
+ f.LocalMetaLogBuffer.Shutdown()
+ f.store.Shutdown()
+}
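
FindEntry and directory listing now lazily expire TTL'd entries: an entry whose Crtime plus TtlSec lies in the past is deleted on read and reported as not found, and doListDirectoryEntries re-lists to make up for entries dropped this way. The check, extracted as a sketch:

expired := entry.TtlSec > 0 &&
	entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now())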
diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go
new file mode 100644
index 000000000..7a57e7ee1
--- /dev/null
+++ b/weed/filer2/filer_buckets.go
@@ -0,0 +1,121 @@
+package filer2
+
+import (
+ "context"
+ "math"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type BucketName string
+type BucketOption struct {
+ Name BucketName
+ Replication string
+ fsync bool
+}
+type FilerBuckets struct {
+ dirBucketsPath string
+ buckets map[BucketName]*BucketOption
+ sync.RWMutex
+}
+
+func (f *Filer) LoadBuckets() {
+
+ f.buckets = &FilerBuckets{
+ buckets: make(map[BucketName]*BucketOption),
+ }
+
+ limit := math.MaxInt32
+
+ entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
+
+ if err != nil {
+ glog.V(1).Infof("no buckets found: %v", err)
+ return
+ }
+
+ shouldFsyncMap := make(map[string]bool)
+ for _, bucket := range f.FsyncBuckets {
+ shouldFsyncMap[bucket] = true
+ }
+
+ glog.V(1).Infof("buckets found: %d", len(entries))
+
+ f.buckets.Lock()
+ for _, entry := range entries {
+ _, shouldFsync := shouldFsyncMap[entry.Name()]
+ f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
+ Name: BucketName(entry.Name()),
+ Replication: entry.Replication,
+ fsync: shouldFsync,
+ }
+ }
+ f.buckets.Unlock()
+
+}
+
+func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ option, found := f.buckets.buckets[BucketName(bucketName)]
+
+ if !found {
+ return "", false
+ }
+ return option.Replication, option.fsync
+
+}
+
+func (f *Filer) isBucket(entry *Entry) bool {
+ if !entry.IsDirectory() {
+ return false
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return false
+ }
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ _, found := f.buckets.buckets[BucketName(dirName)]
+
+ return found
+
+}
+
+func (f *Filer) maybeAddBucket(entry *Entry) {
+ if !entry.IsDirectory() {
+ return
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return
+ }
+ f.addBucket(dirName, &BucketOption{
+ Name: BucketName(dirName),
+ Replication: entry.Replication,
+ })
+}
+
+func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ f.buckets.buckets[BucketName(bucketName)] = bucketOption
+
+}
+
+func (f *Filer) deleteBucket(bucketName string) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ delete(f.buckets.buckets, BucketName(bucketName))
+
+}
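
The bucket registry caches per-bucket replication and fsync settings under an RWMutex, populated at startup by LoadBuckets and kept current by maybeAddBucket/deleteBucket as directories under DirBucketsPath come and go. A hypothetical lookup:

replication, fsync := f.ReadBucketOption("photos")
// Returns ("", false) if "photos" is not a known bucket.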
diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go
new file mode 100644
index 000000000..f30e10d59
--- /dev/null
+++ b/weed/filer2/filer_delete_entry.go
@@ -0,0 +1,128 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (err error) {
+ if p == "/" {
+ return nil
+ }
+
+ entry, findErr := f.FindEntry(ctx, p)
+ if findErr != nil {
+ return findErr
+ }
+
+ isCollection := f.isBucket(entry)
+
+ var chunks []*filer_pb.FileChunk
+ chunks = append(chunks, entry.Chunks...)
+ if entry.IsDirectory() {
+ // delete the folder children, not including the folder itself
+ var dirChunks []*filer_pb.FileChunk
+ dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster)
+ if err != nil {
+ glog.V(0).Infof("delete directory %s: %v", p, err)
+ return fmt.Errorf("delete directory %s: %v", p, err)
+ }
+ chunks = append(chunks, dirChunks...)
+ }
+
+ // delete the file or folder
+ err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster)
+ if err != nil {
+ return fmt.Errorf("delete file %s: %v", p, err)
+ }
+
+ if shouldDeleteChunks && !isCollection {
+ go f.DeleteChunks(chunks)
+ }
+ if isCollection {
+ collectionName := entry.Name()
+ f.doDeleteCollection(collectionName)
+ f.deleteBucket(collectionName)
+ }
+
+ return nil
+}
+
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (chunks []*filer_pb.FileChunk, err error) {
+
+ lastFileName := ""
+ includeLastFile := false
+ for {
+ entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
+ if err != nil {
+ glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+ }
+ if lastFileName == "" && !isRecursive && len(entries) > 0 {
+ // only for first iteration in the loop
+ return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
+ }
+
+ for _, sub := range entries {
+ lastFileName = sub.Name()
+ var dirChunks []*filer_pb.FileChunk
+ if sub.IsDirectory() {
+ dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false)
+ f.cacheDelDirectory(string(sub.FullPath))
+ f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster)
+ chunks = append(chunks, dirChunks...)
+ } else {
+ chunks = append(chunks, sub.Chunks...)
+ }
+ if err != nil && !ignoreRecursiveError {
+ return nil, err
+ }
+ }
+
+ if len(entries) < PaginationSize {
+ break
+ }
+ }
+
+ glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+
+ if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
+ return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+
+ return chunks, nil
+}
+
+func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool) (err error) {
+
+ glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+
+ if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
+ return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+ if entry.IsDirectory() {
+ f.cacheDelDirectory(string(entry.FullPath))
+ }
+ f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster)
+
+ return nil
+}
+
+func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+
+ return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: collectionName,
+ })
+ if err != nil {
+ glog.Infof("delete collection %s: %v", collectionName, err)
+ }
+ return err
+ })
+
+}
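
Deletion is now recursive in PaginationSize batches: children are listed and deleted page by page, their chunks collected along the way for asynchronous removal, and deleting a bucket directory also drops the backing collection on the master. A hypothetical call:

err := f.DeleteEntryMetaAndData(context.Background(),
	util.FullPath("/buckets/photos/2020"),
	true,  // isRecursive
	false, // ignoreRecursiveError
	true,  // shouldDeleteChunks
	false, // isFromOtherCluster
)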
diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go
index 8fe8ae04f..a6b229771 100644
--- a/weed/filer2/filer_deletion.go
+++ b/weed/filer2/filer_deletion.go
@@ -6,16 +6,14 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func (f *Filer) loopProcessingDeletion() {
-
- ticker := time.NewTicker(5 * time.Second)
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
+func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) {
+ return func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
for _, vid := range vids {
- locs := f.MasterClient.GetVidLocations(vid)
+ locs, _ := masterClient.GetVidLocations(vid)
var locations []operation.Location
for _, loc := range locs {
locations = append(locations, operation.Location{
@@ -30,35 +28,56 @@ func (f *Filer) loopProcessingDeletion() {
}
return m, nil
}
+}
+
+func (f *Filer) loopProcessingDeletion() {
+
+ lookupFunc := LookupByMasterClientFn(f.MasterClient)
- var fileIds []string
+ DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+ var deletionCount int
for {
- select {
- case fid := <-f.fileIdDeletionChan:
- fileIds = append(fileIds, fid)
- if len(fileIds) >= 4096 {
- glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
- fileIds = fileIds[:0]
- }
- case <-ticker.C:
- if len(fileIds) > 0 {
- glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
- fileIds = fileIds[:0]
+ deletionCount = 0
+ f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+ for len(fileIds) > 0 {
+ var toDeleteFileIds []string
+ if len(fileIds) > DeletionBatchSize {
+ toDeleteFileIds = fileIds[:DeletionBatchSize]
+ fileIds = fileIds[DeletionBatchSize:]
+ } else {
+ toDeleteFileIds = fileIds
+ fileIds = fileIds[:0]
+ }
+ deletionCount = len(toDeleteFileIds)
+ deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ if err != nil {
+ glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ } else {
+ glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+ }
+ if len(deleteResults) != deletionCount {
+ glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, len(deleteResults))
+ }
}
+ })
+
+ if deletionCount == 0 {
+ time.Sleep(1123 * time.Millisecond)
}
}
}
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks {
- f.fileIdDeletionChan <- chunk.FileId
+ f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
}
}
+// DeleteFileByFileId direct delete by file id.
+// Only used when the fileId is not being managed by snapshots.
func (f *Filer) DeleteFileByFileId(fileId string) {
- f.fileIdDeletionChan <- fileId
+ f.fileIdDeletionQueue.EnQueue(fileId)
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
@@ -71,16 +90,13 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
}
var toDelete []*filer_pb.FileChunk
+ newChunkIds := make(map[string]bool)
+ for _, newChunk := range newEntry.Chunks {
+ newChunkIds[newChunk.GetFileIdString()] = true
+ }
for _, oldChunk := range oldEntry.Chunks {
- found := false
- for _, newChunk := range newEntry.Chunks {
- if oldChunk.FileId == newChunk.FileId {
- found = true
- break
- }
- }
- if !found {
+ if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
toDelete = append(toDelete, oldChunk)
}
}
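
deleteChunksIfNotNew replaces the old nested loop with a set of new chunk ids, cutting the comparison from O(old*new) to O(old+new). The same idea is exposed as MinusChunks in filechunks.go; an equivalent-in-spirit sketch:

// Chunks referenced by the old entry but absent from the new one are garbage.
toDelete := filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
f.DeleteChunks(toDelete)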
diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go
index b3c215249..340f19fb5 100644
--- a/weed/filer2/filer_notify.go
+++ b/weed/filer2/filer_notify.go
@@ -1,33 +1,160 @@
package filer2
import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
-func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) {
- var key string
+func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool) {
+ var fullpath string
if oldEntry != nil {
- key = string(oldEntry.FullPath)
+ fullpath = string(oldEntry.FullPath)
} else if newEntry != nil {
- key = string(newEntry.FullPath)
+ fullpath = string(newEntry.FullPath)
} else {
return
}
+ // println("fullpath:", fullpath)
+
+ if strings.HasPrefix(fullpath, SystemLogDir) {
+ return
+ }
+
+ newParentPath := ""
+ if newEntry != nil {
+ newParentPath, _ = newEntry.FullPath.DirAndName()
+ }
+ eventNotification := &filer_pb.EventNotification{
+ OldEntry: oldEntry.ToProtoEntry(),
+ NewEntry: newEntry.ToProtoEntry(),
+ DeleteChunks: deleteChunks,
+ NewParentPath: newParentPath,
+ IsFromOtherCluster: isFromOtherCluster,
+ }
+
if notification.Queue != nil {
+ glog.V(3).Infof("notifying entry update %v", fullpath)
+ notification.Queue.SendMessage(fullpath, eventNotification)
+ }
+
+ f.logMetaEvent(ctx, fullpath, eventNotification)
+
+}
- glog.V(3).Infof("notifying entry update %v", key)
+func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
- notification.Queue.SendMessage(
- key,
- &filer_pb.EventNotification{
- OldEntry: oldEntry.ToProtoEntry(),
- NewEntry: newEntry.ToProtoEntry(),
- DeleteChunks: deleteChunks,
- },
- )
+ dir, _ := util.FullPath(fullpath).DirAndName()
+
+ event := &filer_pb.SubscribeMetadataResponse{
+ Directory: dir,
+ EventNotification: eventNotification,
+ TsNs: time.Now().UnixNano(),
+ }
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return
+ }
+
+ f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data)
+
+}
+
+func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
+
+ targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ // startTime.Second(), startTime.Nanosecond(),
+ )
+
+ if err := f.appendToFile(targetFile, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+}
+
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
+ if listDayErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ }
+ for _, dayEntry := range dayEntries {
+ // println("checking day", dayEntry.FullPath)
+ hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
+ if listHourMinuteErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+ }
+ for _, hourMinuteEntry := range hourMinuteEntries {
+ // println("checking hh-mm", hourMinuteEntry.FullPath)
+ if dayEntry.Name() == startDate {
+ if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+ continue
+ }
+ }
+ // println("processing", hourMinuteEntry.FullPath)
+ chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+ if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ break
+ }
+ return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+ }
+ chunkedFileReader.Close()
+ }
+ }
+
+ return lastTsNs, nil
+}
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+ for {
+ n, err := r.Read(sizeBuf)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != 4 {
+ return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
+ }
+ size := util.BytesToUint32(sizeBuf)
+ // println("entry size", size)
+ entryData := make([]byte, size)
+ n, err = r.Read(entryData)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != int(size) {
+ return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+ }
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ return lastTsNs, err
+ }
+ if logEntry.TsNs <= ns {
+ // entries are appended in ts order, so the old ones come first;
+ // skip past the resume point instead of ending the segment read
+ continue
+ }
+ // println("each log: ", logEntry.TsNs)
+ if err := eachLogEntryFn(logEntry); err != nil {
+ return lastTsNs, err
+ } else {
+ lastTsNs = logEntry.TsNs
+ }
}
}
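ReadEachLogEntry assumes a simple framing for the persisted segments: each record is a 4-byte size followed by the marshaled filer_pb.LogEntry bytes. A standalone sketch of the write side, assuming util.BytesToUint32 decodes a big-endian uint32:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// writeFramed appends one record in the layout ReadEachLogEntry
// expects: a 4-byte big-endian length, then the protobuf payload.
func writeFramed(segment *bytes.Buffer, entryData []byte) {
    sizeBuf := make([]byte, 4)
    binary.BigEndian.PutUint32(sizeBuf, uint32(len(entryData)))
    segment.Write(sizeBuf)
    segment.Write(entryData)
}

func main() {
    var segment bytes.Buffer
    writeFramed(&segment, []byte("marshaled-log-entry-1"))
    writeFramed(&segment, []byte("marshaled-log-entry-2"))
    fmt.Println(segment.Len()) // two records, each 4 bytes longer than its payload
}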
diff --git a/weed/filer2/filer_notify_append.go b/weed/filer2/filer_notify_append.go
new file mode 100644
index 000000000..61bbc9c45
--- /dev/null
+++ b/weed/filer2/filer_notify_append.go
@@ -0,0 +1,73 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) appendToFile(targetFile string, data []byte) error {
+
+ assignResult, uploadResult, err2 := f.assignAndUpload(data)
+ if err2 != nil {
+ return err2
+ }
+
+ // find out existing entry
+ fullpath := util.FullPath(targetFile)
+ entry, err := f.FindEntry(context.Background(), fullpath)
+ var offset int64 = 0
+ if err == filer_pb.ErrNotFound {
+ entry = &Entry{
+ FullPath: fullpath,
+ Attr: Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else if err != nil {
+ return fmt.Errorf("find %s: %v", fullpath, err)
+ } else {
+ offset = int64(TotalSize(entry.Chunks))
+ }
+
+ // append to existing chunks
+ entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+
+ // update the entry
+ err = f.CreateEntry(context.Background(), entry, false, false)
+
+ return err
+}
+
+func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+ // assign a volume location
+ assignRequest := &operation.VolumeAssignRequest{
+ Count: 1,
+ Collection: f.metaLogCollection,
+ Replication: f.metaLogReplication,
+ WritableVolumeCount: 1,
+ }
+ assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
+ if err != nil {
+ return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+ }
+ if assignResult.Error != "" {
+ return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+ }
+
+ // upload data
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
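The append path is deliberately simple: assign a file id, upload the raw bytes, then extend the entry's chunk list at offset TotalSize(entry.Chunks). A hypothetical in-package wrapper showing the call pattern (the method name is illustrative, not part of the diff):

package filer2

// AppendLogSegment is a hypothetical helper: it only forwards to the
// unexported appendToFile, which assigns a volume, uploads the data,
// and appends the resulting chunk at the current end of the file.
func (f *Filer) AppendLogSegment(segmentPath string, data []byte) error {
    return f.appendToFile(segmentPath, data)
}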
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer2/filer_notify_test.go
index b74e2ad35..29170bfdf 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer2/filer_notify_test.go
@@ -5,13 +5,15 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/golang/protobuf/proto"
)
func TestProtoMarshalText(t *testing.T) {
oldEntry := &Entry{
- FullPath: FullPath("/this/path/to"),
+ FullPath: util.FullPath("/this/path/to"),
Attr: Attr{
Mtime: time.Now(),
Mode: 0644,
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
index 9ef1d9d48..f36c74f14 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer2/filerstore.go
@@ -1,7 +1,11 @@
package filer2
import (
- "errors"
+ "context"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -9,13 +13,129 @@ type FilerStore interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
// Initialize initializes the file store
- Initialize(configuration util.Configuration) error
- InsertEntry(*Entry) error
- UpdateEntry(*Entry) (err error)
+ Initialize(configuration util.Configuration, prefix string) error
+ InsertEntry(context.Context, *Entry) error
+ UpdateEntry(context.Context, *Entry) (err error)
// err == filer2.ErrNotFound if not found
- FindEntry(FullPath) (entry *Entry, err error)
- DeleteEntry(FullPath) (err error)
- ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+ FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
+ DeleteEntry(context.Context, util.FullPath) (err error)
+ DeleteFolderChildren(context.Context, util.FullPath) (err error)
+ ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+
+ BeginTransaction(ctx context.Context) (context.Context, error)
+ CommitTransaction(ctx context.Context) error
+ RollbackTransaction(ctx context.Context) error
+
+ Shutdown()
+}
+
+type FilerStoreWrapper struct {
+ actualStore FilerStore
+}
+
+func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
+ if innerStore, ok := store.(*FilerStoreWrapper); ok {
+ return innerStore
+ }
+ return &FilerStoreWrapper{
+ actualStore: store,
+ }
+}
+
+func (fsw *FilerStoreWrapper) GetName() string {
+ return fsw.actualStore.GetName()
+}
+
+func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error {
+ return fsw.actualStore.Initialize(configuration, prefix)
+}
+
+func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ return fsw.actualStore.InsertEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ return fsw.actualStore.UpdateEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
+ }()
+
+ entry, err = fsw.actualStore.FindEntry(ctx, fp)
+ if err != nil {
+ return nil, err
+ }
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return
+}
+
+func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
+ }()
+
+ return fsw.actualStore.DeleteEntry(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds())
+ }()
+
+ return fsw.actualStore.DeleteFolderChildren(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
+ }()
+
+ entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+ if err != nil {
+ return nil, err
+ }
+ for _, entry := range entries {
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ }
+ return entries, err
}
-var ErrNotFound = errors.New("filer: no entry is found in filer store")
+func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return fsw.actualStore.BeginTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
+ return fsw.actualStore.CommitTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
+ return fsw.actualStore.RollbackTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) Shutdown() {
+ fsw.actualStore.Shutdown()
+}
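With the interface change, every store is accessed through FilerStoreWrapper, which adds Prometheus counters and latency histograms and normalizes chunk file ids on both the write path (BeforeEntrySerialization) and the read path (AfterEntryDeserialization). A small wiring sketch, noting that wrapping is idempotent:

package filer2

// wireStore is a hypothetical illustration: NewFilerStoreWrapper
// detects an already-wrapped store and returns it unchanged, so the
// metrics layer is applied exactly once no matter how many times a
// store passes through this function.
func wireStore(raw FilerStore) *FilerStoreWrapper {
    wrapped := NewFilerStoreWrapper(raw)
    return NewFilerStoreWrapper(wrapped) // same wrapper, not nested
}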
diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go
deleted file mode 100644
index be6e34431..000000000
--- a/weed/filer2/fullpath.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package filer2
-
-import (
- "path/filepath"
- "strings"
-)
-
-type FullPath string
-
-func NewFullPath(dir, name string) FullPath {
- if strings.HasSuffix(dir, "/") {
- return FullPath(dir + name)
- }
- return FullPath(dir + "/" + name)
-}
-
-func (fp FullPath) DirAndName() (string, string) {
- dir, name := filepath.Split(string(fp))
- if dir == "/" {
- return dir, name
- }
- if len(dir) < 1 {
- return "/", ""
- }
- return dir[:len(dir)-1], name
-}
-
-func (fp FullPath) Name() string {
- _, name := filepath.Split(string(fp))
- return name
-}
diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go
index 179107e2c..31919ca49 100644
--- a/weed/filer2/leveldb/leveldb_store.go
+++ b/weed/filer2/leveldb/leveldb_store.go
@@ -2,13 +2,18 @@ package leveldb
import (
"bytes"
+ "context"
"fmt"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/syndtr/goleveldb/leveldb"
- leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
)
const (
@@ -27,8 +32,8 @@ func (store *LevelDBStore) GetName() string {
return "leveldb"
}
-func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) {
- dir := configuration.GetString("dir")
+func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
return store.initialize(dir)
}
@@ -38,14 +43,35 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
- if store.db, err = leveldb.OpenFile(dir, nil); err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
- return
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10,
+ }
+
+ if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
+ if errors.IsCorrupted(err) {
+ store.db, err = leveldb.RecoverFile(dir, opts)
+ }
+ if err != nil {
+ glog.Infof("filer store open dir %s: %v", dir, err)
+ return
+ }
}
return
}
-func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDBStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
key := genKey(entry.DirAndName())
value, err := entry.EncodeAttributesAndChunks()
@@ -64,18 +90,18 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
@@ -94,7 +120,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En
return entry, nil
}
-func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil)
@@ -105,7 +131,35 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
return nil
}
-func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ batch := new(leveldb.Batch)
+
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+ iter := store.db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete([]byte(genKey(string(fullpath), fileName)))
+ }
+ iter.Release()
+
+ err = store.db.Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@@ -128,7 +182,7 @@ func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startF
break
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
@@ -149,7 +203,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -168,3 +222,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *LevelDBStore) Shutdown() {
+ store.db.Close()
+}
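The leveldb layout relies on a separator byte between directory and file name, so the new DeleteFolderChildren (and ListDirectoryEntries) can enumerate one directory with a prefix scan. A standalone sketch, assuming DIR_FILE_SEPARATOR is the zero byte:

package main

import "fmt"

const dirFileSeparator = byte(0x00) // assumed value of DIR_FILE_SEPARATOR

// genKey mirrors the store's layout: directory bytes, one separator
// byte, then the file name. Scanning keys prefixed with "dir\x00"
// therefore yields exactly that directory's direct children.
func genKey(dirPath, fileName string) []byte {
    key := []byte(dirPath)
    key = append(key, dirFileSeparator)
    return append(key, []byte(fileName)...)
}

func main() {
    fmt.Printf("%q\n", genKey("/home/chris", "file1.jpg")) // "/home/chris\x00file1.jpg"
}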
diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go
index 5b214558f..77df07a9b 100644
--- a/weed/filer2/leveldb/leveldb_store_test.go
+++ b/weed/filer2/leveldb/leveldb_store_test.go
@@ -1,14 +1,17 @@
package leveldb
import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "context"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -16,7 +19,9 @@ func TestCreateAndFind(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
entry1 := &filer2.Entry{
FullPath: fullpath,
@@ -27,12 +32,12 @@ func TestCreateAndFind(t *testing.T) {
},
}
- if err := filer.CreateEntry(entry1); err != nil {
+ if err := filer.CreateEntry(ctx, entry1, false, false); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
- entry, err := filer.FindEntry(fullpath)
+ entry, err := filer.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
@@ -45,14 +50,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -61,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -69,8 +74,10 @@ func TestEmptyRoot(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
+ ctx := context.Background()
+
// checking one upper directory
- entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go
new file mode 100644
index 000000000..c907e8746
--- /dev/null
+++ b/weed/filer2/leveldb2/leveldb2_store.go
@@ -0,0 +1,248 @@
+package leveldb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
+}
+
+type LevelDB2Store struct {
+ dbs []*leveldb.DB
+ dbCount int
+}
+
+func (store *LevelDB2Store) GetName() string {
+ return "leveldb2"
+}
+
+func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir, 8)
+}
+
+func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
+ glog.Infof("filer store leveldb2 dir: %s", dir)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+
+ for d := 0; d < dbCount; d++ {
+ dbFolder := fmt.Sprintf("%s/%02d", dir, d)
+ os.MkdirAll(dbFolder, 0755)
+ db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
+ if dbErr != nil {
+ glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+ return dbErr
+ }
+ store.dbs = append(store.dbs, db)
+ }
+ store.dbCount = dbCount
+
+ return
+}
+
+func (store *LevelDB2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDB2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ dir, name := entry.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ err = store.dbs[partitionId].Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+ dir, name := fullpath.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ data, err := store.dbs[partitionId].Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(data)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ dir, name := fullpath.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ err = store.dbs[partitionId].Delete(key, nil)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
+
+ batch := new(leveldb.Batch)
+
+ iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete(append(directoryPrefix, []byte(fileName)...))
+ }
+ iter.Release()
+
+ err = store.dbs[partitionId].Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
+ lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
+
+ iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !inclusive {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ entry := &filer2.Entry{
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ }
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+
+ if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ entries = append(entries, entry)
+ }
+ iter.Release()
+
+ return entries, err
+}
+
+func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {
+ key, partitionId = hashToBytes(dirPath, dbCount)
+ key = append(key, []byte(fileName)...)
+ return key, partitionId
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
+ keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount)
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix, partitionId
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hash directory, and use last byte for partitioning
+func hashToBytes(dir string, dbCount int) ([]byte, int) {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
+ x := b[len(b)-1]
+
+ return b, int(x) % dbCount
+}
+
+func (store *LevelDB2Store) Shutdown() {
+ for d := 0; d < store.dbCount; d++ {
+ store.dbs[d].Close()
+ }
+}
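leveldb2 splits the store into eight leveldb instances. The sharding rule sits in hashToBytes: the md5 of the directory becomes the key prefix (replacing the raw path, which also bounds key size), and the hash's last byte selects the shard, so one directory's children always share a shard and stay prefix-scannable. A standalone restatement:

package main

import (
    "crypto/md5"
    "fmt"
    "io"
)

// partition mirrors leveldb2's hashToBytes: md5(dir) is the key
// prefix, and its last byte modulo dbCount picks the shard.
func partition(dir string, dbCount int) ([]byte, int) {
    h := md5.New()
    io.WriteString(h, dir)
    b := h.Sum(nil)
    return b, int(b[len(b)-1]) % dbCount
}

func main() {
    prefix, shard := partition("/home/chris", 8)
    fmt.Printf("prefix %x -> shard %d\n", prefix, shard)
}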
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go
new file mode 100644
index 000000000..b211d86e4
--- /dev/null
+++ b/weed/filer2/leveldb2/leveldb2_store_test.go
@@ -0,0 +1,90 @@
+package leveldb
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &LevelDB2Store{}
+ store.initialize(dir, 2)
+ filer.SetStore(store)
+ filer.DisableDirectoryCache()
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer2.Entry{
+ FullPath: fullpath,
+ Attr: filer2.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := filer.CreateEntry(ctx, entry1, false, false); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := filer.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // checking one upper directory
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // checking one upper directory
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &LevelDB2Store{}
+ store.initialize(dir, 2)
+ filer.SetStore(store)
+ filer.DisableDirectoryCache()
+
+ ctx := context.Background()
+
+ // checking one upper directory
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go
deleted file mode 100644
index 062f1cd1c..000000000
--- a/weed/filer2/memdb/memdb_store.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package memdb
-
-import (
- "fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/google/btree"
- "strings"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &MemDbStore{})
-}
-
-type MemDbStore struct {
- tree *btree.BTree
-}
-
-type entryItem struct {
- *filer2.Entry
-}
-
-func (a entryItem) Less(b btree.Item) bool {
- return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0
-}
-
-func (store *MemDbStore) GetName() string {
- return "memory"
-}
-
-func (store *MemDbStore) Initialize(configuration util.Configuration) (err error) {
- store.tree = btree.New(8)
- return nil
-}
-
-func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
- // println("inserting", entry.FullPath)
- store.tree.ReplaceOrInsert(entryItem{entry})
- return nil
-}
-
-func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
- if _, err = store.FindEntry(entry.FullPath); err != nil {
- return fmt.Errorf("no such file %s : %v", entry.FullPath, err)
- }
- store.tree.ReplaceOrInsert(entryItem{entry})
- return nil
-}
-
-func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
- if item == nil {
- return nil, filer2.ErrNotFound
- }
- entry = item.(entryItem).Entry
- return entry, nil
-}
-
-func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
- store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}})
- return nil
-}
-
-func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
-
- startFrom := string(fullpath)
- if startFileName != "" {
- startFrom = startFrom + "/" + startFileName
- }
-
- store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}},
- func(item btree.Item) bool {
- if limit <= 0 {
- return false
- }
- entry := item.(entryItem).Entry
- // println("checking", entry.FullPath)
-
- if entry.FullPath == fullpath {
- // skipping the current directory
- // println("skipping the folder", entry.FullPath)
- return true
- }
-
- dir, name := entry.FullPath.DirAndName()
- if name == startFileName {
- if inclusive {
- limit--
- entries = append(entries, entry)
- }
- return true
- }
-
- // only iterate the same prefix
- if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) {
- // println("breaking from", entry.FullPath)
- return false
- }
-
- if dir != string(fullpath) {
- // this could be items in deeper directories
- // println("skipping deeper folder", entry.FullPath)
- return true
- }
- // now process the directory items
- // println("adding entry", entry.FullPath)
- limit--
- entries = append(entries, entry)
- return true
- },
- )
- return entries, nil
-}
diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go
deleted file mode 100644
index cf813e04b..000000000
--- a/weed/filer2/memdb/memdb_store_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package memdb
-
-import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "testing"
-)
-
-func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil)
- store := &MemDbStore{}
- store.Initialize(nil)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
-
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
-
- entry1 := &filer2.Entry{
- FullPath: fullpath,
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- if err := filer.CreateEntry(entry1); err != nil {
- t.Errorf("create entry %v: %v", entry1.FullPath, err)
- return
- }
-
- entry, err := filer.FindEntry(fullpath)
-
- if err != nil {
- t.Errorf("find entry: %v", err)
- return
- }
-
- if entry.FullPath != entry1.FullPath {
- t.Errorf("find wrong entry: %v", entry.FullPath)
- return
- }
-
-}
-
-func TestCreateFileAndList(t *testing.T) {
- filer := filer2.NewFiler(nil)
- store := &MemDbStore{}
- store.Initialize(nil)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
-
- entry1 := &filer2.Entry{
- FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"),
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- entry2 := &filer2.Entry{
- FullPath: filer2.FullPath("/home/chris/this/is/one/file2.jpg"),
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- filer.CreateEntry(entry1)
- filer.CreateEntry(entry2)
-
- // checking the 2 files
- entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
-
- if err != nil {
- t.Errorf("list entries: %v", err)
- return
- }
-
- if len(entries) != 2 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- if entries[0].FullPath != entry1.FullPath {
- t.Errorf("find wrong entry 1: %v", entries[0].FullPath)
- return
- }
-
- if entries[1].FullPath != entry2.FullPath {
- t.Errorf("find wrong entry 2: %v", entries[1].FullPath)
- return
- }
-
- // checking the offset
- entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // checking root directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // add file3
- file3Path := filer2.FullPath("/home/chris/this/is/file3.jpg")
- entry3 := &filer2.Entry{
- FullPath: file3Path,
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
- filer.CreateEntry(entry3)
-
- // checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 2 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // delete file and count
- filer.DeleteEntryMetaAndData(file3Path, false, false)
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
-}
diff --git a/weed/filer2/meta_aggregator.go b/weed/filer2/meta_aggregator.go
new file mode 100644
index 000000000..2f707b921
--- /dev/null
+++ b/weed/filer2/meta_aggregator.go
@@ -0,0 +1,91 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type MetaAggregator struct {
+ filers []string
+ grpcDialOption grpc.DialOption
+ MetaLogBuffer *log_buffer.LogBuffer
+ // notifying clients
+ ListenersLock sync.Mutex
+ ListenersCond *sync.Cond
+}
+
+func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator {
+ t := &MetaAggregator{
+ filers: filers,
+ grpcDialOption: grpcDialOption,
+ }
+ t.ListenersCond = sync.NewCond(&t.ListenersLock)
+ t.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, nil, func() {
+ t.ListenersCond.Broadcast()
+ })
+ return t
+}
+
+func (ma *MetaAggregator) StartLoopSubscribe(lastTsNs int64) {
+ for _, filer := range ma.filers {
+ go ma.subscribeToOneFiler(filer, lastTsNs)
+ }
+}
+
+func (ma *MetaAggregator) subscribeToOneFiler(filer string, lastTsNs int64) {
+
+ processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return err
+ }
+ dir := event.Directory
+ // println("received meta change", dir, "size", len(data))
+ ma.MetaLogBuffer.AddToBuffer([]byte(dir), data)
+ return nil
+ }
+
+ for {
+ err := pb.WithFilerClient(filer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.SubscribeLocalMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "filer",
+ PathPrefix: "/",
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.V(0).Infof("subscribing remote %s meta change: %v", filer, err)
+ time.Sleep(1733 * time.Millisecond)
+ }
+ }
+}
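The aggregator's lifecycle is: construct, then start one subscription goroutine per peer. Each goroutine tails SubscribeLocalMetadata from lastTsNs, retries after a short sleep on error, and folds every event into the shared MetaLogBuffer that local subscribers block on. A hypothetical wiring sketch inside the package:

package filer2

import "google.golang.org/grpc"

// startAggregation is a hypothetical example of the intended use:
// the returned MetaAggregator keeps one goroutine per peer filer
// feeding MetaLogBuffer, and ListenersCond wakes blocked readers
// whenever new events arrive.
func startAggregation(peers []string, dialOption grpc.DialOption, lastTsNs int64) *MetaAggregator {
    ma := NewMetaAggregator(peers, dialOption)
    ma.StartLoopSubscribe(lastTsNs)
    return ma
}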
diff --git a/weed/filer2/mongodb/mongodb_store.go b/weed/filer2/mongodb/mongodb_store.go
new file mode 100644
index 000000000..375a457a4
--- /dev/null
+++ b/weed/filer2/mongodb/mongodb_store.go
@@ -0,0 +1,210 @@
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "go.mongodb.org/mongo-driver/x/bsonx"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &MongodbStore{})
+}
+
+type MongodbStore struct {
+ connect *mongo.Client
+ database string
+ collectionName string
+}
+
+type Model struct {
+ Directory string `bson:"directory"`
+ Name string `bson:"name"`
+ Meta []byte `bson:"meta"`
+}
+
+func (store *MongodbStore) GetName() string {
+ return "mongodb"
+}
+
+func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ store.database = configuration.GetString(prefix + "database")
+ store.collectionName = "filemeta"
+ poolSize := configuration.GetInt(prefix + "option_pool_size")
+ return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize))
+}
+
+func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ opts := options.Client().ApplyURI(uri)
+
+ if poolSize > 0 {
+ opts.SetMaxPoolSize(poolSize)
+ }
+
+ client, err := mongo.Connect(ctx, opts)
+ if err != nil {
+ return err
+ }
+
+ c := client.Database(store.database).Collection(store.collectionName)
+ err = store.indexUnique(c)
+ store.connect = client
+ return err
+}
+
+func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error {
+ _, err := c.Indexes().CreateOne(context.Background(), index, opts)
+ return err
+}
+
+func (store *MongodbStore) indexUnique(c *mongo.Collection) error {
+ opts := options.CreateIndexes().SetMaxTime(10 * time.Second)
+
+ unique := new(bool)
+ *unique = true
+
+ index := mongo.IndexModel{
+ Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}},
+ Options: &options.IndexOptions{
+ Unique: unique,
+ },
+ }
+
+ return store.createIndex(c, index, opts)
+}
+
+func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *MongodbStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ dir, name := entry.FullPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ c := store.connect.Database(store.database).Collection(store.collectionName)
+
+ _, err = c.InsertOne(ctx, Model{
+ Directory: dir,
+ Name: name,
+ Meta: meta,
+ })
+
+ if err != nil {
+ return fmt.Errorf("InsertOne %s: %v", entry.FullPath, err)
+ }
+ return nil
+}
+
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ dir, name := fullpath.DirAndName()
+ var data Model
+
+ var where = bson.M{"directory": dir, "name": name}
+ err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+ if err == mongo.ErrNoDocuments {
+ return nil, filer_pb.ErrNotFound
+ }
+ if err != nil {
+ return nil, fmt.Errorf("find %s: %v", fullpath, err)
+ }
+
+ if len(data.Meta) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+
+ err = entry.DecodeAttributesAndChunks(data.Meta)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ dir, name := fullpath.DirAndName()
+
+ where := bson.M{"directory": dir, "name": name}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ where := bson.M{"directory": fullpath}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+
+ var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
+ if inclusive {
+ where["name"] = bson.M{
+ "$gte": startFileName,
+ }
+ }
+ optLimit := int64(limit)
+ opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}}
+ cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts)
+ if err != nil {
+ return nil, fmt.Errorf("list %s: %v", fullpath, err)
+ }
+ for cur.Next(ctx) {
+ var data Model
+ err := cur.Decode(&data)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return nil, err
+ }
+
+ entry := &filer2.Entry{
+ FullPath: util.NewFullPath(string(fullpath), data.Name),
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+
+ entries = append(entries, entry)
+ }
+
+ if err := cur.Close(ctx); err != nil {
+ glog.V(0).Infof("list iterator close: %v", err)
+ }
+
+ return entries, err
+}
+
+func (store *MongodbStore) Shutdown() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ store.connect.Disconnect(ctx)
+}
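Pagination in the mongodb store is driven entirely by the filter: strictly greater than the cursor name by default, greater-or-equal when the start file should be included, sorted ascending and capped by limit. A standalone sketch of just the predicate:

package main

import (
    "fmt"

    "go.mongodb.org/mongo-driver/bson"
)

// listFilter mirrors the store's pagination predicate: $gt skips the
// cursor entry, $gte includes it when the caller asks for inclusive.
func listFilter(dir, startFileName string, inclusive bool) bson.M {
    op := "$gt"
    if inclusive {
        op = "$gte"
    }
    return bson.M{"directory": dir, "name": bson.M{op: startFileName}}
}

func main() {
    fmt.Println(listFilter("/home/chris", "file1.jpg", false))
}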
diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go
index e18299bd2..63d99cd9d 100644
--- a/weed/filer2/mysql/mysql_store.go
+++ b/weed/filer2/mysql/mysql_store.go
@@ -26,28 +26,35 @@ func (store *MysqlStore) GetName() string {
return "mysql"
}
-func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) {
+func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetBool(prefix+"interpolateParams"),
)
}
-func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) {
+func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
+ interpolateParams bool) (err error) {
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
+ store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+ if interpolateParams {
+ sqlUrl += "&interpolateParams=true"
+ }
+
var dbErr error
store.DB, dbErr = sql.Open("mysql", sqlUrl)
if dbErr != nil {
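CONNECTION_URL_PATTERN itself is outside this hunk; the "&" prefix on the appended flag implies the DSN already carries a query string, so the sketch below assumes the usual go-sql-driver form. interpolateParams=true expands placeholders client-side, saving a prepare round-trip per query.

package main

import "fmt"

// dsn is a hypothetical reconstruction of the store's DSN build,
// assuming the pattern "%s:%s@tcp(%s:%d)/%s?charset=utf8".
func dsn(user, password, host string, port int, db string, interpolateParams bool) string {
    s := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8", user, password, host, port, db)
    if interpolateParams {
        s += "&interpolateParams=true"
    }
    return s
}

func main() {
    fmt.Println(dsn("seaweed", "secret", "localhost", 3306, "seaweedfs", true))
    // seaweed:secret@tcp(localhost:3306)/seaweedfs?charset=utf8&interpolateParams=true
}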
diff --git a/weed/filer2/postgres/README.txt b/weed/filer2/postgres/README.txt
index ef2ef683b..cb0c99c63 100644
--- a/weed/filer2/postgres/README.txt
+++ b/weed/filer2/postgres/README.txt
@@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs
CREATE TABLE IF NOT EXISTS filemeta (
dirhash BIGINT,
- name VARCHAR(1000),
- directory VARCHAR(4096),
+ name VARCHAR(65535),
+ directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go
index ffd3d1e01..51c069aae 100644
--- a/weed/filer2/postgres/postgres_store.go
+++ b/weed/filer2/postgres/postgres_store.go
@@ -11,7 +11,7 @@ import (
)
const (
- CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30"
+ CONNECTION_URL_PATTERN = "host=%s port=%d user=%s sslmode=%s connect_timeout=30"
)
func init() {
@@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string {
return "postgres"
}
-func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) {
+func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetString("sslmode"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"sslmode"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
)
}
@@ -45,10 +45,17 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
+ store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
- sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode)
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode)
+ if password != "" {
+ sqlUrl += " password=" + password
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ }
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
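The effect of the postgres change is easiest to see on the final connection string: password and dbname are only appended when set, so a blank password no longer produces an invalid "password=" fragment. A standalone sketch mirroring the pattern shown above:

package main

import "fmt"

// pgDSN mirrors the new conditional DSN build in initialize.
func pgDSN(host string, port int, user, sslmode, password, database string) string {
    s := fmt.Sprintf("host=%s port=%d user=%s sslmode=%s connect_timeout=30", host, port, user, sslmode)
    if password != "" {
        s += " password=" + password
    }
    if database != "" {
        s += " dbname=" + database
    }
    return s
}

func main() {
    fmt.Println(pgDSN("localhost", 5432, "postgres", "disable", "", "seaweedfs"))
    // host=localhost port=5432 user=postgres sslmode=disable connect_timeout=30 dbname=seaweedfs
}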
diff --git a/weed/filer2/reader_at.go b/weed/filer2/reader_at.go
new file mode 100644
index 000000000..11a80443f
--- /dev/null
+++ b/weed/filer2/reader_at.go
@@ -0,0 +1,162 @@
+package filer2
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+type ChunkReadAt struct {
+ masterClient *wdclient.MasterClient
+ chunkViews []*ChunkView
+ buffer []byte
+ bufferOffset int64
+ lookupFileId func(fileId string) (targetUrl string, err error)
+ readerLock sync.Mutex
+
+ chunkCache *chunk_cache.ChunkCache
+}
+
+// var _ = io.ReaderAt(&ChunkReadAt{})
+
+type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
+
+func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
+ return func(fileId string) (targetUrl string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ vid := VolumeId(fileId)
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return err
+ }
+
+ locations := resp.LocationsMap[vid]
+ if locations == nil || len(locations.Locations) == 0 {
+ glog.V(0).Infof("failed to locate %s", fileId)
+ return fmt.Errorf("failed to locate %s", fileId)
+ }
+
+ volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
+
+ targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+
+ return nil
+ })
+ return
+ }
+}
+
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
+
+ return &ChunkReadAt{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ bufferOffset: -1,
+ chunkCache: chunkCache,
+ }
+}
+
+func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
+
+ c.readerLock.Lock()
+ defer c.readerLock.Unlock()
+
+ for n < len(p) && err == nil {
+ readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
+ n += readCount
+ err = readErr
+ if readCount == 0 {
+ return n, io.EOF
+ }
+ }
+ return
+}
+
+func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
+
+ var found bool
+ for _, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ found = true
+ if c.bufferOffset != chunk.LogicOffset {
+ c.buffer, err = c.fetchChunkData(chunk)
+ if err != nil {
+ glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+ break
+ }
+ // record the offset only after a successful fetch, so a failed
+ // fetch does not leave a stale buffer marked as valid
+ c.bufferOffset = chunk.LogicOffset
+ }
+ break
+ }
+ }
+ if !found {
+ return 0, io.EOF
+ }
+
+ if err == nil {
+ n = copy(p, c.buffer[offset-c.bufferOffset:])
+ }
+
+ // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
+
+ return
+
+}
+
+func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
+
+ glog.V(4).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ hasDataInCache := false
+ chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
+ if chunkData != nil {
+ glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+ hasDataInCache = true
+ } else {
+ chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
+ glog.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+ return nil, fmt.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+ }
+
+ data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
+
+ if !hasDataInCache {
+ c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+ }
+
+ return data, nil
+}
+
+func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+
+ urlString, err := c.lookupFileId(fileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(0).Infof("read %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+
+ return buffer.Bytes(), nil
+}
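A hypothetical caller, to show the contract: ChunkReadAt behaves like an io.ReaderAt over the logical file, locating the chunk whose [LogicOffset, LogicOffset+Size) range covers each offset and serving it from the single-chunk buffer or the shared chunk cache; reading past the last chunk yields io.EOF.

package filer2

import (
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)

// readRange is a hypothetical usage sketch: lookups go through
// LookupFn(filerClient), and a short read past the last chunk
// returns io.EOF alongside the bytes that were read.
func readRange(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, cache *chunk_cache.ChunkCache, offset int64, size int) ([]byte, error) {
    reader := NewChunkReaderAtFromClient(filerClient, chunkViews, cache)
    buf := make([]byte, size)
    n, err := reader.ReadAt(buf, offset)
    return buf[:n], err
}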
diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go
index 4f74a8a22..eaaecb740 100644
--- a/weed/filer2/redis/redis_cluster_store.go
+++ b/weed/filer2/redis/redis_cluster_store.go
@@ -18,15 +18,25 @@ func (store *RedisClusterStore) GetName() string {
return "redis_cluster"
}
-func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", true)
+ configuration.SetDefault(prefix+"routeByLatency", true)
+
return store.initialize(
- configuration.GetStringSlice("addresses"),
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
)
}
-func (store *RedisClusterStore) initialize(addresses []string) (err error) {
+func (store *RedisClusterStore) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
store.Client = redis.NewClusterClient(&redis.ClusterOptions{
- Addrs: addresses,
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
})
return
}
diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go
index c56fa014c..9debdb070 100644
--- a/weed/filer2/redis/redis_store.go
+++ b/weed/filer2/redis/redis_store.go
@@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string {
return "redis"
}
-func (store *RedisStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("address"),
- configuration.GetString("password"),
- configuration.GetInt("database"),
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
)
}
diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go
index 7fd7e1180..e5b9e8840 100644
--- a/weed/filer2/redis/universal_redis_store.go
+++ b/weed/filer2/redis/universal_redis_store.go
@@ -1,13 +1,18 @@
package redis
import (
+ "context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/go-redis/redis"
"sort"
"strings"
"time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
@@ -18,7 +23,17 @@ type UniversalRedisStore struct {
Client redis.UniversalClient
}
-func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
@@ -42,16 +57,16 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
data, err := store.Client.Get(string(fullpath)).Result()
if err == redis.Nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
@@ -69,7 +84,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi
return entry, nil
}
-func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
_, err = store.Client.Del(string(fullpath)).Result()
@@ -88,11 +103,30 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err
return nil
}
-func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
+func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
if err != nil {
+ return fmt.Errorf("delete folder %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ dirListKey := genDirectoryListKey(string(fullpath))
+ members, err := store.Client.SMembers(dirListKey).Result()
+ if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
@@ -125,11 +159,18 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath,
// fetch entry meta
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
- entry, err := store.FindEntry(path)
+ path := util.NewFullPath(string(fullpath), fileName)
+ entry, err := store.FindEntry(ctx, path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
} else {
+ if entry.TtlSec > 0 { // lazily purge entries whose TTL has expired
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.SRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
entries = append(entries, entry)
}
}
@@ -140,3 +181,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath,
func genDirectoryListKey(dir string) (dirList string) {
return dir + DIR_LIST_MARKER
}
+
+func (store *UniversalRedisStore) Shutdown() {
+ store.Client.Close()
+}
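To make the layout above concrete: the v1 store keeps one string key per entry (the full path, with an optional TTL) plus one unsorted SET per directory, keyed by the directory path with the "\x00" marker appended; DeleteFolderChildren and ListDirectoryEntries both walk that set, listings sort the names client-side, and TTL-expired entries are purged lazily when a listing touches them. A standalone sketch of the key scheme against go-redis directly (server address assumed, not SeaweedFS code):

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// one key per entry, one SET per directory listing its child names
	client.Set("/buckets/b1/notes.txt", []byte("encoded entry"), 0)
	client.SAdd("/buckets/b1"+"\x00", "notes.txt")

	// a directory listing is an unordered SMEMBERS; the store sorts
	// the returned names client-side before fetching each entry
	names, _ := client.SMembers("/buckets/b1" + "\x00").Result()
	fmt.Println(names)
}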
diff --git a/weed/filer2/redis2/redis_cluster_store.go b/weed/filer2/redis2/redis_cluster_store.go
new file mode 100644
index 000000000..b252eabab
--- /dev/null
+++ b/weed/filer2/redis2/redis_cluster_store.go
@@ -0,0 +1,42 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
+}
+
+type RedisCluster2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *RedisCluster2Store) GetName() string {
+ return "redis_cluster2"
+}
+
+func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", true)
+ configuration.SetDefault(prefix+"routeByLatency", true)
+
+ return store.initialize(
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
+ )
+}
+
+func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
+ store.Client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
+ })
+ return
+}
diff --git a/weed/filer2/redis2/redis_store.go b/weed/filer2/redis2/redis_store.go
new file mode 100644
index 000000000..1e2a20043
--- /dev/null
+++ b/weed/filer2/redis2/redis_store.go
@@ -0,0 +1,36 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &Redis2Store{})
+}
+
+type Redis2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *Redis2Store) GetName() string {
+ return "redis2"
+}
+
+func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
+ )
+}
+
+func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
+ store.Client = redis.NewClient(&redis.Options{
+ Addr: hostPort,
+ Password: password,
+ DB: database,
+ })
+ return
+}
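Both new files follow the same registration pattern as the existing stores: init() appends an instance to filer2.Stores, and the filer later initializes whichever registered store matches the enabled configuration section. A hedged sketch of that consumer side (the selection loop and the ".enabled" key are illustrative, not the actual filer code):

package example

import (
	"github.com/chrislusf/seaweedfs/weed/filer2"
	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis2" // init() registers redis2 and redis_cluster2
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// pickStore walks the registry populated by the init() functions and
// initializes the first store whose section is enabled.
func pickStore(config util.Configuration) filer2.FilerStore {
	for _, store := range filer2.Stores {
		name := store.GetName()
		if config.GetBool(name + ".enabled") {
			if err := store.Initialize(config, name+"."); err != nil {
				glog.Fatalf("initialize store %s: %v", name, err)
			}
			return store
		}
	}
	return nil
}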
diff --git a/weed/filer2/redis2/universal_redis_store.go b/weed/filer2/redis2/universal_redis_store.go
new file mode 100644
index 000000000..420336b46
--- /dev/null
+++ b/weed/filer2/redis2/universal_redis_store.go
@@ -0,0 +1,162 @@
+package redis2
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_LIST_MARKER = "\x00"
+)
+
+type UniversalRedis2Store struct {
+ Client redis.UniversalClient
+}
+
+func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil // the redis2 store has no transaction support; Begin/Commit/Rollback are no-ops
+}
+func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := entry.FullPath.DirAndName()
+ if name != "" {
+ if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ data, err := store.Client.Get(string(fullpath)).Result()
+ if err == redis.Nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks([]byte(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ _, err = store.Client.Del(string(fullpath)).Result()
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ dir, name := fullpath.DirAndName()
+ if name != "" {
+ _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
+ if err != nil {
+ return fmt.Errorf("delete folder %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ dirListKey := genDirectoryListKey(string(fullpath))
+ start := int64(0)
+ if startFileName != "" {
+ start, _ = store.Client.ZRank(dirListKey, startFileName).Result() // a missing startFileName yields rank 0
+ if !inclusive {
+ start++
+ }
+ }
+ members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+
+ // fetch entry meta
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ entry, err := store.FindEntry(ctx, path)
+ if err != nil {
+ glog.V(0).Infof("list %s : %v", path, err)
+ } else {
+ if entry.TtlSec > 0 { // lazily purge entries whose TTL has expired
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.ZRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ }
+
+ return entries, err
+}
+
+func genDirectoryListKey(dir string) (dirList string) {
+ return dir + DIR_LIST_MARKER
+}
+
+func (store *UniversalRedis2Store) Shutdown() {
+ store.Client.Close()
+}
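The structural change from the v1 store is the directory index: children go into a sorted set via ZAddNX at score 0, so Redis itself orders the members lexicographically and ZRank/ZRange implement the startFileName/limit pagination in ListDirectoryEntries with no client-side sort. A standalone sketch of that pagination (go-redis v6 API, local server assumed):

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	dirListKey := "/buckets/b1" + "\x00"

	for _, name := range []string{"c.txt", "a.txt", "b.txt"} {
		// equal scores make Redis fall back to lexicographic member order
		client.ZAddNX(dirListKey, redis.Z{Score: 0, Member: name})
	}

	// resume a listing after "a.txt" with a page size of 2
	start, _ := client.ZRank(dirListKey, "a.txt").Result()
	page, _ := client.ZRange(dirListKey, start+1, start+2).Result()
	fmt.Println(page) // [b.txt c.txt]
}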
diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go
new file mode 100644
index 000000000..033a8dd13
--- /dev/null
+++ b/weed/filer2/stream.go
@@ -0,0 +1,199 @@
+package filer2
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
+
+ chunkViews := ViewFromChunks(chunks, offset, size)
+
+ fileId2Url := make(map[string]string)
+
+ for _, chunkView := range chunkViews {
+
+ urlString, err := masterClient.LookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ fileId2Url[chunkView.FileId] = urlString
+ }
+
+ for _, chunkView := range chunkViews {
+
+ urlString := fileId2Url[chunkView.FileId]
+ err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ w.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ }
+
+ return nil
+
+}
+
+// ---------------- ReadAllReader ----------------------------------
+
+func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
+
+ buffer := bytes.Buffer{}
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ lookupFileId := func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ }
+
+ for _, chunkView := range chunkViews {
+ urlString, err := lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ }
+ return buffer.Bytes(), nil
+}
+
+// ---------------- ChunkStreamReader ----------------------------------
+type ChunkStreamReader struct {
+ chunkViews []*ChunkView
+ logicOffset int64
+ buffer []byte
+ bufferOffset int64
+ bufferPos int
+ chunkIndex int
+ lookupFileId LookupFileIdFunctionType
+}
+
+var _ = io.ReadSeeker(&ChunkStreamReader{})
+
+func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ },
+ }
+}
+
+func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ }
+}
+
+func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
+ for n < len(p) {
+ if c.isBufferEmpty() {
+ if c.chunkIndex >= len(c.chunkViews) {
+ return n, io.EOF
+ }
+ chunkView := c.chunkViews[c.chunkIndex]
+ if err = c.fetchChunkToBuffer(chunkView); err != nil {
+ return n, err
+ }
+ c.chunkIndex++
+ }
+ t := copy(p[n:], c.buffer[c.bufferPos:])
+ c.bufferPos += t
+ n += t
+ }
+ return
+}
+
+func (c *ChunkStreamReader) isBufferEmpty() bool {
+ return len(c.buffer) <= c.bufferPos
+}
+
+func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
+
+ var totalSize int64
+ for _, chunk := range c.chunkViews {
+ totalSize += int64(chunk.Size)
+ }
+
+ var err error
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += c.bufferOffset + int64(c.bufferPos)
+ case io.SeekEnd:
+ offset = totalSize + offset
+ }
+ if offset > totalSize {
+ err = io.ErrUnexpectedEOF
+ }
+
+ for i, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
+ if fetchErr := c.fetchChunkToBuffer(chunk); fetchErr != nil {
+ return offset, fetchErr
+ }
+ c.chunkIndex = i + 1
+ }
+ break
+ }
+ }
+ c.bufferPos = int(offset - c.bufferOffset)
+
+ return offset, err
+
+}
+
+func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
+ urlString, err := c.lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ c.buffer = buffer.Bytes()
+ c.bufferPos = 0
+ c.bufferOffset = chunkView.LogicOffset
+
+ // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ return nil
+}
+
+func (c *ChunkStreamReader) Close() {
+ // TODO try to release and reuse buffer
+}
+
+func VolumeId(fileId string) string {
+ lastCommaIndex := strings.LastIndex(fileId, ",")
+ if lastCommaIndex > 0 {
+ return fileId[:lastCommaIndex]
+ }
+ return fileId
+}
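Because ChunkStreamReader satisfies io.ReadSeeker, standard library helpers compose with it directly while it fetches one chunk at a time into its internal buffer. A hedged usage sketch (the function shape, dst, and the 128-byte header offset are illustrative; the constructor and types come from the code above):

package example

import (
	"io"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

// copyTail streams everything after a fixed-size header to dst,
// letting io.Copy drive ChunkStreamReader's chunk-by-chunk fetching.
func copyTail(dst io.Writer, masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) error {
	reader := filer2.NewChunkStreamReaderFromFiler(masterClient, chunks)
	defer reader.Close()

	if _, err := reader.Seek(128, io.SeekStart); err != nil {
		return err
	}
	_, err := io.Copy(dst, reader)
	return err
}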
diff --git a/weed/filer2/topics.go b/weed/filer2/topics.go
new file mode 100644
index 000000000..9c6e5c88d
--- /dev/null
+++ b/weed/filer2/topics.go
@@ -0,0 +1,6 @@
+package filer2
+
+const (
+ TopicsDir = "/topics"
+ SystemLogDir = TopicsDir + "/.system/log"
+)