Diffstat (limited to 'weed/filer')
47 files changed, 266 insertions, 266 deletions
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go index 1d175651d..b2a74fb74 100644 --- a/weed/filer/abstract_sql/abstract_sql_store.go +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -5,7 +5,7 @@ import ( "database/sql" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" "github.com/seaweedfs/seaweedfs/weed/util" @@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") { // now the insert failed possibly due to duplication constraints sqlInsert = "falls back to update" - glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err) + log.V(2).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err) res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) } if err != nil { @@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat } } - glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) + log.V(-1).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath)) if err != nil { return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) @@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, var name string var data []byte if err = rows.Scan(&name, &data); err != nil { - glog.V(0).Infof("scan %s : %v", dirPath, err) + log.V(3).Infof("scan %s : %v", dirPath, err) return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err) } lastFileName = name @@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, FullPath: util.NewFullPath(string(dirPath), name), } if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { - glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) + log.V(3).Infof("scan decode %s : %v", entry.FullPath, err) return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) } diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go index 221902aaa..ef746feb9 100644 --- a/weed/filer/abstract_sql/abstract_sql_store_kv.go +++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go @@ -6,7 +6,7 @@ import ( "encoding/base64" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "strings" ) @@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by } // now the insert failed possibly due to duplication constraints - glog.V(1).Infof("kv insert falls back to update: %s", err) + log.V(2).Infof("kv insert falls back to update: %s", err) res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr) if err != nil { diff --git a/weed/filer/arangodb/arangodb_store.go b/weed/filer/arangodb/arangodb_store.go index 457b5f28b..d1b77d36f 100644 
--- a/weed/filer/arangodb/arangodb_store.go +++ b/weed/filer/arangodb/arangodb_store.go @@ -12,7 +12,7 @@ import ( "github.com/arangodb/go-driver" "github.com/arangodb/go-driver/http" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat if driver.IsNotFound(err) { return nil, filer_pb.ErrNotFound } - glog.Errorf("find %s: %v", fullpath, err) + log.Errorf("find %s: %v", fullpath, err) return nil, filer_pb.ErrNotFound } if len(data.Meta) == 0 { @@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP } _, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath))) if err != nil && !driver.IsNotFound(err) { - glog.Errorf("find %s: %v", fullpath, err) + log.Errorf("find %s: %v", fullpath, err) return fmt.Errorf("delete %s : %v", fullpath, err) } return nil @@ -331,7 +331,7 @@ sort d.name asc converted := arrayToBytes(data.Meta) if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } diff --git a/weed/filer/arangodb/arangodb_store_bucket.go b/weed/filer/arangodb/arangodb_store_bucket.go index 44aeeadea..1beb3dd7c 100644 --- a/weed/filer/arangodb/arangodb_store_bucket.go +++ b/weed/filer/arangodb/arangodb_store_bucket.go @@ -7,7 +7,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var _ filer.BucketAware = (*ArangodbStore)(nil) @@ -18,7 +18,7 @@ func (store *ArangodbStore) OnBucketCreation(bucket string) { // create the collection && add to cache _, err := store.ensureBucket(timeout, bucket) if err != nil { - glog.Errorf("bucket create %s: %v", bucket, err) + log.Errorf("bucket create %s: %v", bucket, err) } } func (store *ArangodbStore) OnBucketDeletion(bucket string) { @@ -26,12 +26,12 @@ func (store *ArangodbStore) OnBucketDeletion(bucket string) { defer cancel() collection, err := store.ensureBucket(timeout, bucket) if err != nil { - glog.Errorf("bucket delete %s: %v", bucket, err) + log.Errorf("bucket delete %s: %v", bucket, err) return } err = collection.Remove(timeout) if err != nil && !driver.IsNotFound(err) { - glog.Errorf("bucket delete %s: %v", bucket, err) + log.Errorf("bucket delete %s: %v", bucket, err) return } store.mu.Lock() diff --git a/weed/filer/arangodb/arangodb_store_kv.go b/weed/filer/arangodb/arangodb_store_kv.go index 2ca85ccce..4b0a29e01 100644 --- a/weed/filer/arangodb/arangodb_store_kv.go +++ b/weed/filer/arangodb/arangodb_store_kv.go @@ -6,7 +6,7 @@ import ( "github.com/arangodb/go-driver" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte return nil, filer.ErrKvNotFound } if err != nil { - glog.Errorf("kv get: %s %v", string(key), err) + log.Errorf("kv get: %s %v", string(key), err) return nil, filer.ErrKvNotFound } return arrayToBytes(model.Meta), nil @@ -47,7 +47,7 @@ func 
(store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) { _, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key))) if err != nil { - glog.Errorf("kv del: %v", err) + log.Errorf("kv del: %v", err) return filer.ErrKvNotFound } return nil diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go index 418812a47..84990acfc 100644 --- a/weed/filer/cassandra/cassandra_store.go +++ b/weed/filer/cassandra/cassandra_store.go @@ -8,7 +8,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -51,7 +51,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam } store.cluster.Keyspace = keyspace store.cluster.Timeout = time.Duration(timeout) * time.Millisecond - glog.V(0).Infof("timeout = %d", timeout) + log.V(3).Infof("timeout = %d", timeout) fallback := gocql.RoundRobinHostPolicy() if localDC != "" { fallback = gocql.DCAwareRoundRobinPolicy(localDC) @@ -61,7 +61,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam store.session, err = store.cluster.CreateSession() if err != nil { - glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) + log.V(3).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) } // set directory hash @@ -72,7 +72,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam dirHash := util.Md5String([]byte(dir))[:4] store.superLargeDirectoryHash[dir] = dirHash if existingDir, found := existingHash[dirHash]; found { - glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + log.Fatalf("directory %s has the same hash as %s", dir, existingDir) } existingHash[dirHash] = dir } @@ -202,7 +202,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { @@ -210,7 +210,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u } } if err = iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) + log.V(3).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/cassandra2/cassandra_store.go b/weed/filer/cassandra2/cassandra_store.go index d0578669b..fa0015365 100644 --- a/weed/filer/cassandra2/cassandra_store.go +++ b/weed/filer/cassandra2/cassandra_store.go @@ -8,7 +8,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -51,7 +51,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna } store.cluster.Keyspace = keyspace store.cluster.Timeout = time.Duration(timeout) * time.Millisecond - glog.V(0).Infof("timeout = %d", timeout) + log.V(3).Infof("timeout = %d", timeout) fallback := gocql.RoundRobinHostPolicy() if localDC != "" { fallback = 
gocql.DCAwareRoundRobinPolicy(localDC) @@ -61,7 +61,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna store.session, err = store.cluster.CreateSession() if err != nil { - glog.V(0).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace) + log.V(3).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace) } // set directory hash @@ -72,7 +72,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna dirHash := util.Md5String([]byte(dir))[:4] store.superLargeDirectoryHash[dir] = dirHash if existingDir, found := existingHash[dirHash]; found { - glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + log.Fatalf("directory %s has the same hash as %s", dir, existingDir) } existingHash[dirHash] = dir } @@ -202,7 +202,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { @@ -210,7 +210,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath } } if err = iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) + log.V(3).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go index db4af1559..b8e6c1007 100644 --- a/weed/filer/configuration.go +++ b/weed/filer/configuration.go @@ -1,7 +1,7 @@ package filer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "os" "reflect" @@ -22,10 +22,10 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { if config.GetBool(store.GetName() + ".enabled") { store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) if err := store.Initialize(config, store.GetName()+"."); err != nil { - glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + log.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) } isFresh = f.SetStore(store) - glog.V(0).Infof("configured filer store to %s", store.GetName()) + log.V(3).Infof("configured filer store to %s", store.GetName()) hasDefaultStoreConfigured = true break } @@ -70,16 +70,16 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) if err := store.Initialize(config, key+"."); err != nil { - glog.Fatalf("Failed to initialize store for %s: %+v", key, err) + log.Fatalf("Failed to initialize store for %s: %+v", key, err) } location := config.GetString(key + ".location") if location == "" { - glog.Errorf("path-specific filer store needs %s", key+".location") + log.Errorf("path-specific filer store needs %s", key+".location") os.Exit(-1) } f.Store.AddPathSpecificStore(location, storeId, store) - glog.V(0).Infof("configure filer %s for %s", store.GetName(), location) + log.V(3).Infof("configure filer %s for %s", store.GetName(), location) } return @@ -92,7 +92,7 @@ func validateOneEnabledStore(config *util.ViperProxy) { if enabledStore == "" { enabledStore = store.GetName() } else { - glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) + log.Fatalf("Filer store 
is enabled for both %s and %s", enabledStore, store.GetName()) } } } diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go index bf9d3394e..f1d153739 100644 --- a/weed/filer/elastic/v7/elastic_store.go +++ b/weed/filer/elastic/v7/elastic_store.go @@ -12,7 +12,7 @@ import ( jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -70,7 +70,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre if store.maxPageSize <= 0 { store.maxPageSize = 10000 } - glog.Infof("filer store elastic endpoints: %v.", servers) + log.Infof("filer store elastic endpoints: %v.", servers) return store.initialize(options) } @@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) } value, err := jsoniter.Marshal(esEntry) if err != nil { - glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) return fmt.Errorf("insert entry marshal %v", err) } _, err = store.client.Index(). @@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) BodyJson(string(value)). Do(ctx) if err != nil { - glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) return fmt.Errorf("insert entry %v", err) } return nil @@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful err := jsoniter.Unmarshal(searchResult.Source, esEntry) return esEntry.Entry, err } - glog.Errorf("find entry(%s),%v.", string(fullpath), err) + log.Errorf("find entry(%s),%v.", string(fullpath), err) return nil, filer_pb.ErrNotFound } @@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) { return nil } - glog.Errorf("delete index(%s) %v.", index, err) + log.Errorf("delete index(%s) %v.", index, err) return err } @@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e return nil } } - glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) + log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) return fmt.Errorf("delete entry %v", err) } func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool { if err := store.DeleteEntry(ctx, entry.FullPath); err != nil { - glog.Errorf("elastic delete %s: %v.", entry.FullPath, err) + log.Errorf("elastic delete %s: %v.", entry.FullPath, err) return false } return true @@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries( result := &elastic.SearchResult{} if (startFileName == "" && first) || inclusive { if result, err = store.search(ctx, index, parentId); err != nil { - glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } else { @@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries( } after := 
weed_util.Md5String([]byte(fullPath)) if result, err = store.searchAfter(ctx, index, parentId, after); err != nil { - glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go index 86262bc0f..00f807394 100644 --- a/weed/filer/elastic/v7/elastic_store_kv.go +++ b/weed/filer/elastic/v7/elastic_store_kv.go @@ -11,7 +11,7 @@ import ( jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) { @@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) return nil } } - glog.Errorf("delete key(id:%s) %v.", string(key), err) + log.Errorf("delete key(id:%s) %v.", string(key), err) return fmt.Errorf("delete key %v", err) } @@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, return esEntry.Value, nil } } - glog.Errorf("find key(%s),%v.", string(key), err) + log.Errorf("find key(%s),%v.", string(key), err) return value, filer.ErrKvNotFound } @@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) esEntry := &ESKVEntry{value} val, err := jsoniter.Marshal(esEntry) if err != nil { - glog.Errorf("insert key(%s) %v.", string(key), err) + log.Errorf("insert key(%s) %v.", string(key), err) return fmt.Errorf("insert key %v", err) } _, err = store.client.Index(). diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go index fa2a72ca5..878451cca 100644 --- a/weed/filer/etcd/etcd_store.go +++ b/weed/filer/etcd/etcd_store.go @@ -11,7 +11,7 @@ import ( "go.etcd.io/etcd/client/v3" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -73,7 +73,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix } func (store *EtcdStore) initialize(servers, username, password string, timeout time.Duration, tlsConfig *tls.Config) error { - glog.Infof("filer store etcd: %s", servers) + log.Infof("filer store etcd: %s", servers) client, err := clientv3.New(clientv3.Config{ Endpoints: strings.Split(servers, ","), @@ -95,7 +95,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t return fmt.Errorf("error checking etcd connection: %s", err) } - glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version) + log.V(3).Infof("сonnection to etcd has been successfully verified. 
etcd version: %s", resp.Version) store.client = client return nil @@ -208,7 +208,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 36096d2c1..52c80cd57 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -12,7 +12,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" @@ -105,7 +105,7 @@ func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, c func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error { urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return err } err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0) @@ -118,7 +118,7 @@ func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFi func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) { urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return 0, err } return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset) @@ -158,7 +158,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri break } if err != nil { - glog.V(0).Infof("read %s failed, err: %v", urlString, err) + log.V(3).Infof("read %s failed, err: %v", urlString, err) } else { break } @@ -168,7 +168,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri break } if err != nil && shouldRetry { - glog.V(0).Infof("retry reading in %v", waitTime) + log.V(3).Infof("retry reading in %v", waitTime) time.Sleep(waitTime) } else { break diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go index dfa971f86..d723c6b1b 100644 --- a/weed/filer/filechunks2_test.go +++ b/weed/filer/filechunks2_test.go @@ -6,7 +6,7 @@ import ( "slices" "testing" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -80,6 +80,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) { return int(a.Offset - b.Offset) }) for _, chunk := range chunks { - glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + log.V(3).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) } } diff --git a/weed/filer/filer.go b/weed/filer/filer.go index acde49d54..204bf2b13 100644 --- a/weed/filer/filer.go +++ b/weed/filer/filer.go @@ -18,7 +18,7 @@ 
import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -92,7 +92,7 @@ func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes [ return } - glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId) + log.V(3).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId) return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error { @@ -110,7 +110,7 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste snapshot = append(snapshot, address) } f.Dlm.LockRing.SetSnapshot(snapshot) - glog.V(0).Infof("%s aggregate from peers %+v", self, snapshot) + log.V(3).Infof("%s aggregate from peers %+v", self, snapshot) f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption) f.MasterClient.SetOnPeerUpdateFn(func(update *master_pb.ClusterNodeUpdate, startFrom time.Time) { @@ -150,15 +150,15 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) (isFresh bool) { storeIdBytes = make([]byte, 4) util.Uint32toBytes(storeIdBytes, uint32(f.Signature)) if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil { - glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) + log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) } - glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature) + log.V(3).Infof("create %s to %d", FilerStoreId, f.Signature) return true } else if err == nil && len(storeIdBytes) == 4 { f.Signature = int32(util.BytesToUint32(storeIdBytes)) - glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature) + log.V(3).Infof("existing %s = %d", FilerStoreId, f.Signature) } else { - glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) + log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) } return false } @@ -201,7 +201,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr /* if !hasWritePermission(lastDirectoryEntry, entry) { - glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d", + log.V(3).Infof("directory %s: %v, entry: uid=%d gid=%d", lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath) } @@ -216,19 +216,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr } } - glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) + log.V(-1).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) if err := f.Store.InsertEntry(ctx, entry); err != nil { - glog.Errorf("insert entry %s: %v", entry.FullPath, err) + log.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { if o_excl { - glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + log.V(0).Infof("EEXIST: entry %s already exists", entry.FullPath) return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) } - glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) + log.V(-1).Infof("UpdateEntry %s: 
old entry: %v", entry.FullPath, oldEntry.Name()) if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { - glog.Errorf("update entry %s: %v", entry.FullPath, err) + log.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } @@ -237,7 +237,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr f.deleteChunksIfNotNew(oldEntry, entry) - glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + log.V(-1).Infof("CreateEntry %s: created", entry.FullPath) return nil } @@ -252,7 +252,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di // fmt.Printf("%d dirPath: %+v\n", level, dirPath) // check the store directly - glog.V(4).Infof("find uncached directory: %s", dirPath) + log.V(-1).Infof("find uncached directory: %s", dirPath) dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath)) // no such existing directory @@ -287,11 +287,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di }, } - glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) + log.V(1).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.Store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil { - glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) + log.V(0).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -301,7 +301,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di } } else if !dirEntry.IsDirectory() { - glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) + log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -312,11 +312,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er if oldEntry != nil { entry.Attr.Crtime = oldEntry.Attr.Crtime if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.Errorf("existing %s is a directory", oldEntry.FullPath) + log.Errorf("existing %s is a directory", oldEntry.FullPath) return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.Errorf("existing %s is a file", oldEntry.FullPath) + log.Errorf("existing %s is a file", oldEntry.FullPath) return fmt.Errorf("existing %s is a file", oldEntry.FullPath) } } diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go index e93279fba..be0a2a37f 100644 --- a/weed/filer/filer_conf.go +++ b/weed/filer/filer_conf.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/wdclient" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/viant/ptrie" @@ -68,7 +68,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) { if err == filer_pb.ErrNotFound { return nil } - glog.Errorf("read filer conf entry %s: %v", filerConfPath, err) + log.Errorf("read filer conf entry %s: %v", filerConfPath, err) return } @@ -83,7 +83,7 @@ func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*file if len(content) == 0 { content, err = filer.readEntry(chunks, size) if err != nil { - glog.Errorf("read filer conf content: %v", err) + log.Errorf("read filer conf content: %v", err) 
return } } @@ -119,7 +119,7 @@ func (fc *FilerConf) GetLocationConf(locationPrefix string) (locConf *filer_pb.F func (fc *FilerConf) SetLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) { err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) if err != nil { - glog.Errorf("put location prefix: %v", err) + log.Errorf("put location prefix: %v", err) } return } @@ -132,7 +132,7 @@ func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err } err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) if err != nil { - glog.Errorf("put location prefix: %v", err) + log.Errorf("put location prefix: %v", err) } return } diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go index 0ae421981..96840281f 100644 --- a/weed/filer/filer_delete_entry.go +++ b/weed/filer/filer_delete_entry.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR return nil }) if err != nil { - glog.V(2).Infof("delete directory %s: %v", p, err) + log.V(1).Infof("delete directory %s: %v", p, err) return fmt.Errorf("delete directory %s: %v", p, err) } } @@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry for { entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "") if err != nil { - glog.Errorf("list folder %s: %v", entry.FullPath, err) + log.Errorf("list folder %s: %v", entry.FullPath, err) return fmt.Errorf("list folder %s: %v", entry.FullPath, err) } if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for first iteration in the loop - glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) + log.V(1).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath) } @@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry } } - glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) + log.V(0).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { return fmt.Errorf("filer store delete: %v", storeDeletionErr) @@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) { - glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + log.V(0).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil { return fmt.Errorf("filer store delete: %v", storeDeletionErr) @@ -143,7 +143,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) { Name: collectionName, }) if err != nil { - glog.Infof("delete collection %s: %v", collectionName, err) + log.Infof("delete collection %s: %v", collectionName, err) } return err }) @@ -153,7 
+153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) { func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) { for _, hardLinkId := range hardLinkIds { if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil { - glog.Errorf("delete hard link id %d : %v", hardLinkId, err) + log.Errorf("delete hard link id %d : %v", hardLinkId, err) } } } diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go index 362c7c51b..1ad60c2dd 100644 --- a/weed/filer/filer_deletion.go +++ b/weed/filer/filer_deletion.go @@ -7,7 +7,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/wdclient" @@ -58,10 +58,10 @@ func (f *Filer) loopProcessingDeletion() { _, err := operation.DeleteFileIdsWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) if err != nil { if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) { - glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + log.V(3).Infof("deleting fileIds len=%d error: %v", deletionCount, err) } } else { - glog.V(2).Infof("deleting fileIds %+v", toDeleteFileIds) + log.V(1).Infof("deleting fileIds %+v", toDeleteFileIds) } } }) @@ -92,7 +92,7 @@ func (f *Filer) doDeleteChunks(chunks []*filer_pb.FileChunk) { } dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk) if manifestResolveErr != nil { - glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) + log.V(3).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) } for _, dChunk := range dataChunks { f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString()) @@ -118,7 +118,7 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { toDelete, err := MinusChunks(f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks) if err != nil { - glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks) + log.Errorf("Failed to resolve old entry chunks when delete old entry chunks. 
new: %s, old: %s", newChunks, oldChunks) return } f.DeleteChunksNotRecursive(toDelete) diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go index 4c99da72a..165f7d52a 100644 --- a/weed/filer/filer_notify.go +++ b/weed/filer/filer_notify.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -56,10 +56,10 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry } if notification.Queue != nil { - glog.V(3).Infof("notifying entry update %v", fullpath) + log.V(0).Infof("notifying entry update %v", fullpath) if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil { // throw message - glog.Error(err) + log.Error(err) } } @@ -78,7 +78,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica } data, err := proto.Marshal(event) if err != nil { - glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) return } @@ -101,7 +101,7 @@ func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTim for { if err := f.appendToFile(targetFile, buf); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) + log.V(3).Infof("metadata log write failed %s: %v", targetFile, err) time.Sleep(737 * time.Millisecond) } else { break diff --git a/weed/filer/filer_notify_read.go b/weed/filer/filer_notify_read.go index ac2c763e6..e47108348 100644 --- a/weed/filer/filer_notify_read.go +++ b/weed/filer/filer_notify_read.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/wdclient" "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -217,7 +217,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) { // println(" enqueue", tsMinute) t, parseErr := time.Parse("2006-01-02-15-04", tsMinute) if parseErr != nil { - glog.Errorf("failed to parse %s: %v", tsMinute, parseErr) + log.Errorf("failed to parse %s: %v", tsMinute, parseErr) continue } filerId := getFilerId(hourMinuteEntry.Name()) @@ -237,7 +237,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) { for filerId, entryName := range freshFilerIds { iter, found := v.perFilerIteratorMap[filerId] if !found { - glog.Errorf("Unexpected! failed to find iterator for filer %s", filerId) + log.Errorf("Unexpected! 
failed to find iterator for filer %s", filerId) continue } next, nextErr := iter.getNext(v) diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go index 6cec80148..680eb8c86 100644 --- a/weed/filer/filer_on_meta_event.go +++ b/weed/filer/filer_on_meta_event.go @@ -2,7 +2,7 @@ package filer import ( "bytes" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -43,7 +43,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR return } - glog.V(0).Infof("procesing %v", event) + log.V(3).Infof("procesing %v", event) if entry.Name == FilerConfName { f.reloadFilerConfiguration(entry) } @@ -62,7 +62,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) { fc := NewFilerConf() err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry)) if err != nil { - glog.Errorf("read filer conf chunks: %v", err) + log.Errorf("read filer conf chunks: %v", err) return } f.FilerConf = fc @@ -74,7 +74,7 @@ func (f *Filer) LoadFilerConf() { return fc.loadFromFiler(f) }) if err != nil { - glog.Errorf("read filer conf: %v", err) + log.Errorf("read filer conf: %v", err) return } f.FilerConf = fc @@ -85,7 +85,7 @@ func (f *Filer) LoadFilerConf() { // ////////////////////////////////// func (f *Filer) LoadRemoteStorageConfAndMapping() { if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil { - glog.Errorf("read remote conf and mapping: %v", err) + log.Errorf("read remote conf and mapping: %v", err) return } } diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go index 12402e82f..b469731be 100644 --- a/weed/filer/filerstore_hardlink.go +++ b/weed/filer/filerstore_hardlink.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -22,7 +22,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry } // check what is existing entry - // glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) + // log.V(-1).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) actualStore := fsw.getActualStore(entry.FullPath) existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath) if err != nil && err != filer_pb.ErrNotFound { @@ -31,7 +31,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry // remove old hard link if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 { - glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) + log.V(-1).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } @@ -50,7 +50,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err return encodeErr } - glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + log.V(-1).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return fsw.KvPut(ctx, key, newBlob) } @@ -63,16 +63,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr value, err := fsw.KvGet(ctx, key) if err != nil { - glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + 
log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } if err = entry.DecodeAttributesAndChunks(value); err != nil { - glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } - glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + log.V(-1).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return nil } @@ -94,7 +94,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har entry.HardLinkCounter-- if entry.HardLinkCounter <= 0 { - glog.V(4).Infof("DeleteHardLink KvDelete %v", key) + log.V(-1).Infof("DeleteHardLink KvDelete %v", key) return fsw.KvDelete(ctx, key) } @@ -103,7 +103,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har return encodeErr } - glog.V(4).Infof("DeleteHardLink KvPut %v", key) + log.V(-1).Infof("DeleteHardLink KvPut %v", key) return fsw.KvPut(ctx, key, newBlob) } diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go index ebaf04065..ac22e416d 100644 --- a/weed/filer/filerstore_wrapper.go +++ b/weed/filer/filerstore_wrapper.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/viant/ptrie" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -79,7 +79,7 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, fsw.storeIdToStore[storeId] = NewFilerStorePathTranslator(path, store) err := fsw.pathToStore.Put([]byte(path), storeId) if err != nil { - glog.Fatalf("put path specific store: %v", err) + log.Fatalf("put path specific store: %v", err) } } @@ -128,7 +128,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err return err } - // glog.V(4).Infof("InsertEntry %s", entry.FullPath) + // log.V(-1).Infof("InsertEntry %s", entry.FullPath) return actualStore.InsertEntry(ctx, entry) } @@ -149,7 +149,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err return err } - // glog.V(4).Infof("UpdateEntry %s", entry.FullPath) + // log.V(-1).Infof("UpdateEntry %s", entry.FullPath) return actualStore.UpdateEntry(ctx, entry) } @@ -162,7 +162,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) ( }() entry, err = actualStore.FindEntry(ctx, fp) - // glog.V(4).Infof("FindEntry %s: %v", fp, err) + // log.V(-1).Infof("FindEntry %s: %v", fp, err) if err != nil { if fsw.CanDropWholeBucket() && strings.Contains(err.Error(), "Table") && strings.Contains(err.Error(), "doesn't exist") { err = filer_pb.ErrNotFound @@ -192,14 +192,14 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) // remove hard link op := ctx.Value("OP") if op != "MV" { - glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + log.V(-1).Infof("DeleteHardLink %s", existingEntry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } } } - // glog.V(4).Infof("DeleteEntry %s", fp) + // log.V(-1).Infof("DeleteEntry %s", fp) return actualStore.DeleteEntry(ctx, fp) } @@ -215,14 +215,14 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry // remove hard link op := ctx.Value("OP") if op != "MV" { - glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + log.V(-1).Infof("DeleteHardLink %s", 
existingEntry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } } } - // glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath) + // log.V(-1).Infof("DeleteOneEntry %s", existingEntry.FullPath) return actualStore.DeleteEntry(ctx, existingEntry.FullPath) } @@ -234,7 +234,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util. stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) }() - // glog.V(4).Infof("DeleteFolderChildren %s", fp) + // log.V(-1).Infof("DeleteFolderChildren %s", fp) return actualStore.DeleteFolderChildren(ctx, fp) } @@ -246,7 +246,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) }() - // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) + // log.V(-1).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) filer_pb.AfterEntryDeserialization(entry.GetChunks()) @@ -264,7 +264,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, if limit > math.MaxInt32-1 { limit = math.MaxInt32 - 1 } - // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) + // log.V(-1).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) adjustedEntryFunc := func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) filer_pb.AfterEntryDeserialization(entry.GetChunks()) diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go index 1a0e3c893..5915580c4 100644 --- a/weed/filer/hbase/hbase_store.go +++ b/weed/filer/hbase/hbase_store.go @@ -5,7 +5,7 @@ import ( "context" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/tsuna/gohbase" @@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa } if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go index 7960bf476..5ae85c453 100644 --- a/weed/filer/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -13,7 +13,7 @@ import ( "os" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -44,7 +44,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre } func (store *LevelDBStore) initialize(dir string) (err error) { - glog.V(0).Infof("filer store dir: %s", dir) + log.V(3).Infof("filer store dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s 
Writable: %s", dir, err) @@ -61,7 +61,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) { store.db, err = leveldb.RecoverFile(dir, opts) } if err != nil { - glog.Infof("filer store open dir %s: %v", dir, err) + log.Infof("filer store open dir %s: %v", dir, err) return } } @@ -205,7 +205,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go index b465046f9..5a2902ee0 100644 --- a/weed/filer/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -15,7 +15,7 @@ import ( leveldb_util "github.com/syndtr/goleveldb/leveldb/util" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -40,7 +40,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr } func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { - glog.Infof("filer store leveldb2 dir: %s", dir) + log.Infof("filer store leveldb2 dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -61,7 +61,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { db, dbErr = leveldb.RecoverFile(dbFolder, opts) } if dbErr != nil { - glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + log.Errorf("filer store open dir %s: %v", dbFolder, dbErr) return dbErr } store.dbs = append(store.dbs, db) @@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go index 2522221da..06e3eb776 100644 --- a/weed/filer/leveldb3/leveldb3_store.go +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -17,7 +17,7 @@ import ( leveldb_util "github.com/syndtr/goleveldb/leveldb/util" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -47,7 +47,7 @@ func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, pr } func (store *LevelDB3Store) initialize(dir string) (err error) { - glog.Infof("filer store leveldb3 dir: %s", dir) + log.Infof("filer store leveldb3 dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -88,7 +88,7 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) { db, dbErr = leveldb.RecoverFile(dbFolder, opts) } if dbErr != nil { - glog.Errorf("filer store open dir 
%s: %v", dbFolder, dbErr) + log.Errorf("filer store open dir %s: %v", dbFolder, dbErr) return nil, dbErr } return db, nil @@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go index 976822ad1..9ce94ba05 100644 --- a/weed/filer/meta_aggregator.go +++ b/weed/filer/meta_aggregator.go @@ -14,7 +14,7 @@ import ( "google.golang.org/grpc" "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -73,23 +73,23 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, star func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) { lastTsNs := startFrom.UnixNano() for { - glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs) nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs) // check stopChan to see if we should stop select { case <-stopChan: - glog.V(0).Infof("stop subscribing peer %s meta change", peer) + log.V(3).Infof("stop subscribing peer %s meta change", peer) return default: } if err != nil { - errLvl := glog.Level(0) + errLvl := log.Level(0) if strings.Contains(err.Error(), "duplicated local subscription detected") { - errLvl = glog.Level(4) + errLvl = log.Level(4) } - glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err) + log.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err) } if lastTsNs < nextLastTsNs { lastTsNs = nextLastTsNs @@ -126,35 +126,35 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, defer func(prevTsNs int64) { if lastTsNs != prevTsNs && lastTsNs != lastPersistTime.UnixNano() { if err := ma.updateOffset(f, peer, peerSignature, lastTsNs); err == nil { - glog.V(0).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) } else { - glog.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) } } }(prevTsNs) } - glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) var counter int64 var synced bool maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) { if err := Replay(f.Store, event); err != nil { - glog.Errorf("failed to reply metadata change from %v: %v", peer, err) + log.Errorf("failed to reply metadata change from %v: %v", peer, err) return } counter++ if lastPersistTime.Add(time.Minute).Before(time.Now()) { if err := 
 			if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
 				if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
-					glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+					log.V(3).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
 				} else if !synced {
 					synced = true
-					glog.V(0).Infof("synced with %s", peer)
+					log.V(3).Infof("synced with %s", peer)
 				}
 				lastPersistTime = time.Now()
 				counter = 0
 			} else {
-				glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+				log.V(3).Infof("failed to update offset for %v: %v", peer, err)
 			}
 		}
 	}
@@ -163,7 +163,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 	processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
 		data, err := proto.Marshal(event)
 		if err != nil {
-			glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+			log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
 			return err
 		}
 		dir := event.Directory
@@ -175,7 +175,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 		return nil
 	}
 
-	glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
+	log.V(3).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
 	err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
@@ -188,7 +188,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 			ClientEpoch: atomic.LoadInt32(&ma.filer.UniqueFilerEpoch),
 		})
 		if err != nil {
-			glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err)
+			log.V(3).Infof("SubscribeLocalMetadata %v: %v", peer, err)
 			return fmt.Errorf("subscribe: %v", err)
 		}
 
@@ -198,12 +198,12 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 				return nil
 			}
 			if listenErr != nil {
-				glog.V(0).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr)
+				log.V(3).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr)
 				return listenErr
 			}
 			if err := processEventFn(resp); err != nil {
-				glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
+				log.V(3).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
 				return fmt.Errorf("process %v: %v", resp, err)
 			}
 
@@ -248,7 +248,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat
 
 	lastTsNs = int64(util.BytesToUint64(value))
 
-	glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
+	log.V(3).Infof("readOffset %s : %d", peer, lastTsNs)
 
 	return
 }
@@ -266,7 +266,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSign
 		return fmt.Errorf("updateOffset %s : %v", peer, err)
 	}
 
-	glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
+	log.V(-1).Infof("updateOffset %s : %d", peer, lastTsNs)
 
 	return
 }
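The aggregator checkpoints how far it has read from each peer: updateOffset stores the last event timestamp under a per-peer key, and readOffset restores it on the next subscription so replay resumes instead of restarting from scratch. A minimal sketch of that bookkeeping, assuming an in-memory map in place of the filer's KV store (all names below are hypothetical stand-ins):

package main

import (
	"encoding/binary"
	"fmt"
)

// kv stands in for the filer store's KV interface that
// readOffset/updateOffset persist through in the real code.
var kv = map[string][]byte{}

func offsetKey(peerSignature int32) string {
	return fmt.Sprintf("meta_sync_offset.%d", peerSignature)
}

// updateOffset records the newest event timestamp seen from a peer.
func updateOffset(peerSignature int32, lastTsNs int64) {
	value := make([]byte, 8)
	binary.BigEndian.PutUint64(value, uint64(lastTsNs))
	kv[offsetKey(peerSignature)] = value
}

// readOffset recovers the checkpoint so a restarted filer can
// subscribe from where it left off.
func readOffset(peerSignature int32) (lastTsNs int64, found bool) {
	value, ok := kv[offsetKey(peerSignature)]
	if !ok {
		return 0, false
	}
	return int64(binary.BigEndian.Uint64(value)), true
}

func main() {
	updateOffset(42, 1_700_000_000_000_000_000)
	ts, _ := readOffset(42)
	fmt.Println("resume subscription from", ts)
}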
diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go
index f6b009e92..26f5d65dd 100644
--- a/weed/filer/meta_replay.go
+++ b/weed/filer/meta_replay.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"sync"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -15,7 +15,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
 	var newEntry *Entry
 	if message.OldEntry != nil {
 		oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
-		glog.V(4).Infof("deleting %v", oldPath)
+		log.V(-1).Infof("deleting %v", oldPath)
 		if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
 			return err
 		}
@@ -27,7 +27,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
 			dir = message.NewParentPath
 		}
 		key := util.NewFullPath(dir, message.NewEntry.Name)
-		glog.V(4).Infof("creating %v", key)
+		log.V(-1).Infof("creating %v", key)
 		newEntry = FromPbEntry(dir, message.NewEntry)
 		if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
 			return err
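Replay itself is a two-step apply: if the event carries an old entry, delete it at its old path; if it carries a new entry, insert it under the (possibly different) parent directory, which makes a rename a delete-plus-insert. A stripped-down sketch under those assumptions (the Event and Store types are stand-ins, not the filer_pb shapes):

package main

import "fmt"

// Event loosely mirrors the shape of a subscribed metadata change.
type Event struct {
	Directory     string
	OldName       string // empty means no old entry to remove
	NewParentPath string // overrides Directory for renames/moves
	NewName       string // empty means no new entry to insert
}

// Store stands in for the filer store keyed by full path.
type Store map[string]string

// replay applies one metadata event: drop the old path, insert the new.
func replay(store Store, ev Event) {
	if ev.OldName != "" {
		delete(store, ev.Directory+"/"+ev.OldName)
	}
	if ev.NewName != "" {
		dir := ev.Directory
		if ev.NewParentPath != "" {
			dir = ev.NewParentPath
		}
		store[dir+"/"+ev.NewName] = "entry"
	}
}

func main() {
	s := Store{"/a/f.txt": "entry"}
	replay(s, Event{Directory: "/a", OldName: "f.txt", NewParentPath: "/b", NewName: "f.txt"})
	fmt.Println(s) // map[/b/f.txt:entry] — a rename replayed as delete+insert
}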
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/wdclient" @@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp locations = resp.LocationsMap[vid] if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", fileId) + log.V(3).Infof("failed to locate %s", fileId) return fmt.Errorf("failed to locate %s", fileId) } vicCacheLock.Lock() @@ -113,7 +113,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { c.chunkViews.Lock.RLock() defer c.chunkViews.Lock.RUnlock() - // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + // log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) n, _, err = c.doReadAt(p, offset) return } @@ -125,7 +125,7 @@ func (c *ChunkReadAt) ReadAtWithTime(p []byte, offset int64) (n int, ts int64, e c.chunkViews.Lock.RLock() defer c.chunkViews.Lock.RUnlock() - // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + // log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) return c.doReadAt(p, offset) } @@ -143,7 +143,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err } if startOffset < chunk.ViewOffset { gap := chunk.ViewOffset - startOffset - glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) + log.V(-1).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) n += zero(p, startOffset-offset, gap) startOffset, remaining = chunk.ViewOffset, remaining-gap if remaining <= 0 { @@ -155,12 +155,12 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err if chunkStart >= chunkStop { continue } - // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) + // log.V(-1).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk ts = chunk.ModifiedTsNs copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset)) if err != nil { - glog.Errorf("fetching chunk %+v: %v\n", chunk, err) + log.Errorf("fetching chunk %+v: %v\n", chunk, err) return copied, ts, err } @@ -168,7 +168,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err startOffset, remaining = startOffset+int64(copied), remaining-int64(copied) } - // glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) + // log.V(-1).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) // zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file) if err == nil && remaining > 0 { @@ -178,7 +178,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err startOffset -= offset } if delta > 0 { - glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize) + 
log.V(-1).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize) n += zero(p, startOffset, delta) } } diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go index 2ef81a931..53308f466 100644 --- a/weed/filer/reader_cache.go +++ b/weed/filer/reader_cache.go @@ -6,7 +6,7 @@ import ( "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "github.com/seaweedfs/seaweedfs/weed/util/mem" @@ -63,7 +63,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { continue } if rc.chunkCache.IsInCache(chunkView.FileId, true) { - glog.V(4).Infof("%s is in cache", chunkView.FileId) + log.V(-1).Infof("%s is in cache", chunkView.FileId) continue } @@ -72,7 +72,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { return } - // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) + // log.V(-1).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) // cache this chunk if not yet shouldCache := (uint64(chunkView.ViewOffset) + chunkView.ChunkSize) <= rc.chunkCache.GetMaxFilePartSizeInCache() cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), shouldCache) @@ -118,7 +118,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt } } - // glog.V(4).Infof("cache1 %s", fileId) + // log.V(-1).Infof("cache1 %s", fileId) cacher := newSingleChunkCacher(rc, fileId, cipherKey, isGzipped, chunkSize, shouldCache) go cacher.startCaching() @@ -132,7 +132,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt func (rc *ReaderCache) UnCache(fileId string) { rc.Lock() defer rc.Unlock() - // glog.V(4).Infof("uncache %s", fileId) + // log.V(-1).Infof("uncache %s", fileId) if downloader, found := rc.downloaders[fileId]; found { downloader.destroy() delete(rc.downloaders, fileId) diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go index 5e7c850d0..2fa377512 100644 --- a/weed/filer/redis/universal_redis_store.go +++ b/weed/filer/redis/universal_redis_store.go @@ -10,7 +10,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).Infof("list %s : %v", path, err) + log.V(3).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go index 5e7bc019e..8f6fbeadf 100644 --- a/weed/filer/redis2/redis_store.go +++ b/weed/filer/redis2/redis_store.go @@ -8,7 +8,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -41,22 +41,22 @@ func (store *Redis2Store) initialize(hostPort string, password string, database if enableMtls { clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if 
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index d3f01f88a..5a956e9f5 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -8,7 +8,7 @@ import (
 	"github.com/redis/go-redis/v9"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			log.V(3).Infof("list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				continue
 			}
diff --git a/weed/filer/redis3/item_list_serde.go b/weed/filer/redis3/item_list_serde.go
index f4410b61b..f394c7004 100644
--- a/weed/filer/redis3/item_list_serde.go
+++ b/weed/filer/redis3/item_list_serde.go
@@ -2,7 +2,7 @@ package redis3
 
 import (
 	"github.com/redis/go-redis/v9"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
 	"google.golang.org/protobuf/proto"
 )
@@ -22,7 +22,7 @@ func LoadItemList(data []byte, prefix string, client redis.UniversalClient, stor
 
 	message := &skiplist.SkipListProto{}
 	if err := proto.Unmarshal(data, message); err != nil {
-		glog.Errorf("loading skiplist: %v", err)
+		log.Errorf("loading skiplist: %v", err)
 	}
 	nl.skipList.MaxNewLevel = int(message.MaxNewLevel)
 	nl.skipList.MaxLevel = int(message.MaxLevel)
@@ -69,7 +69,7 @@ func (nl *ItemList) ToBytes() []byte {
 	}
 	data, err := proto.Marshal(message)
 	if err != nil {
-		glog.Errorf("marshal skiplist: %v", err)
+		log.Errorf("marshal skiplist: %v", err)
 	}
 	return data
 }
diff --git a/weed/filer/redis3/kv_directory_children.go b/weed/filer/redis3/kv_directory_children.go
index 5a2d76141..9738b80c4 100644
--- a/weed/filer/redis3/kv_directory_children.go
+++ b/weed/filer/redis3/kv_directory_children.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/redis/go-redis/v9"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 const maxNameBatchSizeLimit = 1000000
@@ -31,7 +31,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri
 
 	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)
 	if err := nameList.WriteName(name); err != nil {
-		glog.Errorf("add %s %s: %v", key, name, err)
+		log.Errorf("add %s %s: %v", key, name, err)
 		return err
 	}
 
@@ -100,7 +100,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s
 	if err = nameList.ListNames("", func(name string) bool {
 		if err := onDeleteFn(name); err != nil {
-			glog.Errorf("delete %s child %s: %v", key, name, err)
+			log.Errorf("delete %s child %s: %v", key, name, err)
 			return false
 		}
 		return true
diff --git a/weed/filer/redis3/redis_store.go b/weed/filer/redis3/redis_store.go
index 3bb0ce46f..d5c4f9b87 100644
--- a/weed/filer/redis3/redis_store.go
+++ b/weed/filer/redis3/redis_store.go
@@ -10,7 +10,7 @@ import (
 	"github.com/go-redsync/redsync/v4/redis/goredis/v9"
 	"github.com/redis/go-redis/v9"
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -42,22 +42,22 @@ func (store *Redis3Store) initialize(hostPort string, password string, database
 	if enableMtls {
 		clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
 		if err != nil {
-			glog.Fatalf("Error loading client certificate and key pair: %v", err)
+			log.Fatalf("Error loading client certificate and key pair: %v", err)
 		}
 
 		caCertBytes, err := os.ReadFile(caCertPath)
 		if err != nil {
-			glog.Fatalf("Error reading CA certificate file: %v", err)
+			log.Fatalf("Error reading CA certificate file: %v", err)
 		}
 
 		caCertPool := x509.NewCertPool()
 		if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok {
-			glog.Fatalf("Error appending CA certificate to pool")
+			log.Fatalf("Error appending CA certificate to pool")
 		}
 
 		redisHost, _, err := net.SplitHostPort(hostPort)
 		if err != nil {
-			glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
+			log.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
 		}
 
 		tlsConfig := &tls.Config{
diff --git a/weed/filer/redis3/skiplist_element_store.go b/weed/filer/redis3/skiplist_element_store.go
index 46506187e..551aa463d 100644
--- a/weed/filer/redis3/skiplist_element_store.go
+++ b/weed/filer/redis3/skiplist_element_store.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/redis/go-redis/v9"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
 	"google.golang.org/protobuf/proto"
 )
@@ -27,7 +27,7 @@ func (m *SkipListElementStore) SaveElement(id int64, element *skiplist.SkipListE
 	key := fmt.Sprintf("%s%d", m.Prefix, id)
 	data, err := proto.Marshal(element)
 	if err != nil {
-		glog.Errorf("marshal %s: %v", key, err)
+		log.Errorf("marshal %s: %v", key, err)
 	}
 	return m.client.Set(context.Background(), key, data, 0).Err()
 }
diff --git a/weed/filer/redis3/universal_redis_store.go b/weed/filer/redis3/universal_redis_store.go
index 51675d971..c0d0ef7af 100644
--- a/weed/filer/redis3/universal_redis_store.go
+++ b/weed/filer/redis3/universal_redis_store.go
@@ -9,7 +9,7 @@ import (
 	redsync "github.com/go-redsync/redsync/v4"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			log.V(3).Infof("list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				return true
 			}
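redis3 keeps each directory's children in a skiplist-backed name list (LoadItemList / WriteName / ListNames) so very large directories avoid a single giant Redis value; removeChildren above walks it with a callback that can abort the scan. A toy stand-in for that interface, assuming a plain sorted slice in place of the Redis-persisted skiplist:

package main

import (
	"fmt"
	"sort"
)

// nameList is a toy analog of redis3's ItemList: children stay sorted
// and are listed through a callback that can stop iteration early.
type nameList struct{ names []string }

func (nl *nameList) WriteName(name string) error {
	i := sort.SearchStrings(nl.names, name)
	nl.names = append(nl.names[:i], append([]string{name}, nl.names[i:]...)...)
	return nil
}

func (nl *nameList) ListNames(startFrom string, visit func(name string) bool) error {
	for _, n := range nl.names {
		if n < startFrom {
			continue
		}
		if !visit(n) { // returning false aborts the scan, as in removeChildren
			break
		}
	}
	return nil
}

func main() {
	nl := &nameList{}
	for _, n := range []string{"b.txt", "a.txt", "c.txt"} {
		nl.WriteName(n)
	}
	nl.ListNames("", func(name string) bool {
		fmt.Println("child:", name)
		return true
	})
}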
diff --git a/weed/filer/redis_lua/universal_redis_store.go b/weed/filer/redis_lua/universal_redis_store.go
index 9e8dbcda7..c45200b4e 100644
--- a/weed/filer/redis_lua/universal_redis_store.go
+++ b/weed/filer/redis_lua/universal_redis_store.go
@@ -9,7 +9,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/filer/redis_lua/stored_procedure"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			log.V(3).Infof("list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				continue
 			}
diff --git a/weed/filer/remote_storage.go b/weed/filer/remote_storage.go
index 3764fbac6..002840d52 100644
--- a/weed/filer/remote_storage.go
+++ b/weed/filer/remote_storage.go
@@ -12,7 +12,7 @@ import (
 	"math"
 	"strings"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/viant/ptrie"
 )
@@ -43,7 +43,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
 		if err == filer_pb.ErrNotFound {
 			return nil
 		}
-		glog.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
+		log.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
 		return
 	}
 
@@ -125,7 +125,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.Remo
 	}
 	if len(oldContent) > 0 {
 		if err = proto.Unmarshal(oldContent, mappings); err != nil {
-			glog.Warningf("unmarshal existing mappings: %v", err)
+			log.Warningf("unmarshal existing mappings: %v", err)
 		}
 	}
 	return
diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go
index f860f528a..489144f48 100644
--- a/weed/filer/rocksdb/rocksdb_store.go
+++ b/weed/filer/rocksdb/rocksdb_store.go
@@ -14,7 +14,7 @@ import (
 	gorocksdb "github.com/linxGnu/grocksdb"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -60,7 +60,7 @@ func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, pre
 }
 
 func (store *RocksDBStore) initialize(dir string) (err error) {
-	glog.Infof("filer store rocksdb dir: %s", dir)
+	log.Infof("filer store rocksdb dir: %s", dir)
 	os.MkdirAll(dir, 0755)
 	if err := weed_util.TestFolderWritable(dir); err != nil {
 		return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
@@ -262,7 +262,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.V(3).Infof("list %s : %v", entry.FullPath, err)
 			return false
 		}
 		if !eachEntryFunc(entry) {
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 2f55e3e44..f6a079a23 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -11,7 +11,7 @@ import (
 
 	"slices"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -81,7 +81,7 @@ func noJwtFunc(string) string {
 }
 
 func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
-	glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
+	log.V(-1).Infof("prepare to stream content for chunks: %d", len(chunks))
 	chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
 
 	fileId2Url := make(map[string][]string)
@@ -95,15 +95,15 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
 			if err == nil && len(urlStrings) > 0 {
 				break
 			}
-			glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId)
+			log.V(-1).Infof("waiting for chunk: %s", chunkView.FileId)
 			time.Sleep(backoff)
 		}
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return nil, err
 		} else if len(urlStrings) == 0 {
 			errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
-			glog.Error(errUrlNotFound)
+			log.Error(errUrlNotFound)
 			return nil, errUrlNotFound
 		}
 		fileId2Url[chunkView.FileId] = urlStrings
@@ -117,7 +117,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
 			if offset < chunkView.ViewOffset {
 				gap := chunkView.ViewOffset - offset
 				remaining -= gap
-				glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
+				log.V(-1).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
 				err := writeZero(writer, gap)
 				if err != nil {
 					return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset)
@@ -139,7 +139,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
 			downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
 		}
 		if remaining > 0 {
-			glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining)
+			log.V(-1).Infof("zero [%d,%d)", offset, offset+remaining)
 			err := writeZero(writer, remaining)
 			if err != nil {
 				return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining)
@@ -191,7 +191,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer
 		chunkView := x.Value
 		urlStrings, err := lookupFileIdFn(chunkView.FileId)
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return err
 		}
 
@@ -319,13 +319,13 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
 	if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
 		return nil
 	}
-	// glog.V(2).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize)
+	// log.V(1).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize)
 
 	// find a possible chunk view
 	p := c.chunkView
 	for p != nil {
 		chunk := p.Value
-		// glog.V(2).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize))
+		// log.V(1).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize))
 		if insideChunk(offset, chunk) {
 			if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset {
 				c.chunkView = p
@@ -345,7 +345,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 	urlStrings, err := c.lookupFileId(chunkView.FileId)
 	if err != nil {
-		glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+		log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 		return err
 	}
 	var buffer bytes.Buffer
@@ -358,7 +358,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 			break
 		}
 		if err != nil {
-			glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+			log.V(2).Infof("read %s failed, err: %v", chunkView.FileId, err)
 			buffer.Reset()
 		} else {
 			break
@@ -371,7 +371,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 	c.bufferOffset = chunkView.ViewOffset
 	c.chunk = chunkView.FileId
 
-	// glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize))
+	// log.V(3).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize))
 
 	return nil
 }
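PrepareStreamContentWithThrottler streams chunk views in offset order, zero-fills gaps, and calls MaybeSlowdown after each view to keep the download under downloadMaxBytesPs. A toy throttler with that contract, assuming a simple average-rate budget (the real throttler lives elsewhere in the codebase and may behave differently):

package main

import (
	"fmt"
	"time"
)

// byteThrottler sleeps after each chunk just long enough to keep the
// average rate under maxBytesPs — a sketch of the MaybeSlowdown
// contract, not the project's implementation.
type byteThrottler struct {
	maxBytesPs int64
	start      time.Time
	sent       int64
}

func (t *byteThrottler) MaybeSlowdown(n int64) {
	t.sent += n
	if t.maxBytesPs <= 0 {
		return // unlimited
	}
	// earliest time at which t.sent bytes fit within the budget
	earliest := t.start.Add(time.Duration(t.sent * int64(time.Second) / t.maxBytesPs))
	if d := time.Until(earliest); d > 0 {
		time.Sleep(d)
	}
}

func main() {
	th := &byteThrottler{maxBytesPs: 1 << 20, start: time.Now()}
	for i := 0; i < 4; i++ {
		th.MaybeSlowdown(512 << 10) // pretend we just wrote a 512 KiB chunk view
	}
	fmt.Printf("elapsed ~%v for 2 MiB at 1 MiB/s\n", time.Since(th.start).Round(time.Second))
}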
diff --git a/weed/filer/tarantool/tarantool_store.go b/weed/filer/tarantool/tarantool_store.go
index 8d19db60d..951bfd3e2 100644
--- a/weed/filer/tarantool/tarantool_store.go
+++ b/weed/filer/tarantool/tarantool_store.go
@@ -11,7 +11,7 @@ import (
 	"time"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	weed_util "github.com/seaweedfs/seaweedfs/weed/util"
@@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
 	}
 
 	if len(results) < 1 {
-		glog.Errorf("Can't find results, data is empty")
+		log.Errorf("Can't find results, data is empty")
 		return
 	}
 
 	rows, ok := results[0].([]interface{})
 	if !ok {
-		glog.Errorf("Can't convert results[0] to list")
+		log.Errorf("Can't convert results[0] to list")
 		return
 	}
 
 	for _, result := range rows {
 		row, ok := result.([]interface{})
 		if !ok {
-			glog.Errorf("Can't convert result to list")
+			log.Errorf("Can't convert result to list")
 			return
 		}
 
 		if len(row) < 5 {
-			glog.Errorf("Length of result is less than needed: %v", len(row))
+			log.Errorf("Length of result is less than needed: %v", len(row))
 			return
 		}
 
 		nameRaw := row[2]
 		name, ok := nameRaw.(string)
 		if !ok {
-			glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
+			log.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
 			return
 		}
 
 		dataRaw := row[4]
 		data, ok := dataRaw.(string)
 		if !ok {
-			glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
+			log.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
 			return
 		}
 
@@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
 		lastFileName = name
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.V(3).Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
diff --git a/weed/filer/tikv/tikv_store.go b/weed/filer/tikv/tikv_store.go
index 8187375ca..17b8166ef 100644
--- a/weed/filer/tikv/tikv_store.go
+++ b/weed/filer/tikv/tikv_store.go
@@ -12,7 +12,7 @@ import (
 	"strings"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"github.com/tikv/client-go/v2/config"
@@ -66,7 +66,7 @@ func (store *TikvStore) initialize(ca, cert, key string, verify_cn, pdAddrs []st
 func (store *TikvStore) Shutdown() {
 	err := store.client.Close()
 	if err != nil {
-		glog.V(0).Infof("Shutdown TiKV client got error: %v", err)
+		log.V(3).Infof("Shutdown TiKV client got error: %v", err)
 	}
 }
 
@@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.V(3).Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		if err := iter.Next(); !eachEntryFunc(entry) || err != nil {
diff --git a/weed/filer/ydb/ydb_store.go b/weed/filer/ydb/ydb_store.go
index a9ad6666e..cd93b512f 100644
--- a/weed/filer/ydb/ydb_store.go
+++ b/weed/filer/ydb/ydb_store.go
@@ -14,7 +14,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	environ "github.com/ydb-platform/ydb-go-sdk-auth-environ"
@@ -69,7 +69,7 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
 	store.dirBuckets = dirBuckets
 	store.SupportBucketTable = useBucketPrefix
 	if store.SupportBucketTable {
-		glog.V(0).Infof("enabled BucketPrefix")
+		log.V(3).Infof("enabled BucketPrefix")
 	}
 	store.dbs = make(map[string]bool)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -203,7 +203,7 @@ func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath)
 	dir, name := fullpath.DirAndName()
 	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
 	query := withPragma(tablePathPrefix, deleteQuery)
-	glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
+	log.V(-1).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
 	queryParams := table.NewQueryParameters(
 		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
 		table.ValueParam("$name", types.UTF8Value(name)))
@@ -251,7 +251,7 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath
 	if chunkLimit > maxChunk {
 		chunkLimit = maxChunk
 	}
log.V(-1).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), @@ -268,14 +268,14 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath return nil } truncated = res.CurrentResultSet().Truncated() - glog.V(4).Infof("truncated %v, entryCount %d", truncated, entryCount) + log.V(-1).Infof("truncated %v, entryCount %d", truncated, entryCount) for res.NextRow() { if err := res.ScanNamed( named.OptionalWithDefault("name", &name), named.OptionalWithDefault("meta", &data)); err != nil { return fmt.Errorf("list scanNamed %s : %v", dir, err) } - glog.V(8).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name)) + log.V(-1).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name)) lastFileName = name entry := &filer.Entry{ FullPath: util.NewFullPath(dir, name), @@ -345,7 +345,7 @@ func (store *YdbStore) OnBucketCreation(bucket string) { defer store.dbsLock.Unlock() if err := store.createTable(context.Background(), prefix); err != nil { - glog.Errorf("createTable %s: %v", prefix, err) + log.Errorf("createTable %s: %v", prefix, err) } if store.dbs == nil { @@ -362,14 +362,14 @@ func (store *YdbStore) OnBucketDeletion(bucket string) { defer store.dbsLock.Unlock() prefix := path.Join(store.tablePathPrefix, bucket) - glog.V(4).Infof("deleting table %s", prefix) + log.V(-1).Infof("deleting table %s", prefix) if err := store.deleteTable(context.Background(), prefix); err != nil { - glog.Errorf("deleteTable %s: %v", prefix, err) + log.Errorf("deleteTable %s: %v", prefix, err) } if err := store.DB.Scheme().RemoveDirectory(context.Background(), prefix); err != nil { - glog.Errorf("remove directory %s: %v", prefix, err) + log.Errorf("remove directory %s: %v", prefix, err) } if store.dbs == nil { @@ -393,7 +393,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error { }); err != nil { return err } - glog.V(4).Infof("deleted table %s", prefix) + log.V(-1).Infof("deleted table %s", prefix) return nil } @@ -406,11 +406,11 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre } prefixBuckets := store.dirBuckets + "/" - glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets) + log.V(-1).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets) if strings.HasPrefix(*dir, prefixBuckets) { // detect bucket bucketAndDir := (*dir)[len(prefixBuckets):] - glog.V(4).Infof("bucketAndDir: %s", bucketAndDir) + log.V(-1).Infof("bucketAndDir: %s", bucketAndDir) var bucket string if t := strings.Index(bucketAndDir, "/"); t > 0 { bucket = bucketAndDir[:t] @@ -428,9 +428,9 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre if _, found := store.dbs[bucket]; !found { if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil { store.dbs[bucket] = true - glog.V(4).Infof("created table %s", tablePathPrefixWithBucket) + log.V(-1).Infof("created table %s", tablePathPrefixWithBucket) } else { - glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err) + log.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err) } } tablePathPrefix = &tablePathPrefixWithBucket @@ -441,7 +441,7 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre func (store *YdbStore) ensureTables(ctx context.Context) error { prefixFull := store.tablePathPrefix - glog.V(4).Infof("creating base table %s", prefixFull) 
+ log.V(-1).Infof("creating base table %s", prefixFull) baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE) if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { return s.CreateTable(ctx, baseTable, createTableOptions()...) @@ -449,17 +449,17 @@ func (store *YdbStore) ensureTables(ctx context.Context) error { return fmt.Errorf("failed to create base table %s: %v", baseTable, err) } - glog.V(4).Infof("creating bucket tables") + log.V(-1).Infof("creating bucket tables") if store.SupportBucketTable { store.dbsLock.Lock() defer store.dbsLock.Unlock() for bucket := range store.dbs { - glog.V(4).Infof("creating bucket table %s", bucket) + log.V(-1).Infof("creating bucket table %s", bucket) bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE) if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { return s.CreateTable(ctx, bucketTable, createTableOptions()...) }); err != nil { - glog.Errorf("failed to create bucket table %s: %v", bucketTable, err) + log.Errorf("failed to create bucket table %s: %v", bucketTable, err) } } } |
