Diffstat (limited to 'weed/filer')
28 files changed, 147 insertions, 147 deletions
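This commit mechanically replaces glog calls throughout the filer package with a leveled logging facade at weed/util/log. The mapping visible in every hunk below: glog.Infof and glog.V(0).Infof become log.Infof; glog.V(1).Infof and glog.V(2).Infof become log.Debugf; glog.V(3).Infof and glog.V(4).Infof become log.Tracef; glog.Warningf becomes log.Warnf; glog.Errorf and glog.Fatalf keep their names. The util/log package itself is not part of this diff; the following is a minimal hypothetical sketch of a facade with the same surface, only to illustrate the API the migrated call sites assume — the level names, gating variable, and stderr backend here are assumptions, not the actual implementation.

// Hypothetical sketch of the leveled facade the new call sites assume.
// The real weed/util/log package is not shown in this diff.
package log

import (
	stdlog "log"
	"os"
)

type Level int

const (
	TraceLevel Level = iota
	DebugLevel
	InfoLevel
	WarnLevel
	ErrorLevel
)

// minLevel gates output; Trace and Debug are dropped by default,
// roughly mirroring glog's behavior when -v is unset.
var minLevel = InfoLevel

var std = stdlog.New(os.Stderr, "", stdlog.LstdFlags)

// logf prints the message when its level passes the gate.
func logf(l Level, tag, format string, args ...interface{}) {
	if l < minLevel {
		return
	}
	std.Printf(tag+" "+format, args...)
}

func Tracef(format string, args ...interface{}) { logf(TraceLevel, "TRACE", format, args...) }
func Debugf(format string, args ...interface{}) { logf(DebugLevel, "DEBUG", format, args...) }
func Infof(format string, args ...interface{})  { logf(InfoLevel, "INFO", format, args...) }
func Warnf(format string, args ...interface{})  { logf(WarnLevel, "WARN", format, args...) }
func Errorf(format string, args ...interface{}) { logf(ErrorLevel, "ERROR", format, args...) }

// Fatalf logs unconditionally and exits, like glog.Fatalf.
func Fatalf(format string, args ...interface{}) {
	std.Printf("FATAL "+format, args...)
	os.Exit(1)
}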
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index 7c95ffb57..3acfbbcee 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -5,7 +5,7 @@ import (
 	"database/sql"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"strings"
@@ -81,7 +81,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 	}
 
 	// now the insert failed possibly due to duplication constraints
-	glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
+	log.Debugf("insert %s falls back to update: %v", entry.FullPath, err)
 
 	res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
 	if err != nil {
@@ -187,7 +187,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 		var name string
 		var data []byte
 		if err = rows.Scan(&name, &data); err != nil {
-			glog.V(0).Infof("scan %s : %v", fullpath, err)
+			log.Infof("scan %s : %v", fullpath, err)
 			return nil, fmt.Errorf("scan %s: %v", fullpath, err)
 		}
 
@@ -195,7 +195,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 			FullPath: util.NewFullPath(string(fullpath), name),
 		}
 		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
-			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+			log.Infof("scan decode %s : %v", entry.FullPath, err)
 			return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
 		}
 
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
index c368059df..17634fa3e 100644
--- a/weed/filer/abstract_sql/abstract_sql_store_kv.go
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
@@ -24,7 +24,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
 	}
 
 	// now the insert failed possibly due to duplication constraints
-	glog.V(1).Infof("kv insert falls back to update: %s", err)
+	log.Debugf("kv insert falls back to update: %s", err)
 
 	res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
 	if err != nil {
diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
index ae8cb7a86..681c12d57 100644
--- a/weed/filer/cassandra/cassandra_store.go
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -6,7 +6,7 @@ import (
 	"github.com/gocql/gocql"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -42,7 +42,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
 	store.cluster.Consistency = gocql.LocalQuorum
 	store.session, err = store.cluster.CreateSession()
 	if err != nil {
-		glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
+		log.Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
 	}
 	return
 }
@@ -155,13 +155,13 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		entries = append(entries, entry)
 	}
 	if err := iter.Close(); err != nil {
-		glog.V(0).Infof("list iterator close: %v", err)
+		log.Infof("list iterator close: %v", err)
 	}
 
 	return entries, err
diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go
index 3dce67d6d..27c5d9344 100644
--- a/weed/filer/configuration.go
+++ b/weed/filer/configuration.go
@@ -3,7 +3,7 @@ package filer
 import (
 	"os"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/spf13/viper"
 )
 
@@ -18,11 +18,11 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
 	for _, store := range Stores {
 		if config.GetBool(store.GetName() + ".enabled") {
 			if err := store.Initialize(config, store.GetName()+"."); err != nil {
-				glog.Fatalf("Failed to initialize store for %s: %+v",
+				log.Fatalf("Failed to initialize store for %s: %+v",
 					store.GetName(), err)
 			}
 			f.SetStore(store)
-			glog.V(0).Infof("Configure filer for %s", store.GetName())
+			log.Infof("Configure filer for %s", store.GetName())
 			return
 		}
 	}
@@ -43,7 +43,7 @@ func validateOneEnabledStore(config *viper.Viper) {
 			if enabledStore == "" {
 				enabledStore = store.GetName()
 			} else {
-				glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
+				log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
 			}
 		}
 	}
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
index ec88e10a5..9363e1265 100644
--- a/weed/filer/elastic/v7/elastic_store.go
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
 	jsoniter "github.com/json-iterator/go"
@@ -67,7 +67,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre
 	if store.maxPageSize <= 0 {
 		store.maxPageSize = 10000
 	}
-	glog.Infof("filer store elastic endpoints: %v.", servers)
+	log.Infof("filer store elastic endpoints: %v.", servers)
 	return store.initialize(options)
 }
 
@@ -110,7 +110,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 	}
 	value, err := jsoniter.Marshal(esEntry)
 	if err != nil {
-		glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+		log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
 		return fmt.Errorf("insert entry %v.", err)
 	}
 	_, err = store.client.Index().
@@ -120,7 +120,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		BodyJson(string(value)).
 		Do(ctx)
 	if err != nil {
-		glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+		log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
 		return fmt.Errorf("insert entry %v.", err)
 	}
 	return nil
@@ -149,7 +149,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 		err := jsoniter.Unmarshal(searchResult.Source, esEntry)
 		return esEntry.Entry, err
 	}
-	glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+	log.Errorf("find entry(%s),%v.", string(fullpath), err)
 	return nil, filer_pb.ErrNotFound
 }
 
@@ -167,7 +167,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
 	if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
 		return nil
 	}
-	glog.Errorf("delete index(%s) %v.", index, err)
+	log.Errorf("delete index(%s) %v.", index, err)
 	return err
 }
 
@@ -182,7 +182,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
 			return nil
 		}
 	}
-	glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+	log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
 	return fmt.Errorf("delete entry %v.", err)
 }
 
@@ -207,7 +207,7 @@ func (store *ElasticStore) ListDirectoryEntries(
 func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
 	indexResult, err := store.client.CatIndices().Do(ctx)
 	if err != nil {
-		glog.Errorf("list indices %v.", err)
+		log.Errorf("list indices %v.", err)
 		return entries, err
 	}
 	for _, index := range indexResult {
@@ -249,7 +249,7 @@ func (store *ElasticStore) listDirectoryEntries(
 	result := &elastic.SearchResult{}
 	if (startFileName == "" && first) || inclusive {
 		if result, err = store.search(ctx, index, parentId); err != nil {
-			glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+			log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 			return entries, err
 		}
 	} else {
@@ -259,7 +259,7 @@ func (store *ElasticStore) listDirectoryEntries(
 		}
 		after := weed_util.Md5String([]byte(fullPath))
 		if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
-			glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+			log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 			return entries, err
 		}
 	}
diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go
index 99c03314e..4a8c32c34 100644
--- a/weed/filer/elastic/v7/elastic_store_kv.go
+++ b/weed/filer/elastic/v7/elastic_store_kv.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	jsoniter "github.com/json-iterator/go"
 	elastic "github.com/olivere/elastic/v7"
 )
@@ -22,7 +22,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
 			return nil
 		}
 	}
-	glog.Errorf("delete key(id:%s) %v.", string(key), err)
+	log.Errorf("delete key(id:%s) %v.", string(key), err)
 	return fmt.Errorf("delete key %v.", err)
 }
 
@@ -41,7 +41,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
 			return esEntry.Value, nil
 		}
 	}
-	glog.Errorf("find key(%s),%v.", string(key), err)
+	log.Errorf("find key(%s),%v.", string(key), err)
 	return value, filer.ErrKvNotFound
 }
 
@@ -49,7 +49,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
 	esEntry := &ESKVEntry{value}
 	val, err := jsoniter.Marshal(esEntry)
 	if err != nil {
-		glog.Errorf("insert key(%s) %v.", string(key), err)
+		log.Errorf("insert key(%s) %v.", string(key), err)
 		return fmt.Errorf("insert key %v.", err)
 	}
 	_, err = store.client.Index().
diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
index 634fba1eb..9a07f9b65 100644
--- a/weed/filer/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -9,7 +9,7 @@ import (
 	"go.etcd.io/etcd/clientv3"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -45,7 +45,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
 }
 
 func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
-	glog.Infof("filer store etcd: %s", servers)
+	log.Infof("filer store etcd: %s", servers)
 
 	to, err := time.ParseDuration(timeout)
 	if err != nil {
@@ -169,7 +169,7 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		entries = append(entries, entry)
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index f5ab36d37..3e64fd8a6 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -87,7 +87,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
 func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
 	urlStrings, err := lookupFileIdFn(fileId)
 	if err != nil {
-		glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+		log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
 		return nil, err
 	}
 	return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
@@ -108,14 +108,14 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
 			break
 		}
 		if err != nil {
-			glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+			log.Infof("read %s failed, err: %v", urlString, err)
 			buffer.Reset()
 		} else {
 			break
 		}
 	}
 	if err != nil && shouldRetry {
-		glog.V(0).Infof("retry reading in %v", waitTime)
+		log.Infof("retry reading in %v", waitTime)
 		time.Sleep(waitTime)
 	} else {
 		break
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index c75a35f79..2adf4a300 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -158,9 +158,9 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
 
 func logPrintf(name string, visibles []VisibleInterval) {
 	/*
-		glog.V(0).Infof("%s len %d", name, len(visibles))
+		log.Infof("%s len %d", name, len(visibles))
 		for _, v := range visibles {
-			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
+			log.Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
 		}
 	*/
 }
@@ -185,22 +185,22 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
 	}
 
 	logPrintf(" before", visibles)
-	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
+	// log.Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
 	chunkStop := chunk.Offset + int64(chunk.Size)
 	for _, v := range visibles {
 		if v.start < chunk.Offset && chunk.Offset < v.stop {
 			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
 			newVisibles = append(newVisibles, t)
-			// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+			// log.Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 		}
 		if v.start < chunkStop && chunkStop < v.stop {
 			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
 			newVisibles = append(newVisibles, t)
-			// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+			// log.Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 		}
 		if chunkStop <= v.start || v.stop <= chunk.Offset {
 			newVisibles = append(newVisibles, v)
-			// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
+			// log.Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
 		}
 	}
 	newVisibles = append(newVisibles, newV)
@@ -240,7 +240,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chu
 
 	for _, chunk := range chunks {
 
-		// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+		// log.Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
 		visibles = MergeIntoVisibles(visibles, chunk)
 
 		logPrintf("add", visibles)
diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go
index 9f9566d9b..f8c2c812c 100644
--- a/weed/filer/filechunks2_test.go
+++ b/weed/filer/filechunks2_test.go
@@ -4,7 +4,7 @@ import (
 	"sort"
 	"testing"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
@@ -41,6 +41,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
 		return chunks[i].Offset < chunks[j].Offset
 	})
 	for _, chunk := range chunks {
-		glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+		log.Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 	}
 }
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 105c8e04f..71314136d 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -9,7 +9,7 @@ import (
 
 	"google.golang.org/grpc"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -93,14 +93,14 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
 		storeIdBytes = make([]byte, 4)
 		util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
 		if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
-			glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
+			log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
 		}
-		glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+		log.Infof("create %s to %d", FilerStoreId, f.Signature)
 	} else if err == nil && len(storeIdBytes) == 4 {
 		f.Signature = int32(util.BytesToUint32(storeIdBytes))
-		glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
+		log.Infof("existing %s = %d", FilerStoreId, f.Signature)
 	} else {
-		glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
+		log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
 	}
 }
 
@@ -145,7 +145,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 		// fmt.Printf("%d directory: %+v\n", i, dirPath)
 
 		// check the store directly
-		glog.V(4).Infof("find uncached directory: %s", dirPath)
+		log.Tracef("find uncached directory: %s", dirPath)
 		dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
 
 		// no such existing directory
@@ -169,11 +169,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 				},
 			}
 
-			glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+			log.Debugf("create directory: %s %v", dirPath, dirEntry.Mode)
 			mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
 			if mkdirErr != nil {
 				if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
-					glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+					log.Tracef("mkdir %s: %v", dirPath, mkdirErr)
 					return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
 				}
 			} else {
@@ -182,7 +182,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 			}
 
 		} else if !dirEntry.IsDirectory() {
-			glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+			log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
 			return fmt.Errorf("%s is a file", dirPath)
 		}
 
@@ -194,13 +194,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 	}
 
 	if lastDirectoryEntry == nil {
-		glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
+		log.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
 		return fmt.Errorf("parent folder not found: %v", entry.FullPath)
 	}
 
 	/*
 		if !hasWritePermission(lastDirectoryEntry, entry) {
-			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+			log.Infof("directory %s: %v, entry: uid=%d gid=%d",
 				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
 			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
 		}
@@ -209,19 +209,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
 
 	if oldEntry == nil {
-		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+		log.Tracef("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
 		if err := f.Store.InsertEntry(ctx, entry); err != nil {
-			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+			log.Errorf("insert entry %s: %v", entry.FullPath, err)
 			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
 		}
 	} else {
 		if o_excl {
-			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+			log.Tracef("EEXIST: entry %s already exists", entry.FullPath)
 			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
 		}
-		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+		log.Tracef("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
 		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
-			glog.Errorf("update entry %s: %v", entry.FullPath, err)
+			log.Errorf("update entry %s: %v", entry.FullPath, err)
 			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
 		}
 	}
@@ -231,7 +231,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 
 	f.deleteChunksIfNotNew(oldEntry, entry)
 
-	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+	log.Tracef("CreateEntry %s: created", entry.FullPath)
 
 	return nil
 }
@@ -239,11 +239,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
 	if oldEntry != nil {
 		if oldEntry.IsDirectory() && !entry.IsDirectory() {
-			glog.Errorf("existing %s is a directory", entry.FullPath)
+			log.Errorf("existing %s is a directory", entry.FullPath)
 			return fmt.Errorf("existing %s is a directory", entry.FullPath)
 		}
 		if !oldEntry.IsDirectory() && entry.IsDirectory() {
-			glog.Errorf("existing %s is a file", entry.FullPath)
+			log.Errorf("existing %s is a file", entry.FullPath)
 			return fmt.Errorf("existing %s is a file", entry.FullPath)
 		}
 	}
@@ -321,7 +321,7 @@ func (f *Filer) Shutdown() {
 func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
 	for _, hardLinkId := range hardLinkIds {
 		if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
-			glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+			log.Errorf("delete hard link id %d : %v", hardLinkId, err)
 		}
 	}
 }
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
index 4d4f4abc3..b0dba7955 100644
--- a/weed/filer/filer_buckets.go
+++ b/weed/filer/filer_buckets.go
@@ -5,7 +5,7 @@ import (
 	"math"
 	"sync"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
@@ -32,7 +32,7 @@ func (f *Filer) LoadBuckets() {
 
 	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
 	if err != nil {
-		glog.V(1).Infof("no buckets found: %v", err)
+		log.Debugf("no buckets found: %v", err)
 		return
 	}
 
@@ -41,7 +41,7 @@ func (f *Filer) LoadBuckets() {
 		shouldFsyncMap[bucket] = true
 	}
 
-	glog.V(1).Infof("buckets found: %d", len(entries))
+	log.Debugf("buckets found: %d", len(entries))
 
 	f.buckets.Lock()
 	for _, entry := range entries {
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
index 5fd8e5b49..0abdc807e 100644
--- a/weed/filer/filer_conf.go
+++ b/weed/filer/filer_conf.go
@@ -5,7 +5,7 @@ import (
 	"context"
 	"io"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/golang/protobuf/jsonpb"
@@ -36,7 +36,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 		if err == filer_pb.ErrNotFound {
 			return nil
 		}
-		glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+		log.Errorf("read filer conf entry %s: %v", filerConfPath, err)
 		return
 	}
 
@@ -46,7 +46,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
 	data, err := filer.readEntry(chunks)
 	if err != nil {
-		glog.Errorf("read filer conf content: %v", err)
+		log.Errorf("read filer conf content: %v", err)
 		return
 	}
 
@@ -60,7 +60,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
 
 	err = proto.UnmarshalText(string(data), conf)
 	if err != nil {
-		glog.Errorf("unable to parse filer conf: %v", err)
+		log.Errorf("unable to parse filer conf: %v", err)
 		// this is not recoverable
 		return nil
 	}
@@ -85,7 +85,7 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
 func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
 	err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
 	if err != nil {
-		glog.Errorf("put location prefix: %v", err)
+		log.Errorf("put location prefix: %v", err)
 	}
 	return
 }
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 69219fbfa..603312995 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 		var dirHardLinkIds []HardLinkId
 		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster, signatures)
 		if err != nil {
-			glog.V(0).Infof("delete directory %s: %v", p, err)
+			log.Infof("delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
 		}
 		chunks = append(chunks, dirChunks...)
@@ -71,12 +71,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 	for {
 		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
 		if err != nil {
-			glog.Errorf("list folder %s: %v", entry.FullPath, err)
+			log.Errorf("list folder %s: %v", entry.FullPath, err)
 			return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
 		}
 		if lastFileName == "" && !isRecursive && len(entries) > 0 {
 			// only for first iteration in the loop
-			glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+			log.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
 			return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
 		}
 
@@ -107,7 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 		}
 	}
 
-	glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+	log.Tracef("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
 
 	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
 		return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -120,7 +120,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 
 func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
 
-	glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+	log.Tracef("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
 
 	if storeDeletionErr := f.Store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
 		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -139,7 +139,7 @@ func (f *Filer) doDeleteCollection(collectionName string) (err error) {
 			Name: collectionName,
 		})
 		if err != nil {
-			glog.Infof("delete collection %s: %v", collectionName, err)
+			log.Infof("delete collection %s: %v", collectionName, err)
 		}
 		return err
 	})
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
index 126d162ec..619ec2d66 100644
--- a/weed/filer/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -4,7 +4,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -54,10 +54,10 @@ func (f *Filer) loopProcessingDeletion() {
 				_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
 				if err != nil {
 					if !strings.Contains(err.Error(), "already deleted") {
-						glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+						log.Infof("deleting fileIds len=%d error: %v", deletionCount, err)
 					}
 				} else {
-					glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+					log.Debugf("deleting fileIds len=%d", deletionCount)
 				}
 			}
 		})
@@ -76,7 +76,7 @@ func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
 		}
 		dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
 		if manifestResolveErr != nil {
-			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+			log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 		}
 		for _, dChunk := range dataChunks {
 			f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 40755e6a7..51e7f8669 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/notification"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -54,7 +54,7 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
 	}
 
 	if notification.Queue != nil {
-		glog.V(3).Infof("notifying entry update %v", fullpath)
+		log.Tracef("notifying entry update %v", fullpath)
 		notification.Queue.SendMessage(fullpath, eventNotification)
 	}
 
@@ -73,7 +73,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
 	}
 	data, err := proto.Marshal(event)
 	if err != nil {
-		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+		log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
 		return
 	}
 
@@ -96,7 +96,7 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
 
 	for {
 		if err := f.appendToFile(targetFile, buf); err != nil {
-			glog.V(1).Infof("log write failed %s: %v", targetFile, err)
+			log.Debugf("log write failed %s: %v", targetFile, err)
 			time.Sleep(737 * time.Millisecond)
 		} else {
 			break
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
index 3de27da6e..f7036d236 100644
--- a/weed/filer/filer_on_meta_event.go
+++ b/weed/filer/filer_on_meta_event.go
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"math"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -22,7 +22,7 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse)
 		return
 	}
 
-	glog.V(0).Infof("procesing %v", event)
+	log.Infof("procesing %v", event)
 	if entry.Name == FilerConfName {
 		f.reloadFilerConfiguration(entry)
 	}
@@ -42,7 +42,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
 	fc := NewFilerConf()
 	err := fc.loadFromChunks(f, entry.Chunks)
 	if err != nil {
-		glog.Errorf("read filer conf chunks: %v", err)
+		log.Errorf("read filer conf chunks: %v", err)
 		return
 	}
 	f.FilerConf = fc
@@ -54,7 +54,7 @@ func (f *Filer) LoadFilerConf() {
 		return fc.loadFromFiler(f)
 	})
 	if err != nil {
-		glog.Errorf("read filer conf: %v", err)
+		log.Errorf("read filer conf: %v", err)
 		return
 	}
 	f.FilerConf = fc
diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go
index 0fbf8310e..6d89c20f9 100644
--- a/weed/filer/filerstore_hardlink.go
+++ b/weed/filer/filerstore_hardlink.go
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
@@ -54,12 +54,12 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
 
 	value, err := fsw.KvGet(ctx, key)
 	if err != nil {
-		glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+		log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 		return err
 	}
 
 	if err = entry.DecodeAttributesAndChunks(value); err != nil {
-		glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+		log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 		return err
 	}
 
diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
index 4b8dd5ea9..2613c7996 100644
--- a/weed/filer/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -10,7 +10,7 @@ import (
 	leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -37,7 +37,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre
 }
 
 func (store *LevelDBStore) initialize(dir string) (err error) {
-	glog.Infof("filer store dir: %s", dir)
+	log.Infof("filer store dir: %s", dir)
 	if err := weed_util.TestFolderWritable(dir); err != nil {
 		return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
 	}
@@ -53,7 +53,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
 			store.db, err = leveldb.RecoverFile(dir, opts)
 		}
 		if err != nil {
-			glog.Infof("filer store open dir %s: %v", dir, err)
+			log.Infof("filer store open dir %s: %v", dir, err)
 			return
 		}
 	}
@@ -193,7 +193,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		entries = append(entries, entry)
diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
index 2ad0dd648..1299121c4 100644
--- a/weed/filer/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -13,7 +13,7 @@ import (
 	"os"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -37,7 +37,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr
 }
 
 func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
-	glog.Infof("filer store leveldb2 dir: %s", dir)
+	log.Infof("filer store leveldb2 dir: %s", dir)
 	if err := weed_util.TestFolderWritable(dir); err != nil {
 		return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
 	}
@@ -56,7 +56,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
 			db, dbErr = leveldb.RecoverFile(dbFolder, opts)
 		}
 		if dbErr != nil {
-			glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+			log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
 			return dbErr
 		}
 		store.dbs = append(store.dbs, db)
@@ -205,7 +205,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
 		// println("list", entry.FullPath, "chunks", len(entry.Chunks))
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 		entries = append(entries, entry)
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 9437e9992..3bd4d36f9 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -11,7 +11,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"google.golang.org/grpc"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -64,7 +64,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 
 	peerSignature, err := ma.readFilerStoreSignature(peer)
 	for err != nil {
-		glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
+		log.Infof("connecting to peer filer %s: %v", peer, err)
 		time.Sleep(1357 * time.Millisecond)
 		peerSignature, err = ma.readFilerStoreSignature(peer)
 	}
@@ -74,27 +74,27 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 			lastTsNs = prevTsNs
 		}
 
-		glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
+		log.Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
 		var counter int64
 		var synced bool
 		maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
 			if err := Replay(f.Store, event); err != nil {
-				glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
+				log.Errorf("failed to reply metadata change from %v: %v", peer, err)
 				return
 			}
 			counter++
 			if lastPersistTime.Add(time.Minute).Before(time.Now()) {
 				if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
 					if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
-						glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+						log.Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
 					} else if !synced {
 						synced = true
-						glog.V(0).Infof("synced with %s", peer)
+						log.Infof("synced with %s", peer)
 					}
 					lastPersistTime = time.Now()
 					counter = 0
 				} else {
-					glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+					log.Infof("failed to update offset for %v: %v", peer, err)
 				}
 			}
 		}
@@ -103,7 +103,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 		processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
 			data, err := proto.Marshal(event)
 			if err != nil {
-				glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+				log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
 				return err
 			}
 			dir := event.Directory
@@ -147,7 +147,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 			}
 		})
 		if err != nil {
-			glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+			log.Infof("subscribing remote %s meta change: %v", peer, err)
 			time.Sleep(1733 * time.Millisecond)
 		}
 	}
@@ -177,7 +177,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
 	value, err := f.Store.KvGet(context.Background(), key)
 
 	if err == ErrKvNotFound {
-		glog.Warningf("readOffset %s not found", peer)
+		log.Warnf("readOffset %s not found", peer)
 		return 0, nil
 	}
 
@@ -187,7 +187,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
 
 	lastTsNs = int64(util.BytesToUint64(value))
 
-	glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
+	log.Infof("readOffset %s : %d", peer, lastTsNs)
 
 	return
 }
@@ -206,7 +206,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int3
 		return fmt.Errorf("updateOffset %s : %v", peer, err)
 	}
 
-	glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
+	log.Tracef("updateOffset %s : %d", peer, lastTsNs)
 
 	return
 }
diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go
index feb76278b..ecf47d576 100644
--- a/weed/filer/meta_replay.go
+++ b/weed/filer/meta_replay.go
@@ -3,7 +3,7 @@ package filer
 import (
 	"context"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -14,7 +14,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
 	var newEntry *Entry
 	if message.OldEntry != nil {
 		oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
-		glog.V(4).Infof("deleting %v", oldPath)
+		log.Tracef("deleting %v", oldPath)
 		if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
 			return err
 		}
@@ -26,7 +26,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
 			dir = message.NewParentPath
 		}
 		key := util.NewFullPath(dir, message.NewEntry.Name)
-		glog.V(4).Infof("creating %v", key)
+		log.Tracef("creating %v", key)
 		newEntry = FromPbEntry(dir, message.NewEntry)
 		if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
 			return err
diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
index d20c6477a..9d3798620 100644
--- a/weed/filer/mongodb/mongodb_store.go
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"go.mongodb.org/mongo-driver/bson"
@@ -134,7 +134,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
 	var where = bson.M{"directory": dir, "name": name}
 	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
 	if err != mongo.ErrNoDocuments && err != nil {
-		glog.Errorf("find %s: %v", fullpath, err)
+		log.Errorf("find %s: %v", fullpath, err)
 		return nil, filer_pb.ErrNotFound
 	}
 
@@ -205,7 +205,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			log.Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
 
@@ -213,7 +213,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
 	}
 
 	if err := cur.Close(ctx); err != nil {
-		glog.V(0).Infof("list iterator close: %v", err)
+		log.Infof("list iterator close: %v", err)
 	}
 
 	return entries, err
diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go
index 4aa9c3a33..c7ea69534 100644
--- a/weed/filer/mongodb/mongodb_store_kv.go
+++ b/weed/filer/mongodb/mongodb_store_kv.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"go.mongodb.org/mongo-driver/bson"
 	"go.mongodb.org/mongo-driver/mongo"
 )
@@ -36,7 +36,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
 	var where = bson.M{"directory": dir, "name": name}
 	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
 	if err != mongo.ErrNoDocuments && err != nil {
-		glog.Errorf("kv get: %v", err)
+		log.Errorf("kv get: %v", err)
 		return nil, filer.ErrKvNotFound
 	}
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index ccc746b90..ca083803e 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -7,7 +7,7 @@ import (
 	"math/rand"
 	"sync"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -54,7 +54,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
 
 			locations = resp.LocationsMap[vid]
 			if locations == nil || len(locations.Locations) == 0 {
-				glog.V(0).Infof("failed to locate %s", fileId)
+				log.Infof("failed to locate %s", fileId)
 				return fmt.Errorf("failed to locate %s", fileId)
 			}
 			vicCacheLock.Lock()
@@ -101,7 +101,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
 	c.readerLock.Lock()
 	defer c.readerLock.Unlock()
 
-	glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+	log.Tracef("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
 	return c.doReadAt(p[n:], offset+int64(n))
 }
 
@@ -121,7 +121,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		}
 		if startOffset < chunk.LogicOffset {
 			gap := int(chunk.LogicOffset - startOffset)
-			glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
+			log.Tracef("zero [%d,%d)", startOffset, startOffset+int64(gap))
 			n += int(min(int64(gap), remaining))
 			startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
 			if remaining <= 0 {
@@ -133,10 +133,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		if chunkStart >= chunkStop {
 			continue
 		}
-		glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
+		log.Tracef("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
 		buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
 		if err != nil {
-			glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+			log.Errorf("fetching chunk %+v: %v\n", chunk, err)
 			return
 		}
 		bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
@@ -145,11 +145,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
 	}
 
-	glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
+	log.Tracef("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
 
 	if err == nil && remaining > 0 && c.fileSize > startOffset {
 		delta := int(min(remaining, c.fileSize-startOffset))
-		glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
+		log.Tracef("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
 		n += delta
 	}
 
@@ -194,11 +194,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
 
 	return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
 
-		glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
+		log.Tracef("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
 
 		data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
 		if data != nil {
-			glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
+			log.Tracef("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
 		} else {
 			var err error
 			data, err = c.doFetchFullChunkData(chunkView)
@@ -213,11 +213,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
 
 func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
 
-	glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
+	log.Tracef("+ doFetchFullChunkData %s", chunkView.FileId)
 
 	data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
 
-	glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
+	log.Tracef("- doFetchFullChunkData %s", chunkView.FileId)
 
 	return data, err
diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
index 0de9924a3..3abe0bca9 100644
--- a/weed/filer/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -10,7 +10,7 @@ import (
 	"github.com/go-redis/redis"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -170,7 +170,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
 		path := util.NewFullPath(string(fullpath), fileName)
 		entry, err := store.FindEntry(ctx, path)
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			log.Infof("list %s : %v", path, err)
 		} else {
 			if entry.TtlSec > 0 {
 				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index 0374314c0..2bc0a1a36 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -8,7 +8,7 @@ import (
 	"github.com/go-redis/redis"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -149,7 +149,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, ful
 		path := util.NewFullPath(string(fullpath), fileName)
 		entry, err := store.FindEntry(ctx, path)
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			log.Infof("list %s : %v", path, err)
 		} else {
 			if entry.TtlSec > 0 {
 				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index cffdc8303..bd03222f1 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -7,7 +7,7 @@ import (
 	"math"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -24,7 +24,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
 
 		urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return err
 		}
 		fileId2Url[chunkView.FileId] = urlStrings
@@ -36,12 +36,12 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
 
 		data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
 		if err != nil {
-			glog.Errorf("read chunk: %v", err)
+			log.Errorf("read chunk: %v", err)
 			return fmt.Errorf("read chunk: %v", err)
 		}
 		_, err = w.Write(data)
 		if err != nil {
-			glog.Errorf("write chunk: %v", err)
+			log.Errorf("write chunk: %v", err)
 			return fmt.Errorf("write chunk: %v", err)
 		}
 	}
@@ -65,7 +65,7 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
 	for _, chunkView := range chunkViews {
 		urlStrings, err := lookupFileIdFn(chunkView.FileId)
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return nil, err
 		}
 
@@ -175,7 +175,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
 func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 	urlStrings, err := c.lookupFileId(chunkView.FileId)
 	if err != nil {
-		glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+		log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 		return err
 	}
 	var buffer bytes.Buffer
@@ -188,7 +188,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 			break
 		}
 		if err != nil {
-			glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+			log.Debugf("read %s failed, err: %v", chunkView.FileId, err)
 			buffer.Reset()
 		} else {
 			break
@@ -201,7 +201,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 	c.bufferPos = 0
 	c.bufferOffset = chunkView.LogicOffset
 
-	// glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+	// log.Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
 	return nil
 }
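At a call site, the migration trades glog's numeric verbosity for a named level, so the intent of each message is readable without knowing the -v convention. A representative before/after comparison, with the lines quoted from the hunks above (not new code):

// Before: verbosity chosen numerically at the call site.
glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
glog.Warningf("readOffset %s not found", peer)

// After: named levels from weed/util/log.
log.Debugf("insert %s falls back to update: %v", entry.FullPath, err)
log.Tracef("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
log.Warnf("readOffset %s not found", peer)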
