author     Chris Lu <chris.lu@gmail.com>  2021-11-02 23:39:16 -0700
committer  Chris Lu <chris.lu@gmail.com>  2021-11-02 23:39:16 -0700
commit     0c8dea9de8dffa2e2cc252cdab7112a3664b3a09 (patch)
tree       3c0cfd3cbe04c9a74059be5dc9725d00725afa0c
parent     5160eb08f7665409221ebb0b9db6f4820e29bed3 (diff)
go fmt
-rw-r--r--  weed/filer/filechunks_read.go                |  2
-rw-r--r--  weed/filer/filechunks_read_test.go           |  8
-rw-r--r--  weed/filer/filer_delete_entry.go             |  2
-rw-r--r--  weed/filer/redis2/universal_redis_store.go   |  4
-rw-r--r--  weed/filesys/dir_rename.go                   |  3
-rw-r--r--  weed/server/filer_grpc_server_rename.go      |  2
-rw-r--r--  weed/server/master_server_cluster.go         |  2
-rw-r--r--  weed/server/volume_grpc_copy.go              |  2
-rw-r--r--  weed/server/volume_grpc_vacuum.go            |  2
-rw-r--r--  weed/storage/backend/s3_backend/s3_upload.go |  8
10 files changed, 17 insertions, 18 deletions
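
Note: this commit is mechanical gofmt output, so every hunk below only adjusts whitespace. As a minimal sketch (not part of the commit), the operator-spacing rule behind the first few hunks can be reproduced with the standard library's go/format package, which implements gofmt's formatting; the spacing function below is illustrative, not from the repo:

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// Input mirroring two "-" lines from this diff; go/format parses
	// and reprints the source without type-checking it.
	src := []byte(`package demo

func spacing(queue []int) (int64, int) {
	var limit int64 = 1024*1024
	lastIndex := len(queue) -1
	return limit, lastIndex
}
`)
	out, err := format.Source(src)
	if err != nil {
		log.Fatal(err) // src must at least parse
	}
	// Prints "1024 * 1024" and "len(queue) - 1", matching the "+" lines
	// in the filechunks_read.go and filechunks_read_test.go hunks.
	fmt.Print(string(out))
}
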
diff --git a/weed/filer/filechunks_read.go b/weed/filer/filechunks_read.go
index 742f050be..33ee6d138 100644
--- a/weed/filer/filechunks_read.go
+++ b/weed/filer/filechunks_read.go
@@ -40,7 +40,7 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
for _, point := range points {
if point.isStart {
if len(queue) > 0 {
- lastIndex := len(queue) -1
+ lastIndex := len(queue) - 1
lastPoint := queue[lastIndex]
if point.x != prevX && lastPoint.ts < point.ts {
visibles = addToVisibles(visibles, prevX, lastPoint, point)
diff --git a/weed/filer/filechunks_read_test.go b/weed/filer/filechunks_read_test.go
index ff0cd9f4b..e70c66e6f 100644
--- a/weed/filer/filechunks_read_test.go
+++ b/weed/filer/filechunks_read_test.go
@@ -52,7 +52,7 @@ func TestReadResolvedChunks(t *testing.T) {
func TestRandomizedReadResolvedChunks(t *testing.T) {
- var limit int64 = 1024*1024
+ var limit int64 = 1024 * 1024
array := make([]int64, limit)
var chunks []*filer_pb.FileChunk
for ts := int64(0); ts < 1024; ts++ {
@@ -75,7 +75,7 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
visibles := readResolvedChunks(chunks)
for _, visible := range visibles {
- for i := visible.start; i<visible.stop;i++{
+ for i := visible.start; i < visible.stop; i++ {
if array[i] != visible.modifiedTime {
t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
}
@@ -101,12 +101,12 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil
func TestSequentialReadResolvedChunks(t *testing.T) {
- var chunkSize int64 = 1024*1024*2
+ var chunkSize int64 = 1024 * 1024 * 2
var chunks []*filer_pb.FileChunk
for ts := int64(0); ts < 13; ts++ {
chunks = append(chunks, &filer_pb.FileChunk{
FileId: "",
- Offset: chunkSize*ts,
+ Offset: chunkSize * ts,
Size: uint64(chunkSize),
Mtime: 1,
})
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 116f5cd2f..cc33811aa 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -70,7 +70,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
}
-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc)(err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) {
lastFileName := ""
includeLastFile := false
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index f9798cf2f..deccf8922 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -134,8 +134,8 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
}
members, err := store.Client.ZRangeByLex(ctx, genDirectoryListKey(string(fullpath)), &redis.ZRangeBy{
- Min: "-",
- Max: "+",
+ Min: "-",
+ Max: "+",
}).Result()
if err != nil {
return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index d2288e3bd..8a80559f6 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -67,7 +67,6 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
return nil
}
-
func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
// comes from filer StreamRenameEntry, can only be create or delete entry
@@ -119,7 +118,7 @@ func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
}
dir.wfs.handlesLock.Unlock()
- }else if resp.EventNotification.OldEntry != nil {
+ } else if resp.EventNotification.OldEntry != nil {
// without new entry, only old entry name exists. This is the second step to delete old entry
if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil {
return err
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index 278ecadbd..a7f428848 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -161,7 +161,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
Directory: string(newParent),
EventNotification: &filer_pb.EventNotification{
- OldEntry: &filer_pb.Entry{
+ OldEntry: &filer_pb.Entry{
Name: entry.Name(),
},
NewEntry: newEntry.ToProtoEntry(),
diff --git a/weed/server/master_server_cluster.go b/weed/server/master_server_cluster.go
index 52e1526f9..19ecaff0a 100644
--- a/weed/server/master_server_cluster.go
+++ b/weed/server/master_server_cluster.go
@@ -56,7 +56,7 @@ func (cluster *Cluster) RemoveClusterNode(nodeType string, address pb.ServerAddr
}
}
-func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode){
+func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode) {
switch nodeType {
case "filer":
cluster.filersLock.RLock()
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 28018f344..9630b322e 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -80,7 +80,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
// println("source:", volFileInfoResp.String())
copyResponse := &volume_server_pb.VolumeCopyResponse{}
- reportInterval := int64(1024*1024*128)
+ reportInterval := int64(1024 * 1024 * 128)
nextReportTarget := reportInterval
var modifiedTsNs int64
var sendErr error
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index 57bf8d867..0ab782b02 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -27,7 +27,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCompactRequest, stream volume_server_pb.VolumeServer_VacuumVolumeCompactServer) error {
resp := &volume_server_pb.VacuumVolumeCompactResponse{}
- reportInterval := int64(1024*1024*128)
+ reportInterval := int64(1024 * 1024 * 128)
nextReportTarget := reportInterval
var sendErr error
diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go
index bc1c82730..cb5ce83e5 100644
--- a/weed/storage/backend/s3_backend/s3_upload.go
+++ b/weed/storage/backend/s3_backend/s3_upload.go
@@ -49,10 +49,10 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
// Upload the file to S3.
var result *s3manager.UploadOutput
result, err = uploader.Upload(&s3manager.UploadInput{
- Bucket: aws.String(destBucket),
- Key: aws.String(destKey),
- Body: fileReader,
- StorageClass: aws.String("STANDARD_IA"),
+ Bucket: aws.String(destBucket),
+ Key: aws.String(destKey),
+ Body: fileReader,
+ StorageClass: aws.String("STANDARD_IA"),
})
//in case it fails to upload
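
The universal_redis_store.go and s3_upload.go hunks above show the other gofmt rule at work in this commit: adjacent key: value pairs in a composite literal are padded so the values start in one column. A sketch under the same assumptions (the opts type is hypothetical, chosen to echo the ZRangeBy and UploadInput fields):

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// Hypothetical struct echoing the fields realigned in this diff.
	src := []byte(`package demo

type opts struct {
	Min, Max, StorageClass string
}

var o = opts{
	Min: "-",
	Max: "+",
	StorageClass: "STANDARD_IA",
}
`)
	out, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	// Min: and Max: gain padding so all three values line up, the same
	// realignment shown in the redis2 and s3_backend hunks.
	fmt.Print(string(out))
}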