path: root/weed/s3api
Diffstat (limited to 'weed/s3api')
-rw-r--r--  weed/s3api/auth_signature_v4.go                  2
-rw-r--r--  weed/s3api/auto_signature_v4_test.go             2
-rw-r--r--  weed/s3api/filer_multipart.go                  218
-rw-r--r--  weed/s3api/filer_multipart_test.go              81
-rw-r--r--  weed/s3api/s3_constants/header.go                1
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go              2
-rw-r--r--  weed/s3api/s3api_object_copy_handlers.go        12
-rw-r--r--  weed/s3api/s3api_object_multipart_handlers.go    9
-rw-r--r--  weed/s3api/s3api_objects_list_handlers.go        2
9 files changed, 174 insertions, 155 deletions
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
index 04548cc6f..0a156cfce 100644
--- a/weed/s3api/auth_signature_v4.go
+++ b/weed/s3api/auth_signature_v4.go
@@ -311,7 +311,7 @@ func parseSignature(signElement string) (string, s3err.ErrorCode) {
return signature, s3err.ErrNone
}
-// doesPolicySignatureMatch - Verify query headers with post policy
+// doesPolicySignatureV4Match - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// returns ErrNone if the signature matches.
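For context, verifying a SigV4 POST policy means re-deriving the signing key from the credential scope (date, region, service) and HMAC-ing the base64-encoded policy document, then comparing against the submitted signature. A minimal standalone sketch of that computation, with illustrative names that are not from this file:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key []byte, data string) []byte {
	h := hmac.New(sha256.New, key)
	h.Write([]byte(data))
	return h.Sum(nil)
}

// signPolicy derives the SigV4 signing key and signs the base64 policy,
// which is what doesPolicySignatureV4Match has to recompute and compare.
func signPolicy(secretKey, date, region, policyBase64 string) string {
	k := hmacSHA256([]byte("AWS4"+secretKey), date) // date as YYYYMMDD
	k = hmacSHA256(k, region)                       // e.g. "us-east-1"
	k = hmacSHA256(k, "s3")
	k = hmacSHA256(k, "aws4_request")
	return hex.EncodeToString(hmacSHA256(k, policyBase64))
}

func main() {
	fmt.Println(signPolicy("secret", "20240101", "us-east-1", "eyJleHBpcmF0aW9uIjogLi4u"))
}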
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
index ccee8b885..6ff67b5bf 100644
--- a/weed/s3api/auto_signature_v4_test.go
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -262,7 +262,7 @@ func getMD5HashBase64(data []byte) string {
return base64.StdEncoding.EncodeToString(getMD5Sum(data))
}
-// getSHA256Hash returns SHA-256 sum of given data.
+// getSHA256Sum returns SHA-256 sum of given data.
func getSHA256Sum(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index 765a5679e..e9cd6a0c4 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -1,11 +1,12 @@
package s3api
import (
+ "cmp"
"encoding/hex"
"encoding/xml"
"fmt"
- "github.com/google/uuid"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+ "github.com/seaweedfs/seaweedfs/weed/stats"
"golang.org/x/exp/slices"
"math"
"path/filepath"
@@ -16,12 +17,19 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
+ "github.com/google/uuid"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
+const (
+ multipartExt = ".part"
+ multiPartMinSize = 5 * 1024 * 1024
+)
+
type InitiateMultipartUploadResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
s3.CreateMultipartUploadOutput
@@ -70,61 +78,129 @@ type CompleteMultipartUploadResult struct {
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
glog.V(2).Infof("completeMultipartUpload input %v", input)
-
- completedParts := parts.Parts
- slices.SortFunc(completedParts, func(a, b CompletedPart) int {
- return a.PartNumber - b.PartNumber
- })
+ if len(parts.Parts) == 0 {
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
+ return nil, s3err.ErrNoSuchUpload
+ }
+ completedPartNumbers := []int{}
+ completedPartMap := make(map[int][]string)
+ for _, part := range parts.Parts {
+ if _, ok := completedPartMap[part.PartNumber]; !ok {
+ completedPartNumbers = append(completedPartNumbers, part.PartNumber)
+ }
+ completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag)
+ }
+ sort.Ints(completedPartNumbers)
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
-
entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
- if err != nil || len(entries) == 0 {
+ if err != nil {
glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
+ return nil, s3err.ErrNoSuchUpload
+ }
+
+ if len(entries) == 0 {
+ entryName, dirName := s3a.getEntryNameAndDir(input)
+ if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
+ if uploadId, ok := entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id]; ok && *input.UploadId == string(uploadId) {
+ return &CompleteMultipartUploadResult{
+ CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
+ Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
+ Bucket: input.Bucket,
+ ETag: aws.String("\"" + filer.ETagChunks(entry.GetChunks()) + "\""),
+ Key: objectKey(input.Key),
+ },
+ }, s3err.ErrNone
+ }
+ }
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
return nil, s3err.ErrNoSuchUpload
}
pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId)
if err != nil {
glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
return nil, s3err.ErrNoSuchUpload
}
-
- // check whether completedParts is more than received parts
- {
- partNumbers := make(map[int]struct{}, len(entries))
- for _, entry := range entries {
- if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
- partNumberString := entry.Name[:len(entry.Name)-len(".part")]
- partNumber, err := strconv.Atoi(partNumberString)
- if err == nil {
- partNumbers[partNumber] = struct{}{}
+ deleteEntries := []*filer_pb.Entry{}
+ partEntries := make(map[int][]*filer_pb.Entry, len(entries))
+ entityTooSmall := false
+ for _, entry := range entries {
+ foundEntry := false
+ glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name)
+ if entry.IsDirectory || !strings.HasSuffix(entry.Name, multipartExt) {
+ continue
+ }
+ partNumber, err := parsePartNumber(entry.Name)
+ if err != nil {
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNumber).Inc()
+ glog.Errorf("completeMultipartUpload failed to pasre partNumber %s:%s", entry.Name, err)
+ continue
+ }
+ completedPartsByNumber, ok := completedPartMap[partNumber]
+ if !ok {
+ continue
+ }
+ for _, partETag := range completedPartsByNumber {
+ partETag = strings.Trim(partETag, `"`)
+ entryETag := hex.EncodeToString(entry.Attributes.GetMd5())
+ if partETag != "" && len(partETag) == 32 && entryETag != "" {
+ if entryETag != partETag {
+ glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagMismatch).Inc()
+ continue
}
+ } else {
+ glog.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagInvalid).Inc()
+ }
+ if len(entry.Chunks) == 0 {
+ glog.Warningf("completeMultipartUpload %s empty chunks", entry.Name)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEmpty).Inc()
+ continue
}
+ // the same part may appear multiple times because of client retries
+ partEntries[partNumber] = append(partEntries[partNumber], entry)
+ foundEntry = true
}
- for _, part := range completedParts {
- if _, found := partNumbers[part.PartNumber]; !found {
- return nil, s3err.ErrInvalidPart
+ if foundEntry {
+ if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] &&
+ entry.Attributes.FileSize < multiPartMinSize {
+ glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name)
+ entityTooSmall = true
}
+ } else {
+ deleteEntries = append(deleteEntries, entry)
}
}
-
+ if entityTooSmall {
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc()
+ return nil, s3err.ErrEntityTooSmall
+ }
mime := pentry.Attributes.Mime
-
var finalParts []*filer_pb.FileChunk
var offset int64
-
- for _, entry := range entries {
- if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
- partETag, found := findByPartNumber(entry.Name, completedParts)
- if !found {
+ for _, partNumber := range completedPartNumbers {
+ partEntriesByNumber, ok := partEntries[partNumber]
+ if !ok {
+ glog.Errorf("part %d has no entry", partNumber)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNotFound).Inc()
+ return nil, s3err.ErrInvalidPart
+ }
+ found := false
+ if len(partEntriesByNumber) > 1 {
+ slices.SortFunc(partEntriesByNumber, func(a, b *filer_pb.Entry) int {
+ return cmp.Compare(b.Chunks[0].ModifiedTsNs, a.Chunks[0].ModifiedTsNs)
+ })
+ }
+ for _, entry := range partEntriesByNumber {
+ if found {
+ deleteEntries = append(deleteEntries, entry)
+ stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEntryMismatch).Inc()
continue
}
- entryETag := hex.EncodeToString(entry.Attributes.GetMd5())
- if partETag != "" && len(partETag) == 32 && entryETag != "" && entryETag != partETag {
- glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
- return nil, s3err.ErrInvalidPart
- }
for _, chunk := range entry.GetChunks() {
p := &filer_pb.FileChunk{
FileId: chunk.GetFileIdString(),
@@ -137,28 +213,16 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
finalParts = append(finalParts, p)
offset += int64(chunk.Size)
}
+ found = true
}
}
- entryName := filepath.Base(*input.Key)
- dirName := filepath.ToSlash(filepath.Dir(*input.Key))
- if dirName == "." {
- dirName = ""
- }
- if strings.HasPrefix(dirName, "/") {
- dirName = dirName[1:]
- }
- dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
-
- // remove suffix '/'
- if strings.HasSuffix(dirName, "/") {
- dirName = dirName[:len(dirName)-1]
- }
-
+ entryName, dirName := s3a.getEntryNameAndDir(input)
err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
+ entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id] = []byte(*input.UploadId)
for k, v := range pentry.Extended {
if k != "key" {
entry.Extended[k] = v
@@ -186,6 +250,13 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
},
}
+ for _, deleteEntry := range deleteEntries {
+ // delete unused part data
+ glog.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name)
+ if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil {
+ glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err)
+ }
+ }
if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
}
@@ -193,29 +264,33 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
return
}
-func findByPartNumber(fileName string, parts []CompletedPart) (etag string, found bool) {
- partNumber, formatErr := strconv.Atoi(fileName[:4])
- if formatErr != nil {
- return
+func (s3a *S3ApiServer) getEntryNameAndDir(input *s3.CompleteMultipartUploadInput) (string, string) {
+ entryName := filepath.Base(*input.Key)
+ dirName := filepath.ToSlash(filepath.Dir(*input.Key))
+ if dirName == "." {
+ dirName = ""
}
- x := sort.Search(len(parts), func(i int) bool {
- return parts[i].PartNumber >= partNumber
- })
- if x >= len(parts) {
- return
+ if strings.HasPrefix(dirName, "/") {
+ dirName = dirName[1:]
}
- if parts[x].PartNumber != partNumber {
- return
+ dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
+
+ // remove suffix '/'
+ if strings.HasSuffix(dirName, "/") {
+ dirName = dirName[:len(dirName)-1]
}
- y := 0
- for i, part := range parts[x:] {
- if part.PartNumber == partNumber {
- y = i
- } else {
- break
- }
+ return entryName, dirName
+}
+
+func parsePartNumber(fileName string) (int, error) {
+ var partNumberString string
+ index := strings.Index(fileName, "_")
+ if index != -1 {
+ partNumberString = fileName[:index]
+ } else {
+ partNumberString = fileName[:len(fileName)-len(multipartExt)]
}
- return parts[x+y].ETag, true
+ return strconv.Atoi(partNumberString)
}
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {
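Both the legacy 0001.part names and the new 0001_&lt;uuid&gt;.part names resolve to the same part number. A standalone sketch of the parsing rule above, for quick experimentation outside the server:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const multipartExt = ".part"

// parsePartNumber mirrors the helper above: everything before the first
// "_" (or, for legacy names, before the extension) is the part number.
func parsePartNumber(fileName string) (int, error) {
	partNumberString := strings.TrimSuffix(fileName, multipartExt)
	if index := strings.Index(fileName, "_"); index != -1 {
		partNumberString = fileName[:index]
	}
	return strconv.Atoi(partNumberString)
}

func main() {
	for _, name := range []string{"0001.part", "0007_0f8fad5b-d9cb-469f-a165-70867728950e.part"} {
		n, err := parsePartNumber(name)
		fmt.Printf("%s -> %d (%v)\n", name, n, err)
	}
}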
@@ -331,7 +406,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
StorageClass: aws.String("STANDARD"),
}
- entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
+ entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d%s", *input.PartNumberMarker, multipartExt), false, uint32(*input.MaxParts))
if err != nil {
glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, s3err.ErrNoSuchUpload
@@ -343,9 +418,8 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
output.IsTruncated = aws.Bool(!isLast)
for _, entry := range entries {
- if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
- partNumberString := entry.Name[:len(entry.Name)-len(".part")]
- partNumber, err := strconv.Atoi(partNumberString)
+ if strings.HasSuffix(entry.Name, multipartExt) && !entry.IsDirectory {
+ partNumber, err := parsePartNumber(entry.Name)
if err != nil {
glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
continue
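The entityTooSmall path enforces the standard S3 constraint that every part except the last must be at least 5 MB (multiPartMinSize above); uploads violating it now fail with ErrEntityTooSmall at complete time. A hedged client-side sketch of splitting a payload so it passes that check (sizes are illustrative):

package main

import "fmt"

const minPartSize = 5 * 1024 * 1024 // matches multiPartMinSize

// splitParts cuts total bytes into partSize chunks; only the final part
// may fall below the 5 MB minimum, mirroring the server-side check.
func splitParts(total, partSize int64) ([]int64, error) {
	if partSize < minPartSize {
		return nil, fmt.Errorf("part size %d is below the 5 MB minimum", partSize)
	}
	var sizes []int64
	for total > 0 {
		n := partSize
		if total < partSize {
			n = total // the last part may be short
		}
		sizes = append(sizes, n)
		total -= n
	}
	return sizes, nil
}

func main() {
	sizes, _ := splitParts(12*1024*1024, minPartSize)
	fmt.Println(sizes) // [5242880 5242880 2097152]
}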
diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go
index e76d903b8..7f75a40de 100644
--- a/weed/s3api/filer_multipart_test.go
+++ b/weed/s3api/filer_multipart_test.go
@@ -50,88 +50,27 @@ func TestListPartsResult(t *testing.T) {
}
-func Test_findByPartNumber(t *testing.T) {
- type args struct {
- fileName string
- parts []CompletedPart
- }
-
- parts := []CompletedPart{
- {
- ETag: "xxx",
- PartNumber: 1,
- },
- {
- ETag: "lll",
- PartNumber: 1,
- },
- {
- ETag: "yyy",
- PartNumber: 3,
- },
- {
- ETag: "zzz",
- PartNumber: 5,
- },
- }
-
+func Test_parsePartNumber(t *testing.T) {
tests := []struct {
- name string
- args args
- wantEtag string
- wantFound bool
+ name string
+ fileName string
+ partNum int
}{
{
"first",
- args{
- "0001.part",
- parts,
- },
- "lll",
- true,
+ "0001_uuid.part",
+ 1,
},
{
"second",
- args{
- "0002.part",
- parts,
- },
- "",
- false,
- },
- {
- "third",
- args{
- "0003.part",
- parts,
- },
- "yyy",
- true,
- },
- {
- "fourth",
- args{
- "0004.part",
- parts,
- },
- "",
- false,
- },
- {
- "fifth",
- args{
- "0005.part",
- parts,
- },
- "zzz",
- true,
+ "0002.part",
+ 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- gotEtag, gotFound := findByPartNumber(tt.args.fileName, tt.args.parts)
- assert.Equalf(t, tt.wantEtag, gotEtag, "findByPartNumber(%v, %v)", tt.args.fileName, tt.args.parts)
- assert.Equalf(t, tt.wantFound, gotFound, "findByPartNumber(%v, %v)", tt.args.fileName, tt.args.parts)
+ partNumber, _ := parsePartNumber(tt.fileName)
+ assert.Equalf(t, tt.partNum, partNumber, "parsePartNumber(%v)", tt.fileName)
})
}
}
diff --git a/weed/s3api/s3_constants/header.go b/weed/s3api/s3_constants/header.go
index 30a878ccb..8e4a2f8c7 100644
--- a/weed/s3api/s3_constants/header.go
+++ b/weed/s3api/s3_constants/header.go
@@ -39,6 +39,7 @@ const (
AmzTagCount = "x-amz-tagging-count"
X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
+ X_SeaweedFS_Header_Upload_Id = "X-Seaweedfs-Upload-Id"
// S3 ACL headers
AmzCannedAcl = "X-Amz-Acl"
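Persisting the upload id on the finished entry is what makes a retried CompleteMultipartUpload idempotent: if the uploads folder is already gone but the destination entry carries a matching X-Seaweedfs-Upload-Id, the handler re-answers with the existing object instead of ErrNoSuchUpload. A hedged aws-sdk-go sketch of the client retry this now tolerates (endpoint, bucket, and ids are assumptions):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithEndpoint("http://localhost:8333"). // assumed S3 gateway address
		WithRegion("us-east-1").
		WithS3ForcePathStyle(true)))
	svc := s3.New(sess)

	input := &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("bucket"),
		Key:      aws.String("dir/key"),
		UploadId: aws.String("upload-id-from-CreateMultipartUpload"),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: aws.String(`"etag-of-part-1"`), PartNumber: aws.Int64(1)},
			},
		},
	}
	// The first call may succeed server-side yet time out client-side;
	// the retry now matches the stored upload id and returns the object.
	if _, err := svc.CompleteMultipartUpload(input); err != nil {
		out, retryErr := svc.CompleteMultipartUpload(input)
		fmt.Println(out, retryErr)
	}
}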
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index bb3939571..04e1e00a4 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -351,7 +351,7 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr
}
-// DeleteBucketMetricsConfiguration Delete Bucket Lifecycle
+// DeleteBucketLifecycleHandler Delete Bucket Lifecycle
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index 8dc33f213..8d13fe17e 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -2,16 +2,17 @@ package s3api
import (
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
- "modernc.org/strutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
+ "modernc.org/strutil"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -170,8 +171,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
rangeHeader := r.Header.Get("x-amz-copy-source-range")
- dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part",
- s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(dstBucket), uploadID, partID)
+ dstUrl := s3a.genPartUploadUrl(dstBucket, uploadID, partID)
srcUrl := fmt.Sprintf("http://%s%s/%s%s",
s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))
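CopyObjectPartHandler backs UploadPartCopy, where a part's bytes come from an existing object, optionally narrowed by x-amz-copy-source-range; the destination is the same uuid-suffixed part file used by PutObjectPart. A hedged aws-sdk-go sketch of such a request (endpoint, names, and ids are assumptions):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithEndpoint("http://localhost:8333"). // assumed S3 gateway address
		WithRegion("us-east-1").
		WithS3ForcePathStyle(true)))
	svc := s3.New(sess)

	// Copy the first 5 MB of an existing object in as part 2; the server
	// stores the result under the uuid-suffixed name from genPartUploadUrl.
	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("dst-bucket"),
		Key:             aws.String("dst/key"),
		UploadId:        aws.String("upload-id-from-CreateMultipartUpload"),
		PartNumber:      aws.Int64(2),
		CopySource:      aws.String("/src-bucket/src/key"),
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	fmt.Println(out, err)
}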
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 187022079..6fecdcf2d 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -10,6 +10,7 @@ import (
"strconv"
"strings"
+ "github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
@@ -247,8 +248,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID)
- uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part",
- s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID)
+ uploadUrl := s3a.genPartUploadUrl(bucket, uploadID, partID)
if partID == 1 && r.Header.Get("Content-Type") == "" {
dataReader = mimeDetect(r, dataReader)
@@ -271,6 +271,11 @@ func (s3a *S3ApiServer) genUploadsFolder(bucket string) string {
return fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, s3_constants.MultipartUploadsFolder)
}
+func (s3a *S3ApiServer) genPartUploadUrl(bucket, uploadID string, partID int) string {
+ return fmt.Sprintf("http://%s%s/%s/%04d_%s.part",
+ s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID, uuid.NewString())
+}
+
// Generate uploadID hash string from object
func (s3a *S3ApiServer) generateUploadID(object string) string {
if strings.HasPrefix(object, "/") {
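Part files are now written as &lt;number&gt;_&lt;uuid&gt;.part instead of &lt;number&gt;.part, so a client retry of UploadPart creates a sibling file rather than racing an in-place overwrite; completeMultipartUpload keeps the newest entry per part number and deletes the rest. A minimal sketch of the naming scheme:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// partFileName matches the basename format in genPartUploadUrl: a
// zero-padded part number plus a fresh uuid, so retries never collide.
func partFileName(partID int) string {
	return fmt.Sprintf("%04d_%s.part", partID, uuid.NewString())
}

func main() {
	fmt.Println(partFileName(1)) // e.g. 0001_0f8fad5b-d9cb-469f-a165-70867728950e.part
	fmt.Println(partFileName(1)) // a retry of part 1 gets a different name
}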
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index f332da856..b00e4630d 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -374,7 +374,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
}
if cursor.maxKeys <= 0 {
cursor.isTruncated = true
- return
+ continue
}
entry := resp.Entry
nextMarker = entry.Name
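Returning early here abandoned the rest of the streamed listing as soon as maxKeys ran out; continuing lets the loop finish consuming the response while only flagging truncation, so the markers and counters it maintains stay consistent. A simplified sketch of the resulting control flow (the stream is reduced to a slice; the real code reads from a gRPC stream):

package main

import "fmt"

type cursor struct {
	maxKeys     int
	isTruncated bool
}

// drain walks every entry; once maxKeys is exhausted it only marks
// truncation and keeps going, mirroring the return-to-continue fix.
func drain(entries []string, c *cursor) (emitted []string, nextMarker string) {
	for _, name := range entries {
		if c.maxKeys <= 0 {
			c.isTruncated = true
			continue // keep consuming instead of bailing out mid-stream
		}
		emitted = append(emitted, name)
		nextMarker = name
		c.maxKeys--
	}
	return
}

func main() {
	c := &cursor{maxKeys: 2}
	out, marker := drain([]string{"a", "b", "c"}, c)
	fmt.Println(out, marker, c.isTruncated) // [a b] b true
}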