about summary refs log tree commit diff
path: root/weed/s3api
diff options
context:
space:
mode:
Diffstat (limited to 'weed/s3api')
-rw-r--r--  weed/s3api/s3api_object_handlers.go  33
-rw-r--r--  weed/s3api/s3api_server.go            9
2 files changed, 12 insertions(+), 30 deletions(-)
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index dee5f60c8..cd0e82421 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -24,7 +24,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
- util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -994,36 +993,10 @@ var volumeServerHTTPClient = &http.Client{
}
// createLookupFileIdFunction creates a reusable lookup function for resolving volume URLs
+// Uses FilerClient's vidMap cache to eliminate per-chunk gRPC overhead
func (s3a *S3ApiServer) createLookupFileIdFunction() func(context.Context, string) ([]string, error) {
- return func(ctx context.Context, fileId string) ([]string, error) {
- var urls []string
- err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
- vid := filer.VolumeId(fileId)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: []string{vid},
- })
- if err != nil {
- return err
- }
- if locs, found := resp.LocationsMap[vid]; found {
- for _, loc := range locs.Locations {
- // Build complete URL with volume server address and fileId
- // The fileId parameter contains the full "volumeId,fileKey" identifier (e.g., "3,01637037d6")
- // This constructs URLs like: http://127.0.0.1:8080/3,01637037d6 (or https:// if configured)
- // NormalizeUrl ensures the proper scheme (http:// or https://) is used based on configuration
- normalizedUrl, err := util_http.NormalizeUrl(loc.Url)
- if err != nil {
- glog.Warningf("Failed to normalize URL for %s: %v", loc.Url, err)
- continue
- }
- urls = append(urls, normalizedUrl+"/"+fileId)
- }
- }
- return nil
- })
- glog.V(3).Infof("createLookupFileIdFunction: fileId=%s, resolved urls=%v", fileId, urls)
- return urls, err
- }
+ // Return the FilerClient's lookup function which uses the battle-tested vidMap cache
+ return s3a.filerClient.GetLookupFileIdFunction()
}
// streamFromVolumeServersWithSSE handles streaming with inline SSE decryption
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index b9c4eb3fc..992027fda 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -19,6 +19,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/iam/sts"
"github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
+ "github.com/seaweedfs/seaweedfs/weed/wdclient"
"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/pb"
@@ -55,6 +56,7 @@ type S3ApiServer struct {
cb *CircuitBreaker
randomClientId int32
filerGuard *security.Guard
+ filerClient *wdclient.FilerClient
client util_http_client.HTTPClientInterface
bucketRegistry *BucketRegistry
credentialManager *credential.CredentialManager
@@ -91,11 +93,18 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl
// Initialize bucket policy engine first
policyEngine := NewBucketPolicyEngine()
+ // Initialize FilerClient for volume location caching
+ // Uses the battle-tested vidMap with filer-based lookups
+ // S3 API typically connects to a single filer, but wrap in slice for consistency
+ filerClient := wdclient.NewFilerClient([]pb.ServerAddress{option.Filer}, option.GrpcDialOption, option.DataCenter)
+ glog.V(0).Infof("S3 API initialized FilerClient for volume location caching")
+
s3ApiServer = &S3ApiServer{
option: option,
iam: iam,
randomClientId: util.RandomInt32(),
filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec),
+ filerClient: filerClient,
cb: NewCircuitBreaker(option),
credentialManager: iam.credentialManager,
bucketConfigCache: NewBucketConfigCache(60 * time.Minute), // Increased TTL since cache is now event-driven