aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Lu <chrislusf@users.noreply.github.com>2025-07-13 16:21:36 -0700
committerGitHub <noreply@github.com>2025-07-13 16:21:36 -0700
commit7cb1ca13082568bfdcdab974d8cefddf650443c5 (patch)
tree573b5e15d080d37b9312cade4151da9e3fb7ddee
parent1549ee2e154ab040e211ac7b3bc361272069abef (diff)
downloadseaweedfs-7cb1ca13082568bfdcdab974d8cefddf650443c5.tar.xz
seaweedfs-7cb1ca13082568bfdcdab974d8cefddf650443c5.zip
Add policy engine (#6970)
-rw-r--r--weed/admin/dash/policies_management.go37
-rw-r--r--weed/admin/handlers/policy_handlers.go8
-rw-r--r--weed/credential/credential_store.go22
-rw-r--r--weed/credential/filer_etc/filer_etc_policy.go22
-rw-r--r--weed/credential/memory/memory_policy.go12
-rw-r--r--weed/credential/memory/memory_store.go9
-rw-r--r--weed/credential/postgres/postgres_policy.go14
-rw-r--r--weed/credential/test/policy_test.go25
-rw-r--r--weed/iamapi/iamapi_management_handlers.go62
-rw-r--r--weed/iamapi/iamapi_management_handlers_test.go50
-rw-r--r--weed/iamapi/iamapi_server.go3
-rw-r--r--weed/iamapi/iamapi_test.go3
-rw-r--r--weed/s3api/auth_credentials.go13
-rw-r--r--weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md249
-rw-r--r--weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md176
-rw-r--r--weed/s3api/policy_engine/POLICY_EXAMPLES.md54
-rw-r--r--weed/s3api/policy_engine/README_POLICY_ENGINE.md279
-rw-r--r--weed/s3api/policy_engine/conditions.go768
-rw-r--r--weed/s3api/policy_engine/engine.go432
-rw-r--r--weed/s3api/policy_engine/engine_test.go716
-rw-r--r--weed/s3api/policy_engine/examples.go463
-rw-r--r--weed/s3api/policy_engine/integration.go438
-rw-r--r--weed/s3api/policy_engine/types.go454
-rw-r--r--weed/s3api/policy_engine/wildcard_matcher.go253
-rw-r--r--weed/s3api/policy_engine/wildcard_matcher_test.go469
-rw-r--r--weed/s3api/s3_constants/extend_key.go3
-rw-r--r--weed/s3api/s3_constants/header.go1
-rw-r--r--weed/s3api/s3_constants/s3_actions.go17
-rw-r--r--weed/s3api/s3api_bucket_handlers.go19
-rw-r--r--weed/s3api/s3api_governance_permissions_test.go599
-rw-r--r--weed/s3api/s3api_object_handlers_delete.go4
-rw-r--r--weed/s3api/s3api_object_handlers_put.go2
-rw-r--r--weed/s3api/s3api_object_retention.go84
33 files changed, 5565 insertions, 195 deletions
diff --git a/weed/admin/dash/policies_management.go b/weed/admin/dash/policies_management.go
index 8853bbb54..5757520b2 100644
--- a/weed/admin/dash/policies_management.go
+++ b/weed/admin/dash/policies_management.go
@@ -7,18 +7,19 @@ import (
"github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
type IAMPolicy struct {
- Name string `json:"name"`
- Document credential.PolicyDocument `json:"document"`
- DocumentJSON string `json:"document_json"`
- CreatedAt time.Time `json:"created_at"`
- UpdatedAt time.Time `json:"updated_at"`
+ Name string `json:"name"`
+ Document policy_engine.PolicyDocument `json:"document"`
+ DocumentJSON string `json:"document_json"`
+ CreatedAt time.Time `json:"created_at"`
+ UpdatedAt time.Time `json:"updated_at"`
}
type PoliciesCollection struct {
- Policies map[string]credential.PolicyDocument `json:"policies"`
+ Policies map[string]policy_engine.PolicyDocument `json:"policies"`
}
type PoliciesData struct {
@@ -30,14 +31,14 @@ type PoliciesData struct {
// Policy management request structures
type CreatePolicyRequest struct {
- Name string `json:"name" binding:"required"`
- Document credential.PolicyDocument `json:"document" binding:"required"`
- DocumentJSON string `json:"document_json"`
+ Name string `json:"name" binding:"required"`
+ Document policy_engine.PolicyDocument `json:"document" binding:"required"`
+ DocumentJSON string `json:"document_json"`
}
type UpdatePolicyRequest struct {
- Document credential.PolicyDocument `json:"document" binding:"required"`
- DocumentJSON string `json:"document_json"`
+ Document policy_engine.PolicyDocument `json:"document" binding:"required"`
+ DocumentJSON string `json:"document_json"`
}
// PolicyManager interface is now in the credential package
@@ -55,7 +56,7 @@ func NewCredentialStorePolicyManager(credentialManager *credential.CredentialMan
}
// GetPolicies retrieves all IAM policies via credential store
-func (cspm *CredentialStorePolicyManager) GetPolicies(ctx context.Context) (map[string]credential.PolicyDocument, error) {
+func (cspm *CredentialStorePolicyManager) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) {
// Get policies from credential store
// We'll use the credential store to access the filer indirectly
// Since policies are stored separately, we need to access the underlying store
@@ -75,12 +76,12 @@ func (cspm *CredentialStorePolicyManager) GetPolicies(ctx context.Context) (map[
} else {
// Fallback: use empty policies for stores that don't support policies
glog.V(1).Infof("Credential store doesn't support policy management, returning empty policies")
- return make(map[string]credential.PolicyDocument), nil
+ return make(map[string]policy_engine.PolicyDocument), nil
}
}
// CreatePolicy creates a new IAM policy via credential store
-func (cspm *CredentialStorePolicyManager) CreatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (cspm *CredentialStorePolicyManager) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
store := cspm.credentialManager.GetStore()
if policyStore, ok := store.(credential.PolicyManager); ok {
@@ -91,7 +92,7 @@ func (cspm *CredentialStorePolicyManager) CreatePolicy(ctx context.Context, name
}
// UpdatePolicy updates an existing IAM policy via credential store
-func (cspm *CredentialStorePolicyManager) UpdatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (cspm *CredentialStorePolicyManager) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
store := cspm.credentialManager.GetStore()
if policyStore, ok := store.(credential.PolicyManager); ok {
@@ -113,7 +114,7 @@ func (cspm *CredentialStorePolicyManager) DeletePolicy(ctx context.Context, name
}
// GetPolicy retrieves a specific IAM policy via credential store
-func (cspm *CredentialStorePolicyManager) GetPolicy(ctx context.Context, name string) (*credential.PolicyDocument, error) {
+func (cspm *CredentialStorePolicyManager) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) {
store := cspm.credentialManager.GetStore()
if policyStore, ok := store.(credential.PolicyManager); ok {
@@ -163,7 +164,7 @@ func (s *AdminServer) GetPolicies() ([]IAMPolicy, error) {
}
// CreatePolicy creates a new IAM policy
-func (s *AdminServer) CreatePolicy(name string, document credential.PolicyDocument) error {
+func (s *AdminServer) CreatePolicy(name string, document policy_engine.PolicyDocument) error {
policyManager := s.GetPolicyManager()
if policyManager == nil {
return fmt.Errorf("policy manager not available")
@@ -174,7 +175,7 @@ func (s *AdminServer) CreatePolicy(name string, document credential.PolicyDocume
}
// UpdatePolicy updates an existing IAM policy
-func (s *AdminServer) UpdatePolicy(name string, document credential.PolicyDocument) error {
+func (s *AdminServer) UpdatePolicy(name string, document policy_engine.PolicyDocument) error {
policyManager := s.GetPolicyManager()
if policyManager == nil {
return fmt.Errorf("policy manager not available")
diff --git a/weed/admin/handlers/policy_handlers.go b/weed/admin/handlers/policy_handlers.go
index 8f5cc91b1..c9850b219 100644
--- a/weed/admin/handlers/policy_handlers.go
+++ b/weed/admin/handlers/policy_handlers.go
@@ -9,8 +9,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
- "github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
// PolicyHandlers contains all the HTTP handlers for policy management
@@ -190,7 +190,7 @@ func (h *PolicyHandlers) DeletePolicy(c *gin.Context) {
// ValidatePolicy validates a policy document without saving it
func (h *PolicyHandlers) ValidatePolicy(c *gin.Context) {
var req struct {
- Document credential.PolicyDocument `json:"document" binding:"required"`
+ Document policy_engine.PolicyDocument `json:"document" binding:"required"`
}
if err := c.ShouldBindJSON(&req); err != nil {
@@ -218,14 +218,14 @@ func (h *PolicyHandlers) ValidatePolicy(c *gin.Context) {
return
}
- if len(statement.Action) == 0 {
+ if len(statement.Action.Strings()) == 0 {
c.JSON(http.StatusBadRequest, gin.H{
"error": fmt.Sprintf("Statement %d: Action is required", i+1),
})
return
}
- if len(statement.Resource) == 0 {
+ if len(statement.Resource.Strings()) == 0 {
c.JSON(http.StatusBadRequest, gin.H{
"error": fmt.Sprintf("Statement %d: Resource is required", i+1),
})
diff --git a/weed/credential/credential_store.go b/weed/credential/credential_store.go
index 6fe5a5da1..9bcb69260 100644
--- a/weed/credential/credential_store.go
+++ b/weed/credential/credential_store.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -86,26 +87,13 @@ type UserCredentials struct {
UpdatedAt time.Time `json:"updatedAt"`
}
-// PolicyStatement represents a single policy statement in an IAM policy
-type PolicyStatement struct {
- Effect string `json:"Effect"`
- Action []string `json:"Action"`
- Resource []string `json:"Resource"`
-}
-
-// PolicyDocument represents an IAM policy document
-type PolicyDocument struct {
- Version string `json:"Version"`
- Statement []*PolicyStatement `json:"Statement"`
-}
-
// PolicyManager interface for managing IAM policies
type PolicyManager interface {
- GetPolicies(ctx context.Context) (map[string]PolicyDocument, error)
- CreatePolicy(ctx context.Context, name string, document PolicyDocument) error
- UpdatePolicy(ctx context.Context, name string, document PolicyDocument) error
+ GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error)
+ CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error
+ UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error
DeletePolicy(ctx context.Context, name string) error
- GetPolicy(ctx context.Context, name string) (*PolicyDocument, error)
+ GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error)
}
// Stores holds all available credential store implementations
diff --git a/weed/credential/filer_etc/filer_etc_policy.go b/weed/credential/filer_etc/filer_etc_policy.go
index fdd3156ff..8b4647cb1 100644
--- a/weed/credential/filer_etc/filer_etc_policy.go
+++ b/weed/credential/filer_etc/filer_etc_policy.go
@@ -5,20 +5,20 @@ import (
"context"
"encoding/json"
- "github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
type PoliciesCollection struct {
- Policies map[string]credential.PolicyDocument `json:"policies"`
+ Policies map[string]policy_engine.PolicyDocument `json:"policies"`
}
// GetPolicies retrieves all IAM policies from the filer
-func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]credential.PolicyDocument, error) {
+func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) {
policiesCollection := &PoliciesCollection{
- Policies: make(map[string]credential.PolicyDocument),
+ Policies: make(map[string]policy_engine.PolicyDocument),
}
// Check if filer client is configured
@@ -53,28 +53,28 @@ func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]credent
}
// CreatePolicy creates a new IAM policy in the filer
-func (store *FilerEtcStore) CreatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
- return store.updatePolicies(ctx, func(policies map[string]credential.PolicyDocument) {
+func (store *FilerEtcStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
+ return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) {
policies[name] = document
})
}
// UpdatePolicy updates an existing IAM policy in the filer
-func (store *FilerEtcStore) UpdatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
- return store.updatePolicies(ctx, func(policies map[string]credential.PolicyDocument) {
+func (store *FilerEtcStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
+ return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) {
policies[name] = document
})
}
// DeletePolicy deletes an IAM policy from the filer
func (store *FilerEtcStore) DeletePolicy(ctx context.Context, name string) error {
- return store.updatePolicies(ctx, func(policies map[string]credential.PolicyDocument) {
+ return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) {
delete(policies, name)
})
}
// updatePolicies is a helper method to update policies atomically
-func (store *FilerEtcStore) updatePolicies(ctx context.Context, updateFunc func(map[string]credential.PolicyDocument)) error {
+func (store *FilerEtcStore) updatePolicies(ctx context.Context, updateFunc func(map[string]policy_engine.PolicyDocument)) error {
// Load existing policies
policies, err := store.GetPolicies(ctx)
if err != nil {
@@ -100,7 +100,7 @@ func (store *FilerEtcStore) updatePolicies(ctx context.Context, updateFunc func(
}
// GetPolicy retrieves a specific IAM policy by name from the filer
-func (store *FilerEtcStore) GetPolicy(ctx context.Context, name string) (*credential.PolicyDocument, error) {
+func (store *FilerEtcStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) {
policies, err := store.GetPolicies(ctx)
if err != nil {
return nil, err
diff --git a/weed/credential/memory/memory_policy.go b/weed/credential/memory/memory_policy.go
index 1c9268958..8a4700467 100644
--- a/weed/credential/memory/memory_policy.go
+++ b/weed/credential/memory/memory_policy.go
@@ -4,11 +4,11 @@ import (
"context"
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/credential"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
// GetPolicies retrieves all IAM policies from memory
-func (store *MemoryStore) GetPolicies(ctx context.Context) (map[string]credential.PolicyDocument, error) {
+func (store *MemoryStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) {
store.mu.RLock()
defer store.mu.RUnlock()
@@ -17,7 +17,7 @@ func (store *MemoryStore) GetPolicies(ctx context.Context) (map[string]credentia
}
// Create a copy of the policies map to avoid mutation issues
- policies := make(map[string]credential.PolicyDocument)
+ policies := make(map[string]policy_engine.PolicyDocument)
for name, doc := range store.policies {
policies[name] = doc
}
@@ -26,7 +26,7 @@ func (store *MemoryStore) GetPolicies(ctx context.Context) (map[string]credentia
}
// GetPolicy retrieves a specific IAM policy by name from memory
-func (store *MemoryStore) GetPolicy(ctx context.Context, name string) (*credential.PolicyDocument, error) {
+func (store *MemoryStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) {
store.mu.RLock()
defer store.mu.RUnlock()
@@ -38,7 +38,7 @@ func (store *MemoryStore) GetPolicy(ctx context.Context, name string) (*credenti
}
// CreatePolicy creates a new IAM policy in memory
-func (store *MemoryStore) CreatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (store *MemoryStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
store.mu.Lock()
defer store.mu.Unlock()
@@ -51,7 +51,7 @@ func (store *MemoryStore) CreatePolicy(ctx context.Context, name string, documen
}
// UpdatePolicy updates an existing IAM policy in memory
-func (store *MemoryStore) UpdatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (store *MemoryStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
store.mu.Lock()
defer store.mu.Unlock()
diff --git a/weed/credential/memory/memory_store.go b/weed/credential/memory/memory_store.go
index f0f383c04..acd05a456 100644
--- a/weed/credential/memory/memory_store.go
+++ b/weed/credential/memory/memory_store.go
@@ -5,6 +5,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -16,9 +17,9 @@ func init() {
// This is primarily intended for testing purposes
type MemoryStore struct {
mu sync.RWMutex
- users map[string]*iam_pb.Identity // username -> identity
- accessKeys map[string]string // access_key -> username
- policies map[string]credential.PolicyDocument // policy_name -> policy_document
+ users map[string]*iam_pb.Identity // username -> identity
+ accessKeys map[string]string // access_key -> username
+ policies map[string]policy_engine.PolicyDocument // policy_name -> policy_document
initialized bool
}
@@ -36,7 +37,7 @@ func (store *MemoryStore) Initialize(configuration util.Configuration, prefix st
store.users = make(map[string]*iam_pb.Identity)
store.accessKeys = make(map[string]string)
- store.policies = make(map[string]credential.PolicyDocument)
+ store.policies = make(map[string]policy_engine.PolicyDocument)
store.initialized = true
return nil
diff --git a/weed/credential/postgres/postgres_policy.go b/weed/credential/postgres/postgres_policy.go
index 8be2b108c..4e50e0771 100644
--- a/weed/credential/postgres/postgres_policy.go
+++ b/weed/credential/postgres/postgres_policy.go
@@ -5,16 +5,16 @@ import (
"encoding/json"
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/credential"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
// GetPolicies retrieves all IAM policies from PostgreSQL
-func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]credential.PolicyDocument, error) {
+func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) {
if !store.configured {
return nil, fmt.Errorf("store not configured")
}
- policies := make(map[string]credential.PolicyDocument)
+ policies := make(map[string]policy_engine.PolicyDocument)
rows, err := store.db.QueryContext(ctx, "SELECT name, document FROM policies")
if err != nil {
@@ -30,7 +30,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]credent
return nil, fmt.Errorf("failed to scan policy row: %v", err)
}
- var document credential.PolicyDocument
+ var document policy_engine.PolicyDocument
if err := json.Unmarshal(documentJSON, &document); err != nil {
return nil, fmt.Errorf("failed to unmarshal policy document for %s: %v", name, err)
}
@@ -42,7 +42,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]credent
}
// CreatePolicy creates a new IAM policy in PostgreSQL
-func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
if !store.configured {
return fmt.Errorf("store not configured")
}
@@ -63,7 +63,7 @@ func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, docum
}
// UpdatePolicy updates an existing IAM policy in PostgreSQL
-func (store *PostgresStore) UpdatePolicy(ctx context.Context, name string, document credential.PolicyDocument) error {
+func (store *PostgresStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error {
if !store.configured {
return fmt.Errorf("store not configured")
}
@@ -116,7 +116,7 @@ func (store *PostgresStore) DeletePolicy(ctx context.Context, name string) error
}
// GetPolicy retrieves a specific IAM policy by name from PostgreSQL
-func (store *PostgresStore) GetPolicy(ctx context.Context, name string) (*credential.PolicyDocument, error) {
+func (store *PostgresStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) {
policies, err := store.GetPolicies(ctx)
if err != nil {
return nil, err
diff --git a/weed/credential/test/policy_test.go b/weed/credential/test/policy_test.go
index 341a05003..28fa2c619 100644
--- a/weed/credential/test/policy_test.go
+++ b/weed/credential/test/policy_test.go
@@ -6,6 +6,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/credential/memory"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
// Import all store implementations to register them
_ "github.com/seaweedfs/seaweedfs/weed/credential/filer_etc"
@@ -46,13 +47,13 @@ func testPolicyOperations(t *testing.T, ctx context.Context, credentialManager *
}
// Test CreatePolicy
- testPolicy := credential.PolicyDocument{
+ testPolicy := policy_engine.PolicyDocument{
Version: "2012-10-17",
- Statement: []*credential.PolicyStatement{
+ Statement: []policy_engine.PolicyStatement{
{
- Effect: "Allow",
- Action: []string{"s3:GetObject"},
- Resource: []string{"arn:aws:s3:::test-bucket/*"},
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice("s3:GetObject"),
+ Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::test-bucket/*"),
},
},
}
@@ -84,13 +85,13 @@ func testPolicyOperations(t *testing.T, ctx context.Context, credentialManager *
}
// Test UpdatePolicy
- updatedPolicy := credential.PolicyDocument{
+ updatedPolicy := policy_engine.PolicyDocument{
Version: "2012-10-17",
- Statement: []*credential.PolicyStatement{
+ Statement: []policy_engine.PolicyStatement{
{
- Effect: "Allow",
- Action: []string{"s3:GetObject", "s3:PutObject"},
- Resource: []string{"arn:aws:s3:::test-bucket/*"},
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice("s3:GetObject", "s3:PutObject"),
+ Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::test-bucket/*"),
},
},
}
@@ -113,8 +114,8 @@ func testPolicyOperations(t *testing.T, ctx context.Context, credentialManager *
if len(updatedPolicyResult.Statement) != 1 {
t.Errorf("Expected 1 statement after update, got %d", len(updatedPolicyResult.Statement))
}
- if len(updatedPolicyResult.Statement[0].Action) != 2 {
- t.Errorf("Expected 2 actions after update, got %d", len(updatedPolicyResult.Statement[0].Action))
+ if len(updatedPolicyResult.Statement[0].Action.Strings()) != 2 {
+ t.Errorf("Expected 2 actions after update, got %d", len(updatedPolicyResult.Statement[0].Action.Strings()))
}
// Test DeletePolicy
diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
index 094ca2332..573d6dabc 100644
--- a/weed/iamapi/iamapi_management_handlers.go
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -16,6 +16,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
@@ -39,7 +40,7 @@ const (
var (
seededRand *rand.Rand = rand.New(
rand.NewSource(time.Now().UnixNano()))
- policyDocuments = map[string]*PolicyDocument{}
+ policyDocuments = map[string]*policy_engine.PolicyDocument{}
policyLock = sync.RWMutex{}
)
@@ -93,24 +94,8 @@ const (
USER_DOES_NOT_EXIST = "the user with name %s cannot be found."
)
-type Statement struct {
- Effect string `json:"Effect"`
- Action []string `json:"Action"`
- Resource []string `json:"Resource"`
-}
-
type Policies struct {
- Policies map[string]PolicyDocument `json:"policies"`
-}
-
-type PolicyDocument struct {
- Version string `json:"Version"`
- Statement []*Statement `json:"Statement"`
-}
-
-func (p PolicyDocument) String() string {
- b, _ := json.Marshal(p)
- return string(b)
+ Policies map[string]policy_engine.PolicyDocument `json:"policies"`
}
func Hash(s *string) string {
@@ -193,11 +178,12 @@ func (iama *IamApiServer) UpdateUser(s3cfg *iam_pb.S3ApiConfiguration, values ur
return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)}
}
-func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) {
- if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil {
- return PolicyDocument{}, err
+func GetPolicyDocument(policy *string) (policy_engine.PolicyDocument, error) {
+ var policyDocument policy_engine.PolicyDocument
+ if err := json.Unmarshal([]byte(*policy), &policyDocument); err != nil {
+ return policy_engine.PolicyDocument{}, err
}
- return policyDocument, err
+ return policyDocument, nil
}
func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, iamError *IamError) {
@@ -270,7 +256,7 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values
return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: errors.New("no actions found")}
}
- policyDocument := PolicyDocument{Version: policyDocumentVersion}
+ policyDocument := policy_engine.PolicyDocument{Version: policyDocumentVersion}
statements := make(map[string][]string)
for _, action := range ident.Actions {
// parse "Read:EXAMPLE-BUCKET"
@@ -287,9 +273,9 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values
for resource, actions := range statements {
isEqAction := false
for i, statement := range policyDocument.Statement {
- if reflect.DeepEqual(statement.Action, actions) {
- policyDocument.Statement[i].Resource = append(
- policyDocument.Statement[i].Resource, resource)
+ if reflect.DeepEqual(statement.Action.Strings(), actions) {
+ policyDocument.Statement[i].Resource = policy_engine.NewStringOrStringSlice(append(
+ policyDocument.Statement[i].Resource.Strings(), resource)...)
isEqAction = true
break
}
@@ -297,14 +283,18 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values
if isEqAction {
continue
}
- policyDocumentStatement := Statement{
- Effect: "Allow",
- Action: actions,
+ policyDocumentStatement := policy_engine.PolicyStatement{
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice(actions...),
+ Resource: policy_engine.NewStringOrStringSlice(resource),
}
- policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource)
- policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement)
+ policyDocument.Statement = append(policyDocument.Statement, policyDocumentStatement)
+ }
+ policyDocumentJSON, err := json.Marshal(policyDocument)
+ if err != nil {
+ return resp, &IamError{Code: iam.ErrCodeServiceFailureException, Error: err}
}
- resp.GetUserPolicyResult.PolicyDocument = policyDocument.String()
+ resp.GetUserPolicyResult.PolicyDocument = string(policyDocumentJSON)
return resp, nil
}
return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)}
@@ -321,21 +311,21 @@ func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, val
return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)}
}
-func GetActions(policy *PolicyDocument) ([]string, error) {
+func GetActions(policy *policy_engine.PolicyDocument) ([]string, error) {
var actions []string
for _, statement := range policy.Statement {
- if statement.Effect != "Allow" {
+ if statement.Effect != policy_engine.PolicyEffectAllow {
return nil, fmt.Errorf("not a valid effect: '%s'. Only 'Allow' is possible", statement.Effect)
}
- for _, resource := range statement.Resource {
+ for _, resource := range statement.Resource.Strings() {
// Parse "arn:aws:s3:::my-bucket/shared/*"
res := strings.Split(resource, ":")
if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
glog.Infof("not a valid resource: %s", res)
continue
}
- for _, action := range statement.Action {
+ for _, action := range statement.Action.Strings() {
// Parse "s3:Get*"
act := strings.Split(action, ":")
if len(act) != 2 || act[0] != "s3" {
diff --git a/weed/iamapi/iamapi_management_handlers_test.go b/weed/iamapi/iamapi_management_handlers_test.go
index eac82caa7..5bc8eff67 100644
--- a/weed/iamapi/iamapi_management_handlers_test.go
+++ b/weed/iamapi/iamapi_management_handlers_test.go
@@ -3,28 +3,19 @@ package iamapi
import (
"testing"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/stretchr/testify/assert"
)
func TestGetActionsUserPath(t *testing.T) {
- policyDocument := PolicyDocument{
+ policyDocument := policy_engine.PolicyDocument{
Version: "2012-10-17",
- Statement: []*Statement{
+ Statement: []policy_engine.PolicyStatement{
{
- Effect: "Allow",
- Action: []string{
- "s3:Put*",
- "s3:PutBucketAcl",
- "s3:Get*",
- "s3:GetBucketAcl",
- "s3:List*",
- "s3:Tagging*",
- "s3:DeleteBucket*",
- },
- Resource: []string{
- "arn:aws:s3:::shared/user-Alice/*",
- },
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice("s3:Put*", "s3:PutBucketAcl", "s3:Get*", "s3:GetBucketAcl", "s3:List*", "s3:Tagging*", "s3:DeleteBucket*"),
+ Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::shared/user-Alice/*"),
},
},
}
@@ -45,18 +36,13 @@ func TestGetActionsUserPath(t *testing.T) {
func TestGetActionsWildcardPath(t *testing.T) {
- policyDocument := PolicyDocument{
+ policyDocument := policy_engine.PolicyDocument{
Version: "2012-10-17",
- Statement: []*Statement{
+ Statement: []policy_engine.PolicyStatement{
{
- Effect: "Allow",
- Action: []string{
- "s3:Get*",
- "s3:PutBucketAcl",
- },
- Resource: []string{
- "arn:aws:s3:::*",
- },
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice("s3:Get*", "s3:PutBucketAcl"),
+ Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::*"),
},
},
}
@@ -71,17 +57,13 @@ func TestGetActionsWildcardPath(t *testing.T) {
}
func TestGetActionsInvalidAction(t *testing.T) {
- policyDocument := PolicyDocument{
+ policyDocument := policy_engine.PolicyDocument{
Version: "2012-10-17",
- Statement: []*Statement{
+ Statement: []policy_engine.PolicyStatement{
{
- Effect: "Allow",
- Action: []string{
- "s3:InvalidAction",
- },
- Resource: []string{
- "arn:aws:s3:::shared/user-Alice/*",
- },
+ Effect: policy_engine.PolicyEffectAllow,
+ Action: policy_engine.NewStringOrStringSlice("s3:InvalidAction"),
+ Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::shared/user-Alice/*"),
},
},
}
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
index 763761b94..e77d23e53 100644
--- a/weed/iamapi/iamapi_server.go
+++ b/weed/iamapi/iamapi_server.go
@@ -16,6 +16,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
. "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/util"
@@ -160,7 +161,7 @@ func (iama *IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
return err
}
if err == filer_pb.ErrNotFound || buf.Len() == 0 {
- policies.Policies = make(map[string]PolicyDocument)
+ policies.Policies = make(map[string]policy_engine.PolicyDocument)
return nil
}
if err := json.Unmarshal(buf.Bytes(), policies); err != nil {
diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go
index 067209eb5..94c48aa7f 100644
--- a/weed/iamapi/iamapi_test.go
+++ b/weed/iamapi/iamapi_test.go
@@ -14,6 +14,7 @@ import (
"github.com/gorilla/mux"
"github.com/jinzhu/copier"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/stretchr/testify/assert"
)
@@ -23,7 +24,7 @@ var GetPolicies func(policies *Policies) (err error)
var PutPolicies func(policies *Policies) (err error)
var s3config = iam_pb.S3ApiConfiguration{}
-var policiesFile = Policies{Policies: make(map[string]PolicyDocument)}
+var policiesFile = Policies{Policies: make(map[string]policy_engine.PolicyDocument)}
var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}}
type iamS3ApiConfigureMock struct{}
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index e4e5fda83..7c731539f 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -5,6 +5,7 @@ import (
"fmt"
"net/http"
"os"
+ "slices"
"strings"
"sync"
@@ -345,11 +346,6 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
if errCode == s3err.ErrNone {
if identity != nil && identity.Name != "" {
r.Header.Set(s3_constants.AmzIdentityId, identity.Name)
- if identity.isAdmin() {
- r.Header.Set(s3_constants.AmzIsAdmin, "true")
- } else if _, ok := r.Header[s3_constants.AmzIsAdmin]; ok {
- r.Header.Del(s3_constants.AmzIsAdmin)
- }
}
f(w, r)
return
@@ -526,12 +522,7 @@ func (identity *Identity) canDo(action Action, bucket string, objectKey string)
}
func (identity *Identity) isAdmin() bool {
- for _, a := range identity.Actions {
- if a == "Admin" {
- return true
- }
- }
- return false
+ return slices.Contains(identity.Actions, s3_constants.ACTION_ADMIN)
}
// GetCredentialManager returns the credential manager instance
diff --git a/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md b/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md
new file mode 100644
index 000000000..39e7d9dcd
--- /dev/null
+++ b/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md
@@ -0,0 +1,249 @@
+# Governance Permission Implementation
+
+This document explains the implementation of `s3:BypassGovernanceRetention` permission in SeaweedFS, providing AWS S3-compatible governance retention bypass functionality.
+
+## Overview
+
+The governance permission system enables proper AWS S3-compatible object retention with governance mode bypass capabilities. This implementation ensures that only users with the appropriate permissions can bypass governance retention, while maintaining security and compliance requirements.
+
+## Features
+
+### 1. Permission-Based Bypass Control
+
+- **s3:BypassGovernanceRetention**: New permission that allows users to bypass governance retention
+- **Admin Override**: Admin users can always bypass governance retention
+- **Header Detection**: Automatic detection of `x-amz-bypass-governance-retention` header
+- **Permission Validation**: Validates user permissions before allowing bypass
+
+### 2. Retention Mode Support
+
+- **GOVERNANCE Mode**: Can be bypassed with proper permission and header
+- **COMPLIANCE Mode**: Cannot be bypassed (highest security level)
+- **Legal Hold**: Always blocks operations regardless of permissions
+
+### 3. Integration Points
+
+- **DELETE Operations**: Checks governance permissions before object deletion
+- **PUT Operations**: Validates permissions before object overwrite
+- **Retention Modification**: Ensures proper permissions for retention changes
+
+## Implementation Details
+
+### Core Components
+
+1. **Permission Checker**
+ ```go
+ func (s3a *S3ApiServer) checkGovernanceBypassPermission(r *http.Request, bucket, object string) bool
+ ```
+ - Checks if user has `s3:BypassGovernanceRetention` permission
+ - Validates admin status
+ - Integrates with existing IAM system
+
+2. **Object Lock Permission Validation**
+ ```go
+ func (s3a *S3ApiServer) checkObjectLockPermissions(r *http.Request, bucket, object, versionId string, bypassGovernance bool) error
+ ```
+ - Validates governance bypass permissions
+ - Checks retention mode (GOVERNANCE vs COMPLIANCE)
+ - Enforces legal hold restrictions
+
+3. **IAM Integration**
+ - Added `ACTION_BYPASS_GOVERNANCE_RETENTION` constant
+ - Updated policy engine with `s3:BypassGovernanceRetention` action
+ - Integrated with existing identity-based access control
+
+### Permission Flow
+
+```
+Request with x-amz-bypass-governance-retention: true
+ ↓
+Check if object is under retention
+ ↓
+If GOVERNANCE mode:
+ ↓
+Check if user has s3:BypassGovernanceRetention permission
+ ↓
+If permission granted: Allow operation
+If permission denied: Deny operation
+
+If COMPLIANCE mode: Always deny (bypass is not possible)
+```
+
+## Configuration
+
+### 1. Identity-Based Configuration
+
+Add governance bypass permission to user actions in `identities.json`:
+
+```json
+{
+ "identities": [
+ {
+ "name": "governance-admin",
+ "credentials": [{"accessKey": "admin123", "secretKey": "secret123"}],
+ "actions": [
+ "Read:my-bucket/*",
+ "Write:my-bucket/*",
+ "BypassGovernanceRetention:my-bucket/*"
+ ]
+ }
+ ]
+}
+```
+
+### 2. Bucket Policy Configuration
+
+Grant governance bypass permission via bucket policies:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "s3:BypassGovernanceRetention",
+ "Resource": "arn:aws:s3:::bucket/*"
+ }
+ ]
+}
+```
+
+**Note**: Use the standard AWS policy version string `"2012-10-17"` (available in code as the `PolicyVersion2012_10_17` constant).
+
+## Usage Examples
+
+### 1. Delete Object with Governance Bypass
+
+```bash
+# User with bypass permission
+aws s3api delete-object \
+ --bucket my-bucket \
+ --key my-object \
+ --bypass-governance-retention
+
+# Admin user (always allowed)
+aws s3api delete-object \
+ --bucket my-bucket \
+ --key my-object \
+ --bypass-governance-retention
+```
+
+### 2. Update Object Retention
+
+```bash
+# Modify retention settings (bypass permission is required to shorten or remove a governance-mode retention period)
+aws s3api put-object-retention \
+ --bucket my-bucket \
+ --key my-object \
+ --retention Mode=GOVERNANCE,RetainUntilDate=2025-01-01T00:00:00Z \
+ --bypass-governance-retention
+```
+
+### 3. Bulk Object Deletion
+
+```bash
+# Delete multiple objects with governance bypass
+aws s3api delete-objects \
+ --bucket my-bucket \
+ --delete file://delete-objects.json \
+ --bypass-governance-retention
+```
+
+## Error Handling
+
+### Permission Errors
+
+- **ErrAccessDenied**: User lacks `s3:BypassGovernanceRetention` permission
+- **ErrGovernanceModeActive**: Governance mode protection without bypass
+- **ErrComplianceModeActive**: Compliance mode cannot be bypassed
+
+### Example Error Response
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>AccessDenied</Code>
+ <Message>User does not have permission to bypass governance retention</Message>
+ <RequestId>abc123</RequestId>
+ <Resource>/my-bucket/my-object</Resource>
+</Error>
+```
+
+## Security Considerations
+
+### 1. Least Privilege Principle
+
+- Grant bypass permission only to users who absolutely need it
+- Use bucket-specific permissions rather than global permissions
+- Regularly audit users with bypass permissions
+
+### 2. Compliance Mode Protection
+
+- COMPLIANCE mode objects cannot be bypassed by any user
+- Use COMPLIANCE mode for regulatory requirements
+- GOVERNANCE mode provides flexibility while maintaining audit trails
+
+### 3. Admin Privileges
+
+- Admin users can always bypass governance retention
+- Ensure admin access is properly secured
+- Use admin privileges responsibly
+
+## Testing
+
+### Unit Tests
+
+```bash
+# Run governance permission tests
+go test -v ./weed/s3api/ -run TestGovernance
+
+# Run all object retention tests
+go test -v ./weed/s3api/ -run TestObjectRetention
+```
+
+### Integration Tests
+
+```bash
+# Test with real S3 clients
+cd test/s3/retention
+go test -v ./... -run TestGovernanceBypass
+```
+
+## AWS Compatibility
+
+This implementation provides full AWS S3 compatibility for:
+
+- ✅ `x-amz-bypass-governance-retention` header support
+- ✅ `s3:BypassGovernanceRetention` permission
+- ✅ GOVERNANCE vs COMPLIANCE mode behavior
+- ✅ Legal hold enforcement
+- ✅ Error responses and codes
+- ✅ Bucket policy integration
+- ✅ IAM policy integration
+
+## Troubleshooting
+
+### Common Issues
+
+1. **User cannot bypass governance retention**
+ - Check if user has `s3:BypassGovernanceRetention` permission
+ - Verify the header `x-amz-bypass-governance-retention: true` is set
+ - Ensure object is in GOVERNANCE mode (not COMPLIANCE)
+
+2. **Admin bypass not working**
+ - Verify user has admin privileges in the IAM system
+ - Check that object is not under legal hold
+ - Ensure versioning is enabled on the bucket
+
+3. **Policy not taking effect**
+ - Verify bucket policy JSON syntax
+ - Check resource ARN format
+ - Ensure principal has proper format
+
+## Future Enhancements
+
+- [ ] AWS STS integration for temporary credentials
+- [ ] CloudTrail-compatible audit logging
+- [ ] Advanced condition evaluation (IP, time, etc.)
+- [ ] Integration with external identity providers
+- [ ] Fine-grained permissions for different retention operations \ No newline at end of file
diff --git a/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md b/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md
new file mode 100644
index 000000000..5c07952b5
--- /dev/null
+++ b/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md
@@ -0,0 +1,176 @@
+# Integration Example
+
+This shows how to integrate the new policy engine with the existing S3ApiServer.
+
+## Minimal Integration
+
+```go
+// In s3api_server.go - modify NewS3ApiServerWithStore function
+
+func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, explicitStore string) (s3ApiServer *S3ApiServer, err error) {
+ // ... existing code ...
+
+ // Create traditional IAM
+ iam := NewIdentityAccessManagementWithStore(option, explicitStore)
+
+ s3ApiServer = &S3ApiServer{
+ option: option,
+ iam: iam, // Keep existing for compatibility
+ randomClientId: util.RandomInt32(),
+ filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec),
+ cb: NewCircuitBreaker(option),
+ credentialManager: iam.credentialManager,
+ bucketConfigCache: NewBucketConfigCache(5 * time.Minute),
+ }
+
+ // Optional: Wrap with policy-backed IAM for enhanced features
+ if option.EnablePolicyEngine { // Add this config option
+ // Option 1: Create and set legacy IAM separately
+ policyBackedIAM := NewPolicyBackedIAM()
+ policyBackedIAM.SetLegacyIAM(iam)
+
+ // Option 2: Create with legacy IAM in one call (convenience method)
+ // policyBackedIAM := NewPolicyBackedIAMWithLegacy(iam)
+
+ // Load existing identities as policies
+ if err := policyBackedIAM.LoadIdentityPolicies(); err != nil {
+ glog.Warningf("Failed to load identity policies: %v", err)
+ }
+
+ // Replace IAM with policy-backed version
+ s3ApiServer.iam = policyBackedIAM
+ }
+
+ // ... rest of existing code ...
+}
+```
+
+## Router Integration
+
+```go
+// In registerRouter function, replace bucket policy handlers:
+
+// Old handlers (if they exist):
+// bucket.Methods(http.MethodGet).HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "")
+// bucket.Methods(http.MethodPut).HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
+// bucket.Methods(http.MethodDelete).HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "")
+
+// New handlers with policy engine:
+if policyBackedIAM, ok := s3a.iam.(*PolicyBackedIAM); ok {
+ // Use policy-backed handlers
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
+} else {
+ // Use existing/fallback handlers
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
+}
+```
+
+## Configuration Option
+
+Add to `S3ApiServerOption`:
+
+```go
+type S3ApiServerOption struct {
+ // ... existing fields ...
+ EnablePolicyEngine bool // Add this field
+}
+```
+
+## Example Usage
+
+### 1. Existing Users (No Changes)
+
+Your existing `identities.json` continues to work:
+
+```json
+{
+ "identities": [
+ {
+ "name": "user1",
+ "credentials": [{"accessKey": "key1", "secretKey": "secret1"}],
+ "actions": ["Read:bucket1/*", "Write:bucket1/uploads/*"]
+ }
+ ]
+}
+```
+
+### 2. New Users (Enhanced Policies)
+
+Set bucket policies via S3 API:
+
+```bash
+# Allow public read
+aws s3api put-bucket-policy --bucket my-bucket --policy file://policy.json
+
+# Where policy.json contains:
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::my-bucket/*"
+ }
+ ]
+}
+```
+
+### 3. Advanced Conditions
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::secure-bucket/*",
+ "Condition": {
+ "IpAddress": {
+ "aws:SourceIp": "192.168.1.0/24"
+ },
+ "Bool": {
+ "aws:SecureTransport": "true"
+ }
+ }
+ }
+ ]
+}
+```
+
+## Migration Strategy
+
+### Phase 1: Enable Policy Engine (Opt-in)
+- Set `EnablePolicyEngine: true` in server options
+- Existing `identities.json` automatically converted to policies
+- Add bucket policies as needed
+
+### Phase 2: Full Policy Management
+- Use AWS CLI/SDK for policy management
+- Gradually migrate from `identities.json` to pure IAM policies
+- Take advantage of advanced conditions and features
+
+## Testing
+
+```bash
+# Test existing functionality
+go test -v -run TestCanDo
+
+# Test new policy engine
+go test -v -run TestPolicyEngine
+
+# Test integration
+go test -v -run TestPolicyBackedIAM
+```
+
+The integration is designed to be:
+- **Backward compatible** - Existing setups work unchanged
+- **Opt-in** - Enable policy engine only when needed
+- **Gradual** - Migrate at your own pace
+- **AWS compatible** - Use standard AWS tools and patterns \ No newline at end of file
diff --git a/weed/s3api/policy_engine/POLICY_EXAMPLES.md b/weed/s3api/policy_engine/POLICY_EXAMPLES.md
new file mode 100644
index 000000000..34a61488e
--- /dev/null
+++ b/weed/s3api/policy_engine/POLICY_EXAMPLES.md
@@ -0,0 +1,54 @@
+# Policy Engine Examples
+
+This document contains examples of how to use the SeaweedFS Policy Engine.
+
+## Overview
+
+The examples in `examples.go` demonstrate various policy configurations and usage patterns. The examples file is excluded from production builds using build tags to reduce binary size.
+
+## To Use Examples
+
+If you need to use the examples during development or testing, you can:
+
+1. **Remove the build tag**: Remove the `//go:build ignore` and `// +build ignore` lines from `examples.go`
+2. **Use during development**: The examples are available during development but not in production builds
+3. **Copy specific examples**: Copy the JSON examples you need into your own code
+
+## Example Categories
+
+The examples file includes:
+
+- **Legacy Identity Format**: Examples of existing identities.json format
+- **Policy Documents**: Various AWS S3-compatible policy examples
+- **Condition Examples**: Complex condition-based policies
+- **Migration Examples**: How to migrate from legacy to policy-based IAM
+- **Integration Examples**: How to integrate with existing systems
+
+## Usage Functions
+
+The examples file provides helper functions:
+
+- `GetAllExamples()`: Returns all example policies
+- `ValidateExamplePolicies()`: Validates all examples
+- `GetExamplePolicy(name)`: Gets a specific example
+- `CreateExamplePolicyDocument(name)`: Creates a policy document
+- `PrintExamplePolicyPretty(name)`: Pretty-prints an example
+- `ExampleUsage()`: Shows basic usage patterns
+- `ExampleLegacyIntegration()`: Shows legacy integration
+- `ExampleConditions()`: Shows condition usage
+- `ExampleMigrationStrategy()`: Shows migration approach
+
+## To Enable Examples in Development
+
+```go
+// Remove build tags from examples.go, then:
+import "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
+
+// Use examples
+examples := policy_engine.GetAllExamples()
+policy, err := policy_engine.GetExamplePolicy("read-only-user")
+```
+
+## Note
+
+The examples are excluded from production builds to keep binary size minimal. They are available for development and testing purposes only. \ No newline at end of file
diff --git a/weed/s3api/policy_engine/README_POLICY_ENGINE.md b/weed/s3api/policy_engine/README_POLICY_ENGINE.md
new file mode 100644
index 000000000..70dbf37f1
--- /dev/null
+++ b/weed/s3api/policy_engine/README_POLICY_ENGINE.md
@@ -0,0 +1,279 @@
+# SeaweedFS Policy Evaluation Engine
+
+This document describes the comprehensive policy evaluation engine that has been added to SeaweedFS, providing AWS S3-compatible policy support while maintaining full backward compatibility with existing `identities.json` configuration.
+
+## Overview
+
+The policy engine provides:
+- **Full AWS S3 policy compatibility** - JSON policies with conditions, wildcards, and complex logic
+- **Backward compatibility** - Existing `identities.json` continues to work unchanged
+- **Bucket policies** - Per-bucket access control policies
+- **IAM policies** - User and group-level policies
+- **Condition evaluation** - IP restrictions, time-based access, SSL-only, etc.
+- **AWS-compliant evaluation order** - Explicit Deny > Explicit Allow > Default Deny
+
+## Architecture
+
+### Files Created
+
+1. **`policy_engine/types.go`** - Core policy data structures and validation
+2. **`policy_engine/conditions.go`** - Condition evaluators (StringEquals, IpAddress, etc.)
+3. **`policy_engine/engine.go`** - Main policy evaluation engine
+4. **`policy_engine/integration.go`** - Integration with existing IAM system
+5. **`policy_engine/engine_test.go`** - Comprehensive tests
+6. **`policy_engine/examples.go`** - Usage examples and documentation (excluded from builds)
+7. **`policy_engine/wildcard_matcher.go`** - Optimized wildcard pattern matching
+8. **`policy_engine/wildcard_matcher_test.go`** - Wildcard matching tests
+
+### Key Components
+
+```
+PolicyEngine
+├── Bucket Policies (per-bucket JSON policies)
+├── User Policies (converted from identities.json + new IAM policies)
+├── Condition Evaluators (IP, time, string, numeric, etc.)
+└── Evaluation Logic (AWS-compliant precedence)
+```
+
+## Backward Compatibility
+
+### Existing identities.json (No Changes Required)
+
+Your existing configuration continues to work exactly as before:
+
+```json
+{
+ "identities": [
+ {
+ "name": "readonly_user",
+ "credentials": [{"accessKey": "key123", "secretKey": "secret123"}],
+ "actions": ["Read:public-bucket/*", "List:public-bucket"]
+ }
+ ]
+}
+```
+
+Legacy actions are automatically converted to AWS-style policies:
+- `Read:bucket/*` → `s3:GetObject` on `arn:aws:s3:::bucket/*`
+- `Write:bucket` → `s3:PutObject`, `s3:DeleteObject` on `arn:aws:s3:::bucket/*`
+- `Admin` → `s3:*` on `arn:aws:s3:::*`
+
+## New Capabilities
+
+### 1. Bucket Policies
+
+Set bucket-level policies using standard S3 API:
+
+```bash
+# Set bucket policy
+curl -X PUT "http://localhost:8333/bucket?policy" \
+ -H "Authorization: AWS access_key:signature" \
+ -d '{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::bucket/*"
+ }
+ ]
+ }'
+
+# Get bucket policy
+curl "http://localhost:8333/bucket?policy"
+
+# Delete bucket policy
+curl -X DELETE "http://localhost:8333/bucket?policy"
+```
+
+### 2. Advanced Conditions
+
+Support for all AWS condition operators:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::secure-bucket/*",
+ "Condition": {
+ "IpAddress": {
+ "aws:SourceIp": ["192.168.1.0/24", "10.0.0.0/8"]
+ },
+ "Bool": {
+ "aws:SecureTransport": "true"
+ },
+ "DateGreaterThan": {
+ "aws:CurrentTime": "2023-01-01T00:00:00Z"
+ }
+ }
+ }
+ ]
+}
+```
+
+### 3. Supported Condition Operators
+
+- **String**: `StringEquals`, `StringNotEquals`, `StringLike`, `StringNotLike`
+- **Numeric**: `NumericEquals`, `NumericLessThan`, `NumericGreaterThan`, etc.
+- **Date**: `DateEquals`, `DateLessThan`, `DateGreaterThan`, etc.
+- **IP**: `IpAddress`, `NotIpAddress` (supports CIDR notation)
+- **Boolean**: `Bool`
+- **ARN**: `ArnEquals`, `ArnLike`
+- **Null**: `Null`
+
+### 4. Condition Keys
+
+Standard AWS condition keys are supported:
+- `aws:CurrentTime` - Current request time
+- `aws:SourceIp` - Client IP address
+- `aws:SecureTransport` - Whether HTTPS is used
+- `aws:UserAgent` - Client user agent
+- `s3:x-amz-acl` - Requested ACL
+- `s3:VersionId` - Object version ID
+- And many more...
+
+## Policy Evaluation
+
+### Evaluation Order (AWS-Compatible)
+
+1. **Explicit Deny** - If any policy explicitly denies access → **DENY**
+2. **Explicit Allow** - If any policy explicitly allows access → **ALLOW**
+3. **Default Deny** - If no policy matches → **DENY**
+
+### Policy Sources (Evaluated Together)
+
+1. **Bucket Policies** - Stored per-bucket
+2. **User Policies** - Converted from `identities.json` + new IAM policies
+3. **Legacy IAM** - Consulted for backward compatibility
+
+## Examples
+
+### Public Read Bucket
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "PublicRead",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::public-bucket/*"
+ }
+ ]
+}
+```
+
+### IP-Restricted Bucket
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": ["s3:GetObject", "s3:PutObject"],
+ "Resource": "arn:aws:s3:::secure-bucket/*",
+ "Condition": {
+ "IpAddress": {
+ "aws:SourceIp": "192.168.1.0/24"
+ }
+ }
+ }
+ ]
+}
+```
+
+### SSL-Only Access
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Principal": "*",
+ "Action": "s3:*",
+ "Resource": ["arn:aws:s3:::ssl-bucket/*", "arn:aws:s3:::ssl-bucket"],
+ "Condition": {
+ "Bool": {
+ "aws:SecureTransport": "false"
+ }
+ }
+ }
+ ]
+}
+```
+
+## Integration
+
+### For Existing SeaweedFS Users
+
+1. **No changes required** - Your existing setup continues to work
+2. **Optional enhancement** - Add bucket policies for fine-grained control
+3. **Gradual migration** - Move to full AWS policies over time
+
+### For New Users
+
+1. Start with either `identities.json` or AWS-style policies
+2. Use bucket policies for complex access patterns
+3. Full feature parity with AWS S3 policies
+
+## Testing
+
+Run the policy engine tests:
+
+```bash
+# Core policy tests
+go test -v -run TestPolicyEngine
+
+# Condition evaluator tests
+go test -v -run TestConditionEvaluators
+
+# Legacy compatibility tests
+go test -v -run TestConvertIdentityToPolicy
+
+# Validation tests
+go test -v -run TestPolicyValidation
+```
+
+## Performance
+
+- **Compiled patterns** - Regex patterns are pre-compiled for fast matching
+- **Cached policies** - Policies are cached in memory with TTL
+- **Early termination** - Evaluation stops on first explicit deny
+- **Minimal overhead** - Backward compatibility with minimal performance impact
+
+## Migration Path
+
+### Phase 1: Backward Compatible (Current)
+- Keep existing `identities.json` unchanged
+- Add bucket policies as needed
+- Legacy actions automatically converted to AWS policies
+
+### Phase 2: Enhanced (Optional)
+- Add advanced conditions to policies
+- Use full AWS S3 policy features
+- Maintain backward compatibility
+
+### Phase 3: Full Migration (Future)
+- Migrate to pure IAM policies
+- Use AWS CLI/SDK for policy management
+- Complete AWS S3 feature parity
+
+## Compatibility
+
+- ✅ **Full backward compatibility** with existing `identities.json`
+- ✅ **AWS S3 API compatibility** for bucket policies
+- ✅ **Standard condition operators** and keys
+- ✅ **Proper evaluation precedence** (Deny > Allow > Default Deny)
+- ✅ **Performance optimized** with caching and compiled patterns
+
+The policy engine provides a seamless upgrade path from SeaweedFS's existing simple IAM system to full AWS S3-compatible policies, giving you the best of both worlds: simplicity for basic use cases and power for complex enterprise scenarios. \ No newline at end of file
diff --git a/weed/s3api/policy_engine/conditions.go b/weed/s3api/policy_engine/conditions.go
new file mode 100644
index 000000000..fc8005fd0
--- /dev/null
+++ b/weed/s3api/policy_engine/conditions.go
@@ -0,0 +1,768 @@
+package policy_engine
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+)
+
+// LRUNode represents a node in the doubly-linked list for efficient LRU operations
+type LRUNode struct {
+ key string
+ value []string
+ prev *LRUNode
+ next *LRUNode
+}
+
+// NormalizedValueCache provides size-limited caching for normalized values with efficient LRU eviction
+type NormalizedValueCache struct {
+ mu sync.RWMutex
+ cache map[string]*LRUNode
+ maxSize int
+ head *LRUNode // Most recently used
+ tail *LRUNode // Least recently used
+}
+
+// NewNormalizedValueCache creates a new normalized value cache with configurable size
+func NewNormalizedValueCache(maxSize int) *NormalizedValueCache {
+ if maxSize <= 0 {
+ maxSize = 1000 // Default size
+ }
+
+ // Create dummy head and tail nodes for easier list manipulation
+ head := &LRUNode{}
+ tail := &LRUNode{}
+ head.next = tail
+ tail.prev = head
+
+ return &NormalizedValueCache{
+ cache: make(map[string]*LRUNode),
+ maxSize: maxSize,
+ head: head,
+ tail: tail,
+ }
+}
+
+// Get retrieves a cached value and updates access order in O(1) time
+func (c *NormalizedValueCache) Get(key string) ([]string, bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if node, exists := c.cache[key]; exists {
+ // Move to head (most recently used) - O(1) operation
+ c.moveToHead(node)
+ return node.value, true
+ }
+ return nil, false
+}
+
+// Set stores a value in the cache with size limit enforcement in O(1) time
+func (c *NormalizedValueCache) Set(key string, value []string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if node, exists := c.cache[key]; exists {
+ // Update existing node and move to head
+ node.value = value
+ c.moveToHead(node)
+ return
+ }
+
+ // Create new node
+ newNode := &LRUNode{
+ key: key,
+ value: value,
+ }
+
+ // If at max size, evict least recently used
+ if len(c.cache) >= c.maxSize {
+ c.evictLeastRecentlyUsed()
+ }
+
+ // Add to cache and move to head
+ c.cache[key] = newNode
+ c.addToHead(newNode)
+}
+
+// moveToHead moves a node to the head of the list (most recently used) - O(1)
+func (c *NormalizedValueCache) moveToHead(node *LRUNode) {
+ c.removeNode(node)
+ c.addToHead(node)
+}
+
+// addToHead adds a node right after the head - O(1)
+func (c *NormalizedValueCache) addToHead(node *LRUNode) {
+ node.prev = c.head
+ node.next = c.head.next
+ c.head.next.prev = node
+ c.head.next = node
+}
+
+// removeNode removes a node from the list - O(1)
+func (c *NormalizedValueCache) removeNode(node *LRUNode) {
+ node.prev.next = node.next
+ node.next.prev = node.prev
+}
+
+// removeTail removes the last node before tail (least recently used) - O(1)
+func (c *NormalizedValueCache) removeTail() *LRUNode {
+ lastNode := c.tail.prev
+ c.removeNode(lastNode)
+ return lastNode
+}
+
+// evictLeastRecentlyUsed removes the least recently used item in O(1) time
+func (c *NormalizedValueCache) evictLeastRecentlyUsed() {
+ tail := c.removeTail()
+ delete(c.cache, tail.key)
+}
+
+// Clear clears all cached values
+func (c *NormalizedValueCache) Clear() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cache = make(map[string]*LRUNode)
+ c.head.next = c.tail
+ c.tail.prev = c.head
+}
+
+// GetStats returns cache statistics
+func (c *NormalizedValueCache) GetStats() (size int, maxSize int) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return len(c.cache), c.maxSize
+}
+
+// Global cache instance with size limit
+var normalizedValueCache = NewNormalizedValueCache(1000)
+
+// getCachedNormalizedValues returns cached normalized values or caches new ones
+func getCachedNormalizedValues(value interface{}) []string {
+ // Create a string key for caching - more efficient than fmt.Sprintf
+ typeStr := reflect.TypeOf(value).String()
+ cacheKey := typeStr + ":" + fmt.Sprint(value)
+
+ // Try to get from cache
+ if cached, exists := normalizedValueCache.Get(cacheKey); exists {
+ return cached
+ }
+
+ // Not in cache, normalize and store
+ // Use the error-handling version for better error reporting
+ normalized, err := normalizeToStringSliceWithError(value)
+ if err != nil {
+ glog.Warningf("Failed to normalize policy value %v: %v", value, err)
+ // Fallback to string conversion for backward compatibility
+ normalized = []string{fmt.Sprintf("%v", value)}
+ }
+
+ normalizedValueCache.Set(cacheKey, normalized)
+
+ return normalized
+}
+
+// ConditionEvaluator evaluates policy conditions
+type ConditionEvaluator interface {
+ Evaluate(conditionValue interface{}, contextValues []string) bool
+}
+
+// StringEqualsEvaluator evaluates StringEquals conditions
+type StringEqualsEvaluator struct{}
+
+func (e *StringEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ for _, contextValue := range contextValues {
+ if expected == contextValue {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// StringNotEqualsEvaluator evaluates StringNotEquals conditions
+type StringNotEqualsEvaluator struct{}
+
+func (e *StringNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ for _, contextValue := range contextValues {
+ if expected == contextValue {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// StringLikeEvaluator evaluates StringLike conditions (supports wildcards)
+type StringLikeEvaluator struct{}
+
+func (e *StringLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ patterns := getCachedNormalizedValues(conditionValue)
+ for _, pattern := range patterns {
+ for _, contextValue := range contextValues {
+ if MatchesWildcard(pattern, contextValue) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// StringNotLikeEvaluator evaluates StringNotLike conditions
+type StringNotLikeEvaluator struct{}
+
+func (e *StringNotLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ patterns := getCachedNormalizedValues(conditionValue)
+ for _, pattern := range patterns {
+ for _, contextValue := range contextValues {
+ if MatchesWildcard(pattern, contextValue) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// NumericEqualsEvaluator evaluates NumericEquals conditions
+type NumericEqualsEvaluator struct{}
+
+func (e *NumericEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if expectedFloat == contextFloat {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NumericNotEqualsEvaluator evaluates NumericNotEquals conditions
+type NumericNotEqualsEvaluator struct{}
+
+func (e *NumericNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if expectedFloat == contextFloat {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// NumericLessThanEvaluator evaluates NumericLessThan conditions
+type NumericLessThanEvaluator struct{}
+
+func (e *NumericLessThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if contextFloat < expectedFloat {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NumericLessThanEqualsEvaluator evaluates NumericLessThanEquals conditions
+type NumericLessThanEqualsEvaluator struct{}
+
+func (e *NumericLessThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if contextFloat <= expectedFloat {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NumericGreaterThanEvaluator evaluates NumericGreaterThan conditions
+type NumericGreaterThanEvaluator struct{}
+
+func (e *NumericGreaterThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if contextFloat > expectedFloat {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NumericGreaterThanEqualsEvaluator evaluates NumericGreaterThanEquals conditions
+type NumericGreaterThanEqualsEvaluator struct{}
+
+func (e *NumericGreaterThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedFloat, err := strconv.ParseFloat(expected, 64)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextFloat, err := strconv.ParseFloat(contextValue, 64)
+ if err != nil {
+ continue
+ }
+ if contextFloat >= expectedFloat {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// DateEqualsEvaluator evaluates DateEquals conditions
+type DateEqualsEvaluator struct{}
+
+func (e *DateEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if expectedTime.Equal(contextTime) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// DateNotEqualsEvaluator evaluates DateNotEquals conditions
+type DateNotEqualsEvaluator struct{}
+
+func (e *DateNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if expectedTime.Equal(contextTime) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// DateLessThanEvaluator evaluates DateLessThan conditions
+type DateLessThanEvaluator struct{}
+
+func (e *DateLessThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if contextTime.Before(expectedTime) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// DateLessThanEqualsEvaluator evaluates DateLessThanEquals conditions
+type DateLessThanEqualsEvaluator struct{}
+
+func (e *DateLessThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if contextTime.Before(expectedTime) || contextTime.Equal(expectedTime) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// DateGreaterThanEvaluator evaluates DateGreaterThan conditions
+type DateGreaterThanEvaluator struct{}
+
+func (e *DateGreaterThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if contextTime.After(expectedTime) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// DateGreaterThanEqualsEvaluator evaluates DateGreaterThanEquals conditions
+type DateGreaterThanEqualsEvaluator struct{}
+
+func (e *DateGreaterThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedTime, err := time.Parse(time.RFC3339, expected)
+ if err != nil {
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextTime, err := time.Parse(time.RFC3339, contextValue)
+ if err != nil {
+ continue
+ }
+ if contextTime.After(expectedTime) || contextTime.Equal(expectedTime) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// BoolEvaluator evaluates Bool conditions
+type BoolEvaluator struct{}
+
+func (e *BoolEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ for _, contextValue := range contextValues {
+ if strings.ToLower(expected) == strings.ToLower(contextValue) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IpAddressEvaluator evaluates IpAddress conditions
+type IpAddressEvaluator struct{}
+
+func (e *IpAddressEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ _, expectedNet, err := net.ParseCIDR(expected)
+ if err != nil {
+ // Try parsing as single IP
+ expectedIP := net.ParseIP(expected)
+ if expectedIP == nil {
+ glog.V(3).Infof("Failed to parse expected IP address: %s", expected)
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextIP := net.ParseIP(contextValue)
+ if contextIP == nil {
+ glog.V(3).Infof("Failed to parse IP address: %s", contextValue)
+ continue
+ }
+ if contextIP.Equal(expectedIP) {
+ return true
+ }
+ }
+ } else {
+ // CIDR network
+ for _, contextValue := range contextValues {
+ contextIP := net.ParseIP(contextValue)
+ if contextIP == nil {
+ glog.V(3).Infof("Failed to parse IP address: %s", contextValue)
+ continue
+ }
+ if expectedNet.Contains(contextIP) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// NotIpAddressEvaluator evaluates NotIpAddress conditions
+type NotIpAddressEvaluator struct{}
+
+func (e *NotIpAddressEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ _, expectedNet, err := net.ParseCIDR(expected)
+ if err != nil {
+ // Try parsing as single IP
+ expectedIP := net.ParseIP(expected)
+ if expectedIP == nil {
+ glog.V(3).Infof("Failed to parse expected IP address: %s", expected)
+ continue
+ }
+ for _, contextValue := range contextValues {
+ contextIP := net.ParseIP(contextValue)
+ if contextIP == nil {
+ glog.V(3).Infof("Failed to parse IP address: %s", contextValue)
+ continue
+ }
+ if contextIP.Equal(expectedIP) {
+ return false
+ }
+ }
+ } else {
+ // CIDR network
+ for _, contextValue := range contextValues {
+ contextIP := net.ParseIP(contextValue)
+ if contextIP == nil {
+ glog.V(3).Infof("Failed to parse IP address: %s", contextValue)
+ continue
+ }
+ if expectedNet.Contains(contextIP) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// ArnEqualsEvaluator evaluates ArnEquals conditions
+type ArnEqualsEvaluator struct{}
+
+func (e *ArnEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ for _, contextValue := range contextValues {
+ if expected == contextValue {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// ArnLikeEvaluator evaluates ArnLike conditions
+type ArnLikeEvaluator struct{}
+
+func (e *ArnLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ patterns := getCachedNormalizedValues(conditionValue)
+ for _, pattern := range patterns {
+ for _, contextValue := range contextValues {
+ if MatchesWildcard(pattern, contextValue) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NullEvaluator evaluates Null conditions
+type NullEvaluator struct{}
+
+func (e *NullEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool {
+ expectedValues := getCachedNormalizedValues(conditionValue)
+ for _, expected := range expectedValues {
+ expectedBool := strings.ToLower(expected) == "true"
+ contextExists := len(contextValues) > 0
+ if expectedBool && !contextExists {
+ return true // Key should be null and it is
+ }
+ if !expectedBool && contextExists {
+ return true // Key should not be null and it isn't
+ }
+ }
+ return false
+}
+
+// GetConditionEvaluator returns the appropriate evaluator for a condition operator
+func GetConditionEvaluator(operator string) (ConditionEvaluator, error) {
+ switch operator {
+ case "StringEquals":
+ return &StringEqualsEvaluator{}, nil
+ case "StringNotEquals":
+ return &StringNotEqualsEvaluator{}, nil
+ case "StringLike":
+ return &StringLikeEvaluator{}, nil
+ case "StringNotLike":
+ return &StringNotLikeEvaluator{}, nil
+ case "NumericEquals":
+ return &NumericEqualsEvaluator{}, nil
+ case "NumericNotEquals":
+ return &NumericNotEqualsEvaluator{}, nil
+ case "NumericLessThan":
+ return &NumericLessThanEvaluator{}, nil
+ case "NumericLessThanEquals":
+ return &NumericLessThanEqualsEvaluator{}, nil
+ case "NumericGreaterThan":
+ return &NumericGreaterThanEvaluator{}, nil
+ case "NumericGreaterThanEquals":
+ return &NumericGreaterThanEqualsEvaluator{}, nil
+ case "DateEquals":
+ return &DateEqualsEvaluator{}, nil
+ case "DateNotEquals":
+ return &DateNotEqualsEvaluator{}, nil
+ case "DateLessThan":
+ return &DateLessThanEvaluator{}, nil
+ case "DateLessThanEquals":
+ return &DateLessThanEqualsEvaluator{}, nil
+ case "DateGreaterThan":
+ return &DateGreaterThanEvaluator{}, nil
+ case "DateGreaterThanEquals":
+ return &DateGreaterThanEqualsEvaluator{}, nil
+ case "Bool":
+ return &BoolEvaluator{}, nil
+ case "IpAddress":
+ return &IpAddressEvaluator{}, nil
+ case "NotIpAddress":
+ return &NotIpAddressEvaluator{}, nil
+ case "ArnEquals":
+ return &ArnEqualsEvaluator{}, nil
+ case "ArnLike":
+ return &ArnLikeEvaluator{}, nil
+ case "Null":
+ return &NullEvaluator{}, nil
+ default:
+ return nil, fmt.Errorf("unsupported condition operator: %s", operator)
+ }
+}
+
// EvaluateConditions evaluates all conditions in a policy statement.
// All operator blocks and all keys within them must pass (logical AND);
// an empty condition map always passes.
func EvaluateConditions(conditions PolicyConditions, contextValues map[string][]string) bool {
	if len(conditions) == 0 {
		return true // No conditions means always true
	}

	for operator, conditionMap := range conditions {
		conditionEvaluator, err := GetConditionEvaluator(operator)
		if err != nil {
			// NOTE(review): an unrecognized operator is skipped, making the
			// statement "fail open" for that operator. Confirm this lenient
			// behavior is intended — AWS normally rejects unknown operators
			// at policy-validation time rather than ignoring them here.
			glog.Warningf("Unsupported condition operator: %s", operator)
			continue
		}

		for key, value := range conditionMap {
			// A missing context key evaluates against an empty value list so
			// operators such as Null can still match on absence.
			contextVals, exists := contextValues[key]
			if !exists {
				contextVals = []string{}
			}

			if !conditionEvaluator.Evaluate(value.Strings(), contextVals) {
				return false // If any condition fails, the whole condition block fails
			}
		}
	}

	return true
}
+
// EvaluateConditionsLegacy evaluates conditions using the old interface{} format for backward compatibility.
// Semantics match EvaluateConditions: every operator block and key must pass.
func EvaluateConditionsLegacy(conditions map[string]interface{}, contextValues map[string][]string) bool {
	if len(conditions) == 0 {
		return true // No conditions means always true
	}

	for operator, conditionMap := range conditions {
		conditionEvaluator, err := GetConditionEvaluator(operator)
		if err != nil {
			// NOTE(review): unknown operators are skipped ("fail open") —
			// same caveat as EvaluateConditions; confirm this is intended.
			glog.Warningf("Unsupported condition operator: %s", operator)
			continue
		}

		// Legacy payloads are untyped; a malformed condition map is skipped
		// with a warning rather than failing the statement.
		conditionMapTyped, ok := conditionMap.(map[string]interface{})
		if !ok {
			glog.Warningf("Invalid condition format for operator: %s", operator)
			continue
		}

		for key, value := range conditionMapTyped {
			// A missing context key evaluates against an empty value list.
			contextVals, exists := contextValues[key]
			if !exists {
				contextVals = []string{}
			}

			if !conditionEvaluator.Evaluate(value, contextVals) {
				return false // If any condition fails, the whole condition block fails
			}
		}
	}

	return true
}
diff --git a/weed/s3api/policy_engine/engine.go b/weed/s3api/policy_engine/engine.go
new file mode 100644
index 000000000..1e0126eb6
--- /dev/null
+++ b/weed/s3api/policy_engine/engine.go
@@ -0,0 +1,432 @@
+package policy_engine
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+)
+
// PolicyEvaluationResult represents the result of policy evaluation
type PolicyEvaluationResult int

const (
	// PolicyResultDeny: a statement explicitly denied the request, or no
	// statement allowed it (default deny).
	PolicyResultDeny PolicyEvaluationResult = iota
	// PolicyResultAllow: at least one statement allowed the request and none
	// denied it.
	PolicyResultAllow
	// PolicyResultIndeterminate: no policy is registered for the bucket.
	PolicyResultIndeterminate
)

// PolicyEvaluationContext manages policy evaluation for a bucket
type PolicyEvaluationContext struct {
	bucketName string          // bucket this context belongs to
	policy     *CompiledPolicy // pre-compiled policy for fast evaluation
	cache      *PolicyCache    // per-bucket cache created with the context
	mutex      sync.RWMutex
}

// PolicyEngine is the main policy evaluation engine
type PolicyEngine struct {
	contexts map[string]*PolicyEvaluationContext // bucket name -> context
	mutex    sync.RWMutex                        // guards contexts
}
+
+// NewPolicyEngine creates a new policy evaluation engine
+func NewPolicyEngine() *PolicyEngine {
+ return &PolicyEngine{
+ contexts: make(map[string]*PolicyEvaluationContext),
+ }
+}
+
+// SetBucketPolicy sets the policy for a bucket
+func (engine *PolicyEngine) SetBucketPolicy(bucketName string, policyJSON string) error {
+ policy, err := ParsePolicy(policyJSON)
+ if err != nil {
+ return fmt.Errorf("invalid policy: %v", err)
+ }
+
+ compiled, err := CompilePolicy(policy)
+ if err != nil {
+ return fmt.Errorf("failed to compile policy: %v", err)
+ }
+
+ engine.mutex.Lock()
+ defer engine.mutex.Unlock()
+
+ context := &PolicyEvaluationContext{
+ bucketName: bucketName,
+ policy: compiled,
+ cache: NewPolicyCache(),
+ }
+
+ engine.contexts[bucketName] = context
+ glog.V(2).Infof("Set bucket policy for %s", bucketName)
+ return nil
+}
+
+// GetBucketPolicy gets the policy for a bucket
+func (engine *PolicyEngine) GetBucketPolicy(bucketName string) (*PolicyDocument, error) {
+ engine.mutex.RLock()
+ defer engine.mutex.RUnlock()
+
+ context, exists := engine.contexts[bucketName]
+ if !exists {
+ return nil, fmt.Errorf("no policy found for bucket %s", bucketName)
+ }
+
+ return context.policy.Document, nil
+}
+
+// DeleteBucketPolicy deletes the policy for a bucket
+func (engine *PolicyEngine) DeleteBucketPolicy(bucketName string) error {
+ engine.mutex.Lock()
+ defer engine.mutex.Unlock()
+
+ delete(engine.contexts, bucketName)
+ glog.V(2).Infof("Deleted bucket policy for %s", bucketName)
+ return nil
+}
+
+// EvaluatePolicy evaluates a policy for the given arguments
+func (engine *PolicyEngine) EvaluatePolicy(bucketName string, args *PolicyEvaluationArgs) PolicyEvaluationResult {
+ engine.mutex.RLock()
+ context, exists := engine.contexts[bucketName]
+ engine.mutex.RUnlock()
+
+ if !exists {
+ return PolicyResultIndeterminate
+ }
+
+ return engine.evaluateCompiledPolicy(context.policy, args)
+}
+
+// evaluateCompiledPolicy evaluates a compiled policy
+func (engine *PolicyEngine) evaluateCompiledPolicy(policy *CompiledPolicy, args *PolicyEvaluationArgs) PolicyEvaluationResult {
+ // AWS Policy evaluation logic:
+ // 1. Check for explicit Deny - if found, return Deny
+ // 2. Check for explicit Allow - if found, return Allow
+ // 3. If no explicit Allow is found, return Deny (default deny)
+
+ hasExplicitAllow := false
+
+ for _, stmt := range policy.Statements {
+ if engine.evaluateStatement(&stmt, args) {
+ if stmt.Statement.Effect == PolicyEffectDeny {
+ return PolicyResultDeny // Explicit deny trumps everything
+ }
+ if stmt.Statement.Effect == PolicyEffectAllow {
+ hasExplicitAllow = true
+ }
+ }
+ }
+
+ if hasExplicitAllow {
+ return PolicyResultAllow
+ }
+
+ return PolicyResultDeny // Default deny
+}
+
+// evaluateStatement evaluates a single policy statement
+func (engine *PolicyEngine) evaluateStatement(stmt *CompiledStatement, args *PolicyEvaluationArgs) bool {
+ // Check if action matches
+ if !engine.matchesPatterns(stmt.ActionPatterns, args.Action) {
+ return false
+ }
+
+ // Check if resource matches
+ if !engine.matchesPatterns(stmt.ResourcePatterns, args.Resource) {
+ return false
+ }
+
+ // Check if principal matches (if specified)
+ if len(stmt.PrincipalPatterns) > 0 {
+ if !engine.matchesPatterns(stmt.PrincipalPatterns, args.Principal) {
+ return false
+ }
+ }
+
+ // Check conditions
+ if len(stmt.Statement.Condition) > 0 {
+ if !EvaluateConditions(stmt.Statement.Condition, args.Conditions) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// matchesPatterns checks if a value matches any of the compiled patterns
+func (engine *PolicyEngine) matchesPatterns(patterns []*regexp.Regexp, value string) bool {
+ for _, pattern := range patterns {
+ if pattern.MatchString(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// ExtractConditionValuesFromRequest extracts condition values from HTTP request
+func ExtractConditionValuesFromRequest(r *http.Request) map[string][]string {
+ values := make(map[string][]string)
+
+ // AWS condition keys
+ // Extract IP address without port for proper IP matching
+ host, _, err := net.SplitHostPort(r.RemoteAddr)
+ if err != nil {
+ // Log a warning if splitting fails
+ glog.Warningf("Failed to parse IP address from RemoteAddr %q: %v", r.RemoteAddr, err)
+ // If splitting fails, use the original RemoteAddr (might be just IP without port)
+ host = r.RemoteAddr
+ }
+ values["aws:SourceIp"] = []string{host}
+ values["aws:SecureTransport"] = []string{fmt.Sprintf("%t", r.TLS != nil)}
+ // Use AWS standard condition key for current time
+ values["aws:CurrentTime"] = []string{time.Now().Format(time.RFC3339)}
+ // Keep RequestTime for backward compatibility
+ values["aws:RequestTime"] = []string{time.Now().Format(time.RFC3339)}
+
+ // S3 specific condition keys
+ if userAgent := r.Header.Get("User-Agent"); userAgent != "" {
+ values["aws:UserAgent"] = []string{userAgent}
+ }
+
+ if referer := r.Header.Get("Referer"); referer != "" {
+ values["aws:Referer"] = []string{referer}
+ }
+
+ // S3 object-level conditions
+ if r.Method == "GET" || r.Method == "HEAD" {
+ values["s3:ExistingObjectTag"] = extractObjectTags(r)
+ }
+
+ // S3 bucket-level conditions
+ if delimiter := r.URL.Query().Get("delimiter"); delimiter != "" {
+ values["s3:delimiter"] = []string{delimiter}
+ }
+
+ if prefix := r.URL.Query().Get("prefix"); prefix != "" {
+ values["s3:prefix"] = []string{prefix}
+ }
+
+ if maxKeys := r.URL.Query().Get("max-keys"); maxKeys != "" {
+ values["s3:max-keys"] = []string{maxKeys}
+ }
+
+ // Authentication method
+ if authHeader := r.Header.Get("Authorization"); authHeader != "" {
+ if strings.HasPrefix(authHeader, "AWS4-HMAC-SHA256") {
+ values["s3:authType"] = []string{"REST-HEADER"}
+ } else if strings.HasPrefix(authHeader, "AWS ") {
+ values["s3:authType"] = []string{"REST-HEADER"}
+ }
+ } else if r.URL.Query().Get("AWSAccessKeyId") != "" {
+ values["s3:authType"] = []string{"REST-QUERY-STRING"}
+ }
+
+ // HTTP method
+ values["s3:RequestMethod"] = []string{r.Method}
+
+ // Extract custom headers
+ for key, headerValues := range r.Header {
+ if strings.HasPrefix(strings.ToLower(key), "x-amz-") {
+ values[strings.ToLower(key)] = headerValues
+ }
+ }
+
+ return values
+}
+
// extractObjectTags extracts object tags from request (placeholder implementation).
//
// TODO: wire this up to wherever object tags are stored; until then every
// s3:ExistingObjectTag condition evaluates against an empty tag list.
func extractObjectTags(r *http.Request) []string {
	// This would need to be implemented based on how object tags are stored
	// For now, return empty slice
	return []string{}
}
+
// BuildResourceArn builds the S3 ARN for a bucket, or for an object within it
// when objectName is non-empty.
func BuildResourceArn(bucketName, objectName string) string {
	arn := "arn:aws:s3:::" + bucketName
	if objectName != "" {
		arn += "/" + objectName
	}
	return arn
}
+
// BuildActionName builds a standardized action name by ensuring the "s3:"
// prefix is present exactly once.
func BuildActionName(action string) string {
	if !strings.HasPrefix(action, "s3:") {
		return "s3:" + action
	}
	return action
}
+
// readActionSet holds every S3 action treated as a read operation. Built once
// at package init so each IsReadAction call is an O(1) map lookup instead of
// rebuilding and linearly scanning a slice on every request.
var readActionSet = map[string]struct{}{
	"s3:GetObject":                        {},
	"s3:GetObjectVersion":                 {},
	"s3:GetObjectAcl":                     {},
	"s3:GetObjectVersionAcl":              {},
	"s3:GetObjectTagging":                 {},
	"s3:GetObjectVersionTagging":          {},
	"s3:ListBucket":                       {},
	"s3:ListBucketVersions":               {},
	"s3:GetBucketLocation":                {},
	"s3:GetBucketVersioning":              {},
	"s3:GetBucketAcl":                     {},
	"s3:GetBucketCors":                    {},
	"s3:GetBucketPolicy":                  {},
	"s3:GetBucketTagging":                 {},
	"s3:GetBucketNotification":            {},
	"s3:GetBucketObjectLockConfiguration": {},
	"s3:GetObjectRetention":               {},
	"s3:GetObjectLegalHold":               {},
}

// IsReadAction checks if an action is a read action
func IsReadAction(action string) bool {
	_, ok := readActionSet[action]
	return ok
}
+
// writeActionSet holds every S3 action treated as a write operation. Built
// once at package init so each IsWriteAction call is an O(1) map lookup
// instead of rebuilding and linearly scanning a slice on every request.
var writeActionSet = map[string]struct{}{
	"s3:PutObject":                        {},
	"s3:PutObjectAcl":                     {},
	"s3:PutObjectTagging":                 {},
	"s3:DeleteObject":                     {},
	"s3:DeleteObjectVersion":              {},
	"s3:DeleteObjectTagging":              {},
	"s3:AbortMultipartUpload":             {},
	"s3:ListMultipartUploads":             {},
	"s3:ListParts":                        {},
	"s3:PutBucketAcl":                     {},
	"s3:PutBucketCors":                    {},
	"s3:PutBucketPolicy":                  {},
	"s3:PutBucketTagging":                 {},
	"s3:PutBucketNotification":            {},
	"s3:PutBucketVersioning":              {},
	"s3:DeleteBucketPolicy":               {},
	"s3:DeleteBucketTagging":              {},
	"s3:DeleteBucketCors":                 {},
	"s3:PutBucketObjectLockConfiguration": {},
	"s3:PutObjectRetention":               {},
	"s3:PutObjectLegalHold":               {},
	"s3:BypassGovernanceRetention":        {},
}

// IsWriteAction checks if an action is a write action
func IsWriteAction(action string) bool {
	_, ok := writeActionSet[action]
	return ok
}
+
// GetBucketNameFromArn extracts the bucket name from an S3 ARN, returning ""
// when the ARN does not carry the S3 prefix.
func GetBucketNameFromArn(arn string) string {
	const prefix = "arn:aws:s3:::"
	if !strings.HasPrefix(arn, prefix) {
		return ""
	}
	rest := strings.TrimPrefix(arn, prefix)
	if i := strings.IndexByte(rest, '/'); i >= 0 {
		return rest[:i]
	}
	return rest
}
+
// GetObjectNameFromArn extracts the object key from an S3 ARN, returning ""
// for bucket-only or non-S3 ARNs.
func GetObjectNameFromArn(arn string) string {
	const prefix = "arn:aws:s3:::"
	if !strings.HasPrefix(arn, prefix) {
		return ""
	}
	rest := strings.TrimPrefix(arn, prefix)
	if i := strings.IndexByte(rest, '/'); i >= 0 {
		return rest[i+1:]
	}
	return ""
}
+
+// HasPolicyForBucket checks if a bucket has a policy
+func (engine *PolicyEngine) HasPolicyForBucket(bucketName string) bool {
+ engine.mutex.RLock()
+ defer engine.mutex.RUnlock()
+
+ _, exists := engine.contexts[bucketName]
+ return exists
+}
+
+// GetPolicyStatements returns all policy statements for a bucket
+func (engine *PolicyEngine) GetPolicyStatements(bucketName string) []PolicyStatement {
+ engine.mutex.RLock()
+ defer engine.mutex.RUnlock()
+
+ context, exists := engine.contexts[bucketName]
+ if !exists {
+ return nil
+ }
+
+ return context.policy.Document.Statement
+}
+
+// ValidatePolicyForBucket validates if a policy is valid for a bucket
+func (engine *PolicyEngine) ValidatePolicyForBucket(bucketName string, policyJSON string) error {
+ policy, err := ParsePolicy(policyJSON)
+ if err != nil {
+ return err
+ }
+
+ // Additional validation specific to the bucket
+ for _, stmt := range policy.Statement {
+ resources := normalizeToStringSlice(stmt.Resource)
+ for _, resource := range resources {
+ if resourceBucket := GetBucketFromResource(resource); resourceBucket != "" {
+ if resourceBucket != bucketName {
+ return fmt.Errorf("policy resource %s does not match bucket %s", resource, bucketName)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// ClearAllPolicies clears all bucket policies
+func (engine *PolicyEngine) ClearAllPolicies() {
+ engine.mutex.Lock()
+ defer engine.mutex.Unlock()
+
+ engine.contexts = make(map[string]*PolicyEvaluationContext)
+ glog.V(2).Info("Cleared all bucket policies")
+}
+
+// GetAllBucketsWithPolicies returns all buckets that have policies
+func (engine *PolicyEngine) GetAllBucketsWithPolicies() []string {
+ engine.mutex.RLock()
+ defer engine.mutex.RUnlock()
+
+ buckets := make([]string, 0, len(engine.contexts))
+ for bucketName := range engine.contexts {
+ buckets = append(buckets, bucketName)
+ }
+ return buckets
+}
+
+// EvaluatePolicyForRequest evaluates policy for an HTTP request
+func (engine *PolicyEngine) EvaluatePolicyForRequest(bucketName, objectName, action, principal string, r *http.Request) PolicyEvaluationResult {
+ resource := BuildResourceArn(bucketName, objectName)
+ actionName := BuildActionName(action)
+ conditions := ExtractConditionValuesFromRequest(r)
+
+ args := &PolicyEvaluationArgs{
+ Action: actionName,
+ Resource: resource,
+ Principal: principal,
+ Conditions: conditions,
+ }
+
+ return engine.EvaluatePolicy(bucketName, args)
+}
diff --git a/weed/s3api/policy_engine/engine_test.go b/weed/s3api/policy_engine/engine_test.go
new file mode 100644
index 000000000..799579ce6
--- /dev/null
+++ b/weed/s3api/policy_engine/engine_test.go
@@ -0,0 +1,716 @@
+package policy_engine
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+)
+
// TestPolicyEngine exercises the engine lifecycle end to end: install a
// policy, evaluate an allowed request, a conditionally denied request, and a
// non-matching request, read the policy back, then delete it and verify the
// engine becomes indeterminate for that bucket.
func TestPolicyEngine(t *testing.T) {
	engine := NewPolicyEngine()

	// Test policy JSON
	policyJSON := `{
		"Version": "2012-10-17",
		"Statement": [
			{
				"Effect": "Allow",
				"Action": ["s3:GetObject", "s3:PutObject"],
				"Resource": ["arn:aws:s3:::test-bucket/*"]
			},
			{
				"Effect": "Deny",
				"Action": ["s3:DeleteObject"],
				"Resource": ["arn:aws:s3:::test-bucket/*"],
				"Condition": {
					"StringEquals": {
						"s3:RequestMethod": ["DELETE"]
					}
				}
			}
		]
	}`

	// Set bucket policy
	err := engine.SetBucketPolicy("test-bucket", policyJSON)
	if err != nil {
		t.Fatalf("Failed to set bucket policy: %v", err)
	}

	// Test Allow case
	args := &PolicyEvaluationArgs{
		Action:     "s3:GetObject",
		Resource:   "arn:aws:s3:::test-bucket/test-object",
		Principal:  "user1",
		Conditions: map[string][]string{},
	}

	result := engine.EvaluatePolicy("test-bucket", args)
	if result != PolicyResultAllow {
		t.Errorf("Expected Allow, got %v", result)
	}

	// Test Deny case (the Deny statement's condition is satisfied)
	args = &PolicyEvaluationArgs{
		Action:    "s3:DeleteObject",
		Resource:  "arn:aws:s3:::test-bucket/test-object",
		Principal: "user1",
		Conditions: map[string][]string{
			"s3:RequestMethod": {"DELETE"},
		},
	}

	result = engine.EvaluatePolicy("test-bucket", args)
	if result != PolicyResultDeny {
		t.Errorf("Expected Deny, got %v", result)
	}

	// Test non-matching action (no statement allows it -> default deny)
	args = &PolicyEvaluationArgs{
		Action:     "s3:ListBucket",
		Resource:   "arn:aws:s3:::test-bucket",
		Principal:  "user1",
		Conditions: map[string][]string{},
	}

	result = engine.EvaluatePolicy("test-bucket", args)
	if result != PolicyResultDeny {
		t.Errorf("Expected Deny for non-matching action, got %v", result)
	}

	// Test GetBucketPolicy
	policy, err := engine.GetBucketPolicy("test-bucket")
	if err != nil {
		t.Fatalf("Failed to get bucket policy: %v", err)
	}
	if policy.Version != "2012-10-17" {
		t.Errorf("Expected version 2012-10-17, got %s", policy.Version)
	}

	// Test DeleteBucketPolicy
	err = engine.DeleteBucketPolicy("test-bucket")
	if err != nil {
		t.Fatalf("Failed to delete bucket policy: %v", err)
	}

	// Test policy is gone (no policy -> indeterminate, not deny)
	result = engine.EvaluatePolicy("test-bucket", args)
	if result != PolicyResultIndeterminate {
		t.Errorf("Expected Indeterminate after policy deletion, got %v", result)
	}
}
+
// TestConditionEvaluators is a table-driven check of individual condition
// operators (string, wildcard, numeric, IP/CIDR, and boolean) against single
// expected/context value pairs.
func TestConditionEvaluators(t *testing.T) {
	tests := []struct {
		name           string
		operator       string
		conditionValue interface{}
		contextValues  []string
		expected       bool
	}{
		{
			name:           "StringEquals - match",
			operator:       "StringEquals",
			conditionValue: "test-value",
			contextValues:  []string{"test-value"},
			expected:       true,
		},
		{
			name:           "StringEquals - no match",
			operator:       "StringEquals",
			conditionValue: "test-value",
			contextValues:  []string{"other-value"},
			expected:       false,
		},
		{
			name:           "StringLike - wildcard match",
			operator:       "StringLike",
			conditionValue: "test-*",
			contextValues:  []string{"test-value"},
			expected:       true,
		},
		{
			name:           "StringLike - wildcard no match",
			operator:       "StringLike",
			conditionValue: "test-*",
			contextValues:  []string{"other-value"},
			expected:       false,
		},
		{
			name:           "NumericEquals - match",
			operator:       "NumericEquals",
			conditionValue: "42",
			contextValues:  []string{"42"},
			expected:       true,
		},
		{
			name:           "NumericLessThan - match",
			operator:       "NumericLessThan",
			conditionValue: "100",
			contextValues:  []string{"50"},
			expected:       true,
		},
		{
			name:           "NumericLessThan - no match",
			operator:       "NumericLessThan",
			conditionValue: "100",
			contextValues:  []string{"150"},
			expected:       false,
		},
		{
			name:           "IpAddress - CIDR match",
			operator:       "IpAddress",
			conditionValue: "192.168.1.0/24",
			contextValues:  []string{"192.168.1.100"},
			expected:       true,
		},
		{
			name:           "IpAddress - CIDR no match",
			operator:       "IpAddress",
			conditionValue: "192.168.1.0/24",
			contextValues:  []string{"10.0.0.1"},
			expected:       false,
		},
		{
			name:           "Bool - true match",
			operator:       "Bool",
			conditionValue: "true",
			contextValues:  []string{"true"},
			expected:       true,
		},
		{
			name:           "Bool - false match",
			operator:       "Bool",
			conditionValue: "false",
			contextValues:  []string{"false"},
			expected:       true,
		},
		{
			name:           "Bool - no match",
			operator:       "Bool",
			conditionValue: "true",
			contextValues:  []string{"false"},
			expected:       false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			evaluator, err := GetConditionEvaluator(tt.operator)
			if err != nil {
				t.Fatalf("Failed to get condition evaluator: %v", err)
			}

			result := evaluator.Evaluate(tt.conditionValue, tt.contextValues)
			if result != tt.expected {
				t.Errorf("Expected %v, got %v", tt.expected, result)
			}
		})
	}
}
+
+// TestConvertIdentityToPolicy verifies that legacy "Type:resource" identity
+// actions are converted into a 2012-10-17 policy document with one statement
+// per legacy action, and that the first (Read) statement carries the expected
+// expanded actions and resources.
+func TestConvertIdentityToPolicy(t *testing.T) {
+	identityActions := []string{
+		"Read:bucket1/*",
+		"Write:bucket1/*",
+		"Admin:bucket2",
+	}
+
+	policy, err := ConvertIdentityToPolicy(identityActions, "bucket1")
+	if err != nil {
+		t.Fatalf("Failed to convert identity to policy: %v", err)
+	}
+
+	if policy.Version != "2012-10-17" {
+		t.Errorf("Expected version 2012-10-17, got %s", policy.Version)
+	}
+
+	if len(policy.Statement) != 3 {
+		t.Errorf("Expected 3 statements, got %d", len(policy.Statement))
+	}
+
+	// Check first statement (Read)
+	stmt := policy.Statement[0]
+	if stmt.Effect != PolicyEffectAllow {
+		t.Errorf("Expected Allow effect, got %s", stmt.Effect)
+	}
+
+	// "Read" expands to three S3 actions (GetObject, GetObjectVersion, ListBucket).
+	actions := normalizeToStringSlice(stmt.Action)
+	if len(actions) != 3 {
+		t.Errorf("Expected 3 read actions, got %d", len(actions))
+	}
+
+	// "bucket1/*" yields both the bucket ARN and the object-wildcard ARN.
+	resources := normalizeToStringSlice(stmt.Resource)
+	if len(resources) != 2 {
+		t.Errorf("Expected 2 resources, got %d", len(resources))
+	}
+}
+
+// TestPolicyValidation verifies that ParsePolicy accepts a well-formed policy
+// and rejects an unsupported Version, a statement missing its Action, and
+// malformed JSON (trailing garbage after the closing brace).
+func TestPolicyValidation(t *testing.T) {
+	tests := []struct {
+		name        string
+		policyJSON  string
+		expectError bool
+	}{
+		{
+			name: "Valid policy",
+			policyJSON: `{
+				"Version": "2012-10-17",
+				"Statement": [
+					{
+						"Effect": "Allow",
+						"Action": "s3:GetObject",
+						"Resource": "arn:aws:s3:::test-bucket/*"
+					}
+				]
+			}`,
+			expectError: false,
+		},
+		{
+			name: "Invalid version",
+			policyJSON: `{
+				"Version": "2008-10-17",
+				"Statement": [
+					{
+						"Effect": "Allow",
+						"Action": "s3:GetObject",
+						"Resource": "arn:aws:s3:::test-bucket/*"
+					}
+				]
+			}`,
+			expectError: true,
+		},
+		{
+			name: "Missing action",
+			policyJSON: `{
+				"Version": "2012-10-17",
+				"Statement": [
+					{
+						"Effect": "Allow",
+						"Resource": "arn:aws:s3:::test-bucket/*"
+					}
+				]
+			}`,
+			expectError: true,
+		},
+		{
+			name: "Invalid JSON",
+			policyJSON: `{
+				"Version": "2012-10-17",
+				"Statement": [
+					{
+						"Effect": "Allow",
+						"Action": "s3:GetObject",
+						"Resource": "arn:aws:s3:::test-bucket/*"
+					}
+				]
+			}extra`,
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := ParsePolicy(tt.policyJSON)
+			if (err != nil) != tt.expectError {
+				t.Errorf("Expected error: %v, got error: %v", tt.expectError, err)
+			}
+		})
+	}
+}
+
+// TestPatternMatching verifies compilePattern's translation of '*' (any run)
+// and '?' (single character) wildcards into matchers over action names.
+func TestPatternMatching(t *testing.T) {
+	tests := []struct {
+		name     string
+		pattern  string
+		value    string
+		expected bool
+	}{
+		{
+			name:     "Exact match",
+			pattern:  "s3:GetObject",
+			value:    "s3:GetObject",
+			expected: true,
+		},
+		{
+			name:     "Wildcard match",
+			pattern:  "s3:Get*",
+			value:    "s3:GetObject",
+			expected: true,
+		},
+		{
+			name:     "Wildcard no match",
+			pattern:  "s3:Put*",
+			value:    "s3:GetObject",
+			expected: false,
+		},
+		{
+			name:     "Full wildcard",
+			pattern:  "*",
+			value:    "anything",
+			expected: true,
+		},
+		{
+			name:     "Question mark wildcard",
+			pattern:  "s3:GetObjec?",
+			value:    "s3:GetObject",
+			expected: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			compiled, err := compilePattern(tt.pattern)
+			if err != nil {
+				t.Fatalf("Failed to compile pattern %s: %v", tt.pattern, err)
+			}
+
+			result := compiled.MatchString(tt.value)
+			if result != tt.expected {
+				t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.value, tt.expected, result)
+			}
+		})
+	}
+}
+
+// TestExtractConditionValuesFromRequest verifies that condition keys (source
+// IP without the port, user agent, query parameters, request method, the
+// x-amz-copy-source header, and current/request timestamps) are extracted
+// from an incoming HTTP request.
+func TestExtractConditionValuesFromRequest(t *testing.T) {
+	// Create a test request
+	req := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Path:     "/test-bucket/test-object",
+			RawQuery: "prefix=test&delimiter=/",
+		},
+		Header: map[string][]string{
+			"User-Agent":        {"test-agent"},
+			"X-Amz-Copy-Source": {"source-bucket/source-object"},
+		},
+		RemoteAddr: "192.168.1.100:12345",
+	}
+
+	values := ExtractConditionValuesFromRequest(req)
+
+	// Check extracted values; the port must be stripped from RemoteAddr.
+	if len(values["aws:SourceIp"]) != 1 || values["aws:SourceIp"][0] != "192.168.1.100" {
+		t.Errorf("Expected SourceIp to be 192.168.1.100, got %v", values["aws:SourceIp"])
+	}
+
+	if len(values["aws:UserAgent"]) != 1 || values["aws:UserAgent"][0] != "test-agent" {
+		t.Errorf("Expected UserAgent to be test-agent, got %v", values["aws:UserAgent"])
+	}
+
+	if len(values["s3:prefix"]) != 1 || values["s3:prefix"][0] != "test" {
+		t.Errorf("Expected prefix to be test, got %v", values["s3:prefix"])
+	}
+
+	if len(values["s3:delimiter"]) != 1 || values["s3:delimiter"][0] != "/" {
+		t.Errorf("Expected delimiter to be /, got %v", values["s3:delimiter"])
+	}
+
+	if len(values["s3:RequestMethod"]) != 1 || values["s3:RequestMethod"][0] != "GET" {
+		t.Errorf("Expected RequestMethod to be GET, got %v", values["s3:RequestMethod"])
+	}
+
+	if len(values["x-amz-copy-source"]) != 1 || values["x-amz-copy-source"][0] != "source-bucket/source-object" {
+		t.Errorf("Expected X-Amz-Copy-Source header to be extracted, got %v", values["x-amz-copy-source"])
+	}
+
+	// Check that aws:CurrentTime is properly set
+	if len(values["aws:CurrentTime"]) != 1 {
+		t.Errorf("Expected aws:CurrentTime to be set, got %v", values["aws:CurrentTime"])
+	}
+
+	// Check that aws:RequestTime is still available for backward compatibility
+	if len(values["aws:RequestTime"]) != 1 {
+		t.Errorf("Expected aws:RequestTime to be set for backward compatibility, got %v", values["aws:RequestTime"])
+	}
+}
+
+// TestPolicyEvaluationWithConditions verifies that a bucket policy with an
+// IpAddress condition allows a request whose aws:SourceIp is inside the CIDR
+// and denies (default-deny: no statement matches) one outside it.
+func TestPolicyEvaluationWithConditions(t *testing.T) {
+	engine := NewPolicyEngine()
+
+	// Policy with IP condition
+	policyJSON := `{
+		"Version": "2012-10-17",
+		"Statement": [
+			{
+				"Effect": "Allow",
+				"Action": "s3:GetObject",
+				"Resource": "arn:aws:s3:::test-bucket/*",
+				"Condition": {
+					"IpAddress": {
+						"aws:SourceIp": "192.168.1.0/24"
+					}
+				}
+			}
+		]
+	}`
+
+	err := engine.SetBucketPolicy("test-bucket", policyJSON)
+	if err != nil {
+		t.Fatalf("Failed to set bucket policy: %v", err)
+	}
+
+	// Test matching IP
+	args := &PolicyEvaluationArgs{
+		Action:    "s3:GetObject",
+		Resource:  "arn:aws:s3:::test-bucket/test-object",
+		Principal: "user1",
+		Conditions: map[string][]string{
+			"aws:SourceIp": {"192.168.1.100"},
+		},
+	}
+
+	result := engine.EvaluatePolicy("test-bucket", args)
+	if result != PolicyResultAllow {
+		t.Errorf("Expected Allow for matching IP, got %v", result)
+	}
+
+	// Test non-matching IP
+	args.Conditions["aws:SourceIp"] = []string{"10.0.0.1"}
+	result = engine.EvaluatePolicy("test-bucket", args)
+	if result != PolicyResultDeny {
+		t.Errorf("Expected Deny for non-matching IP, got %v", result)
+	}
+}
+
+// TestResourceArn verifies BuildResourceArn for bucket-only, bucket+object,
+// and nested-object inputs.
+func TestResourceArn(t *testing.T) {
+	cases := []struct {
+		name   string
+		bucket string
+		object string
+		want   string
+	}{
+		{"Bucket only", "test-bucket", "", "arn:aws:s3:::test-bucket"},
+		{"Bucket and object", "test-bucket", "test-object", "arn:aws:s3:::test-bucket/test-object"},
+		{"Bucket and nested object", "test-bucket", "folder/subfolder/test-object", "arn:aws:s3:::test-bucket/folder/subfolder/test-object"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if got := BuildResourceArn(tc.bucket, tc.object); got != tc.want {
+				t.Errorf("Expected %s, got %s", tc.want, got)
+			}
+		})
+	}
+}
+
+// TestActionConversion verifies BuildActionName both preserves an existing
+// "s3:" prefix and adds one when it is missing.
+func TestActionConversion(t *testing.T) {
+	cases := []struct {
+		name  string
+		input string
+		want  string
+	}{
+		{"Already has s3 prefix", "s3:GetObject", "s3:GetObject"},
+		{"Add s3 prefix", "GetObject", "s3:GetObject"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if got := BuildActionName(tc.input); got != tc.want {
+				t.Errorf("Expected %s, got %s", tc.want, got)
+			}
+		})
+	}
+}
+
+// TestPolicyEngineForRequest verifies the end-to-end request path: condition
+// values are extracted from the *http.Request and evaluated against the
+// bucket policy (here a StringEquals condition on s3:RequestMethod).
+func TestPolicyEngineForRequest(t *testing.T) {
+	engine := NewPolicyEngine()
+
+	// Set up a policy
+	policyJSON := `{
+		"Version": "2012-10-17",
+		"Statement": [
+			{
+				"Effect": "Allow",
+				"Action": "s3:GetObject",
+				"Resource": "arn:aws:s3:::test-bucket/*",
+				"Condition": {
+					"StringEquals": {
+						"s3:RequestMethod": "GET"
+					}
+				}
+			}
+		]
+	}`
+
+	err := engine.SetBucketPolicy("test-bucket", policyJSON)
+	if err != nil {
+		t.Fatalf("Failed to set bucket policy: %v", err)
+	}
+
+	// Create test request
+	req := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Path: "/test-bucket/test-object",
+		},
+		Header:     make(map[string][]string),
+		RemoteAddr: "192.168.1.100:12345",
+	}
+
+	// Test the request
+	result := engine.EvaluatePolicyForRequest("test-bucket", "test-object", "GetObject", "user1", req)
+	if result != PolicyResultAllow {
+		t.Errorf("Expected Allow for matching request, got %v", result)
+	}
+}
+
+// TestWildcardMatching exercises MatchesWildcard with exact strings and
+// prefix, suffix, middle, and multiple '*' patterns.
+func TestWildcardMatching(t *testing.T) {
+	tests := []struct {
+		name     string
+		pattern  string
+		str      string
+		expected bool
+	}{
+		{
+			name:     "Exact match",
+			pattern:  "test",
+			str:      "test",
+			expected: true,
+		},
+		{
+			name:     "Single wildcard",
+			pattern:  "*",
+			str:      "anything",
+			expected: true,
+		},
+		{
+			name:     "Prefix wildcard",
+			pattern:  "test*",
+			str:      "test123",
+			expected: true,
+		},
+		{
+			name:     "Suffix wildcard",
+			pattern:  "*test",
+			str:      "123test",
+			expected: true,
+		},
+		{
+			name:     "Middle wildcard",
+			pattern:  "test*123",
+			str:      "testABC123",
+			expected: true,
+		},
+		{
+			name:     "No match",
+			pattern:  "test*",
+			str:      "other",
+			expected: false,
+		},
+		{
+			name:     "Multiple wildcards",
+			pattern:  "test*abc*123",
+			str:      "testXYZabcDEF123",
+			expected: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := MatchesWildcard(tt.pattern, tt.str)
+			if result != tt.expected {
+				t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.str, tt.expected, result)
+			}
+		})
+	}
+}
+
+// TestCompilePolicy verifies that CompilePolicy produces one compiled
+// statement with a pre-compiled pattern per Action and Resource entry.
+func TestCompilePolicy(t *testing.T) {
+	policyJSON := `{
+		"Version": "2012-10-17",
+		"Statement": [
+			{
+				"Effect": "Allow",
+				"Action": ["s3:GetObject", "s3:PutObject"],
+				"Resource": "arn:aws:s3:::test-bucket/*"
+			}
+		]
+	}`
+
+	policy, err := ParsePolicy(policyJSON)
+	if err != nil {
+		t.Fatalf("Failed to parse policy: %v", err)
+	}
+
+	compiled, err := CompilePolicy(policy)
+	if err != nil {
+		t.Fatalf("Failed to compile policy: %v", err)
+	}
+
+	if len(compiled.Statements) != 1 {
+		t.Errorf("Expected 1 compiled statement, got %d", len(compiled.Statements))
+	}
+
+	stmt := compiled.Statements[0]
+	if len(stmt.ActionPatterns) != 2 {
+		t.Errorf("Expected 2 action patterns, got %d", len(stmt.ActionPatterns))
+	}
+
+	if len(stmt.ResourcePatterns) != 1 {
+		t.Errorf("Expected 1 resource pattern, got %d", len(stmt.ResourcePatterns))
+	}
+}
+
+// TestNewPolicyBackedIAMWithLegacy tests the constructor overload that wires
+// in the legacy IAM at construction time, and checks it is equivalent to
+// NewPolicyBackedIAM followed by SetLegacyIAM.
+func TestNewPolicyBackedIAMWithLegacy(t *testing.T) {
+	// Mock legacy IAM
+	mockLegacyIAM := &MockLegacyIAM{}
+
+	// Test the new constructor
+	policyBackedIAM := NewPolicyBackedIAMWithLegacy(mockLegacyIAM)
+
+	// Verify that the legacy IAM is set
+	if policyBackedIAM.legacyIAM != mockLegacyIAM {
+		t.Errorf("Expected legacy IAM to be set, but it wasn't")
+	}
+
+	// Verify that the policy engine is initialized
+	if policyBackedIAM.policyEngine == nil {
+		t.Errorf("Expected policy engine to be initialized, but it wasn't")
+	}
+
+	// Compare with the traditional approach
+	traditionalIAM := NewPolicyBackedIAM()
+	traditionalIAM.SetLegacyIAM(mockLegacyIAM)
+
+	// Both should behave the same
+	if policyBackedIAM.legacyIAM != traditionalIAM.legacyIAM {
+		t.Errorf("Expected both approaches to result in the same legacy IAM")
+	}
+}
+
+// MockLegacyIAM implements the LegacyIAM interface for testing.
+// Its authRequest always succeeds (ErrNone) and returns a nil identity.
+type MockLegacyIAM struct{}
+
+func (m *MockLegacyIAM) authRequest(r *http.Request, action Action) (Identity, s3err.ErrorCode) {
+	return nil, s3err.ErrNone
+}
diff --git a/weed/s3api/policy_engine/examples.go b/weed/s3api/policy_engine/examples.go
new file mode 100644
index 000000000..6f14127f3
--- /dev/null
+++ b/weed/s3api/policy_engine/examples.go
@@ -0,0 +1,463 @@
+//go:build ignore
+// +build ignore
+
+package policy_engine
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// This file contains examples and documentation for the policy engine
+
+// ExampleIdentityJSON shows the existing identities.json format (unchanged).
+// Each identity lists credentials plus legacy "Type:resource" actions that
+// the policy engine can convert via ConvertIdentityToPolicy.
+var ExampleIdentityJSON = `{
+  "identities": [
+    {
+      "name": "user1",
+      "credentials": [
+        {
+          "accessKey": "AKIAIOSFODNN7EXAMPLE",
+          "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+        }
+      ],
+      "actions": [
+        "Read:bucket1/*",
+        "Write:bucket1/*",
+        "Admin:bucket2"
+      ]
+    },
+    {
+      "name": "readonly-user",
+      "credentials": [
+        {
+          "accessKey": "AKIAI44QH8DHBEXAMPLE",
+          "secretKey": "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY"
+        }
+      ],
+      "actions": [
+        "Read:bucket1/*",
+        "List:bucket1"
+      ]
+    }
+  ]
+}`
+
+// ExampleBucketPolicy shows an AWS S3 bucket policy with conditions:
+// IP-gated reads, SSL-only writes, and a hard deny on production deletes.
+var ExampleBucketPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowGetObjectFromSpecificIP",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:GetObject",
+      "Resource": "arn:aws:s3:::my-bucket/*",
+      "Condition": {
+        "IpAddress": {
+          "aws:SourceIp": "192.168.1.0/24"
+        }
+      }
+    },
+    {
+      "Sid": "AllowPutObjectWithSSL",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:PutObject",
+      "Resource": "arn:aws:s3:::my-bucket/*",
+      "Condition": {
+        "Bool": {
+          "aws:SecureTransport": "true"
+        }
+      }
+    },
+    {
+      "Sid": "DenyDeleteFromProduction",
+      "Effect": "Deny",
+      "Principal": "*",
+      "Action": "s3:DeleteObject",
+      "Resource": "arn:aws:s3:::my-bucket/production/*"
+    }
+  ]
+}`
+
+// ExampleTimeBasedPolicy shows a policy with time-based (DateGreaterThan /
+// DateLessThan) conditions on aws:RequestTime.
+var ExampleTimeBasedPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowAccessDuringBusinessHours",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": ["s3:GetObject", "s3:PutObject"],
+      "Resource": "arn:aws:s3:::my-bucket/*",
+      "Condition": {
+        "DateGreaterThan": {
+          "aws:RequestTime": "2023-01-01T08:00:00Z"
+        },
+        "DateLessThan": {
+          "aws:RequestTime": "2023-12-31T18:00:00Z"
+        }
+      }
+    }
+  ]
+}`
+
+// ExampleIPRestrictedPolicy shows a policy with IP restrictions: allow from
+// two office CIDRs, and an explicit deny (which always wins) from a third.
+var ExampleIPRestrictedPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowFromOfficeNetwork",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::my-bucket",
+        "arn:aws:s3:::my-bucket/*"
+      ],
+      "Condition": {
+        "IpAddress": {
+          "aws:SourceIp": [
+            "203.0.113.0/24",
+            "198.51.100.0/24"
+          ]
+        }
+      }
+    },
+    {
+      "Sid": "DenyFromRestrictedIPs",
+      "Effect": "Deny",
+      "Principal": "*",
+      "Action": "*",
+      "Resource": "*",
+      "Condition": {
+        "IpAddress": {
+          "aws:SourceIp": [
+            "192.0.2.0/24"
+          ]
+        }
+      }
+    }
+  ]
+}`
+
+// ExamplePublicReadPolicy shows a policy for unconditional public read access.
+var ExamplePublicReadPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "PublicReadGetObject",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:GetObject",
+      "Resource": "arn:aws:s3:::my-public-bucket/*"
+    }
+  ]
+}`
+
+// ExampleCORSPolicy shows a policy gated on the Referer header via StringLike
+// wildcard patterns (CORS-style origin restriction).
+var ExampleCORSPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowCrossOriginRequests",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": ["s3:GetObject", "s3:PutObject"],
+      "Resource": "arn:aws:s3:::my-bucket/*",
+      "Condition": {
+        "StringLike": {
+          "aws:Referer": [
+            "https://example.com/*",
+            "https://*.example.com/*"
+          ]
+        }
+      }
+    }
+  ]
+}`
+
+// ExampleUserAgentPolicy shows a policy with user agent restrictions
+// (StringLike on aws:UserAgent).
+var ExampleUserAgentPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowSpecificUserAgents",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:GetObject",
+      "Resource": "arn:aws:s3:::my-bucket/*",
+      "Condition": {
+        "StringLike": {
+          "aws:UserAgent": [
+            "MyApp/*",
+            "curl/*"
+          ]
+        }
+      }
+    }
+  ]
+}`
+
+// ExamplePrefixBasedPolicy shows per-user folder access using the
+// ${aws:username} policy variable in both Resource and s3:prefix.
+var ExamplePrefixBasedPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowUserFolderAccess",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
+      "Resource": "arn:aws:s3:::my-bucket/${aws:username}/*",
+      "Condition": {
+        "StringEquals": {
+          "s3:prefix": "${aws:username}/"
+        }
+      }
+    }
+  ]
+}`
+
+// ExampleMultiStatementPolicy shows a complex policy with multiple statements:
+// prefix-limited listing, public reads, ACL-constrained uploads, and an
+// SSL-only deny that overrides every allow.
+var ExampleMultiStatementPolicy = `{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowListBucket",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:ListBucket",
+      "Resource": "arn:aws:s3:::my-bucket",
+      "Condition": {
+        "StringEquals": {
+          "s3:prefix": "public/"
+        }
+      }
+    },
+    {
+      "Sid": "AllowGetPublicObjects",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:GetObject",
+      "Resource": "arn:aws:s3:::my-bucket/public/*"
+    },
+    {
+      "Sid": "AllowAuthenticatedUpload",
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:PutObject",
+      "Resource": "arn:aws:s3:::my-bucket/uploads/*",
+      "Condition": {
+        "StringEquals": {
+          "s3:x-amz-acl": "private"
+        }
+      }
+    },
+    {
+      "Sid": "DenyInsecureConnections",
+      "Effect": "Deny",
+      "Principal": "*",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::my-bucket",
+        "arn:aws:s3:::my-bucket/*"
+      ],
+      "Condition": {
+        "Bool": {
+          "aws:SecureTransport": "false"
+        }
+      }
+    }
+  ]
+}`
+
+// GetAllExamples returns all example policies, keyed by a short kebab-case
+// name suitable for lookup via GetExamplePolicy.
+func GetAllExamples() map[string]string {
+	examples := make(map[string]string, 8)
+	examples["basic-bucket-policy"] = ExampleBucketPolicy
+	examples["time-based-policy"] = ExampleTimeBasedPolicy
+	examples["ip-restricted-policy"] = ExampleIPRestrictedPolicy
+	examples["public-read-policy"] = ExamplePublicReadPolicy
+	examples["cors-policy"] = ExampleCORSPolicy
+	examples["user-agent-policy"] = ExampleUserAgentPolicy
+	examples["prefix-based-policy"] = ExamplePrefixBasedPolicy
+	examples["multi-statement-policy"] = ExampleMultiStatementPolicy
+	return examples
+}
+
+// ValidateExamplePolicies parses every example policy and returns an error
+// for the first one that fails validation, nil if all parse cleanly.
+func ValidateExamplePolicies() error {
+	examples := GetAllExamples()
+
+	for name, policyJSON := range examples {
+		_, err := ParsePolicy(policyJSON)
+		if err != nil {
+			// Wrap with %w (not %v) so callers can unwrap the underlying
+			// parse error with errors.Is / errors.As.
+			return fmt.Errorf("invalid example policy %s: %w", name, err)
+		}
+	}
+
+	return nil
+}
+
+// GetExamplePolicy returns the example policy registered under name, or an
+// error if no such example exists.
+func GetExamplePolicy(name string) (string, error) {
+	if policy, ok := GetAllExamples()[name]; ok {
+		return policy, nil
+	}
+	return "", fmt.Errorf("example policy %s not found", name)
+}
+
+// CreateExamplePolicyDocument looks up the named example and parses it into
+// a PolicyDocument.
+func CreateExamplePolicyDocument(name string) (*PolicyDocument, error) {
+	policyJSON, lookupErr := GetExamplePolicy(name)
+	if lookupErr != nil {
+		return nil, lookupErr
+	}
+	return ParsePolicy(policyJSON)
+}
+
+// PrintExamplePolicyPretty prints an example policy in pretty format.
+// NOTE: the Unmarshal-into-interface{} / MarshalIndent round trip re-encodes
+// JSON objects as Go maps, so keys are emitted in sorted order rather than
+// the order written in the example string.
+func PrintExamplePolicyPretty(name string) error {
+	policyJSON, err := GetExamplePolicy(name)
+	if err != nil {
+		return err
+	}
+
+	var policy interface{}
+	if err := json.Unmarshal([]byte(policyJSON), &policy); err != nil {
+		return err
+	}
+
+	prettyJSON, err := json.MarshalIndent(policy, "", "  ")
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Example Policy: %s\n", name)
+	fmt.Printf("================\n")
+	fmt.Println(string(prettyJSON))
+
+	return nil
+}
+
+// ExampleUsage demonstrates the basic policy engine flow: set a bucket
+// policy, build evaluation args (action, resource ARN, principal, condition
+// values), and switch on the three possible evaluation results.
+func ExampleUsage() {
+	// Create a new policy engine
+	engine := NewPolicyEngine()
+
+	// Set a bucket policy
+	policyJSON := ExampleBucketPolicy
+	err := engine.SetBucketPolicy("my-bucket", policyJSON)
+	if err != nil {
+		fmt.Printf("Error setting bucket policy: %v\n", err)
+		return
+	}
+
+	// Evaluate a policy
+	args := &PolicyEvaluationArgs{
+		Action:    "s3:GetObject",
+		Resource:  "arn:aws:s3:::my-bucket/test-object",
+		Principal: "*",
+		Conditions: map[string][]string{
+			"aws:SourceIp": {"192.168.1.100"},
+		},
+	}
+
+	result := engine.EvaluatePolicy("my-bucket", args)
+
+	switch result {
+	case PolicyResultAllow:
+		fmt.Println("Access allowed")
+	case PolicyResultDeny:
+		fmt.Println("Access denied")
+	case PolicyResultIndeterminate:
+		fmt.Println("Access indeterminate")
+	}
+}
+
+// ExampleLegacyIntegration demonstrates backward compatibility: converting
+// legacy "Type:resource" actions into an AWS-style policy and installing it
+// on a PolicyBackedIAM.
+func ExampleLegacyIntegration() {
+	// Legacy identity actions
+	legacyActions := []string{
+		"Read:bucket1/*",
+		"Write:bucket1/uploads/*",
+		"Admin:bucket2",
+	}
+
+	// Convert to policy
+	policy, err := ConvertIdentityToPolicy(legacyActions, "bucket1")
+	if err != nil {
+		fmt.Printf("Error converting identity to policy: %v\n", err)
+		return
+	}
+
+	// Create policy-backed IAM
+	policyIAM := NewPolicyBackedIAM()
+
+	// Serialize the converted policy. The marshal error was previously
+	// discarded with `_`; check it so a bad document is not silently
+	// installed as an empty string.
+	policyJSON, err := json.MarshalIndent(policy, "", "  ")
+	if err != nil {
+		fmt.Printf("Error marshaling policy: %v\n", err)
+		return
+	}
+
+	err = policyIAM.SetBucketPolicy("bucket1", string(policyJSON))
+	if err != nil {
+		fmt.Printf("Error setting bucket policy: %v\n", err)
+		return
+	}
+
+	fmt.Println("Legacy identity successfully converted to AWS S3 policy")
+}
+
+// ExampleConditions demonstrates various condition types supported by the
+// engine, as JSON snippets.
+// NOTE: Go map iteration order is random, so the operators print in a
+// different order on each run.
+func ExampleConditions() {
+	examples := map[string]string{
+		"StringEquals":    `"StringEquals": {"s3:prefix": "documents/"}`,
+		"StringLike":      `"StringLike": {"aws:UserAgent": "MyApp/*"}`,
+		"NumericEquals":   `"NumericEquals": {"s3:max-keys": "10"}`,
+		"NumericLessThan": `"NumericLessThan": {"s3:max-keys": "1000"}`,
+		"DateGreaterThan": `"DateGreaterThan": {"aws:RequestTime": "2023-01-01T00:00:00Z"}`,
+		"DateLessThan":    `"DateLessThan": {"aws:RequestTime": "2023-12-31T23:59:59Z"}`,
+		"IpAddress":       `"IpAddress": {"aws:SourceIp": "192.168.1.0/24"}`,
+		"NotIpAddress":    `"NotIpAddress": {"aws:SourceIp": "10.0.0.0/8"}`,
+		"Bool":            `"Bool": {"aws:SecureTransport": "true"}`,
+		"Null":            `"Null": {"s3:x-amz-server-side-encryption": "false"}`,
+	}
+
+	fmt.Println("Supported Condition Operators:")
+	fmt.Println("==============================")
+
+	for operator, example := range examples {
+		fmt.Printf("%s: %s\n", operator, example)
+	}
+}
+
+// ExampleMigrationStrategy prints the recommended migration path from the
+// legacy identity system to bucket policies.
+func ExampleMigrationStrategy() {
+	steps := []string{
+		"Migration Strategy:",
+		"==================",
+		"1. Keep existing identities.json unchanged",
+		"2. Legacy actions are automatically converted to AWS policies internally",
+		"3. Add bucket policies for advanced features:",
+		"   - IP restrictions",
+		"   - Time-based access",
+		"   - SSL-only access",
+		"   - User agent restrictions",
+		"4. Policy evaluation precedence:",
+		"   - Explicit Deny (highest priority)",
+		"   - Explicit Allow",
+		"   - Default Deny (lowest priority)",
+	}
+	for _, step := range steps {
+		fmt.Println(step)
+	}
+}
+
+// PrintAllExamples prints all example policies, separated by blank lines.
+func PrintAllExamples() {
+	examples := GetAllExamples()
+
+	for name := range examples {
+		fmt.Printf("\n")
+		// The error was previously dropped on the floor; report it so a
+		// malformed example is visible instead of silently skipped.
+		if err := PrintExamplePolicyPretty(name); err != nil {
+			fmt.Printf("Error printing example policy %s: %v\n", name, err)
+		}
+		fmt.Printf("\n")
+	}
+}
diff --git a/weed/s3api/policy_engine/integration.go b/weed/s3api/policy_engine/integration.go
new file mode 100644
index 000000000..2a6a5c8fa
--- /dev/null
+++ b/weed/s3api/policy_engine/integration.go
@@ -0,0 +1,438 @@
+package policy_engine
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+)
+
+// Action represents an S3 action - this should match the type in auth_credentials.go.
+// NOTE(review): presumably duplicated here to avoid importing the s3api
+// package — confirm the two definitions stay in sync.
+type Action string
+
+// Identity represents a user identity - this should match the type in
+// auth_credentials.go (same caveat as Action above).
+type Identity interface {
+	canDo(action Action, bucket string, objectKey string) bool
+}
+
+// PolicyBackedIAM provides policy-based access control with fallback to legacy IAM.
+// Bucket-policy decisions take precedence; indeterminate results are
+// delegated to legacyIAM (see CanDo).
+type PolicyBackedIAM struct {
+	policyEngine *PolicyEngine
+	legacyIAM    LegacyIAM // Interface to delegate to existing IAM system
+}
+
+// LegacyIAM interface for delegating to existing IAM implementation.
+type LegacyIAM interface {
+	authRequest(r *http.Request, action Action) (Identity, s3err.ErrorCode)
+}
+
+// NewPolicyBackedIAM creates a new policy-backed IAM system. The legacy IAM
+// delegate starts out unset (nil); wire it in later via SetLegacyIAM.
+func NewPolicyBackedIAM() *PolicyBackedIAM {
+	iam := &PolicyBackedIAM{}
+	iam.policyEngine = NewPolicyEngine()
+	return iam
+}
+
+// NewPolicyBackedIAMWithLegacy creates a policy-backed IAM system with the
+// legacy fallback already wired in (equivalent to NewPolicyBackedIAM followed
+// by SetLegacyIAM).
+func NewPolicyBackedIAMWithLegacy(legacyIAM LegacyIAM) *PolicyBackedIAM {
+	iam := &PolicyBackedIAM{policyEngine: NewPolicyEngine()}
+	iam.legacyIAM = legacyIAM
+	return iam
+}
+
+// SetLegacyIAM sets the legacy IAM system used as a fallback by CanDo.
+func (p *PolicyBackedIAM) SetLegacyIAM(legacyIAM LegacyIAM) {
+	p.legacyIAM = legacyIAM
+}
+
+// SetBucketPolicy sets the policy for a bucket (delegates to the engine).
+func (p *PolicyBackedIAM) SetBucketPolicy(bucketName string, policyJSON string) error {
+	return p.policyEngine.SetBucketPolicy(bucketName, policyJSON)
+}
+
+// GetBucketPolicy gets the policy for a bucket (delegates to the engine).
+func (p *PolicyBackedIAM) GetBucketPolicy(bucketName string) (*PolicyDocument, error) {
+	return p.policyEngine.GetBucketPolicy(bucketName)
+}
+
+// DeleteBucketPolicy deletes the policy for a bucket (delegates to the engine).
+func (p *PolicyBackedIAM) DeleteBucketPolicy(bucketName string) error {
+	return p.policyEngine.DeleteBucketPolicy(bucketName)
+}
+
+// CanDo checks if a principal can perform an action on a resource.
+// Precedence: an explicit bucket-policy Allow or Deny is final; an
+// indeterminate result — or no bucket policy at all — falls through to the
+// legacy identity-based evaluation.
+func (p *PolicyBackedIAM) CanDo(action, bucketName, objectName, principal string, r *http.Request) bool {
+	// If there's a bucket policy, evaluate it
+	if p.policyEngine.HasPolicyForBucket(bucketName) {
+		result := p.policyEngine.EvaluatePolicyForRequest(bucketName, objectName, action, principal, r)
+		switch result {
+		case PolicyResultAllow:
+			return true
+		case PolicyResultDeny:
+			return false
+		case PolicyResultIndeterminate:
+			// Fall through to legacy system
+		}
+	}
+
+	// No bucket policy or indeterminate result, use legacy conversion
+	return p.evaluateLegacyAction(action, bucketName, objectName, principal)
+}
+
+// evaluateLegacyAction evaluates actions using legacy identity-based rules.
+// Returns false when the legacy system rejects the request or no legacy IAM
+// is configured (via evaluateUsingPolicyConversion's conservative default).
+func (p *PolicyBackedIAM) evaluateLegacyAction(action, bucketName, objectName, principal string) bool {
+	// If we have a legacy IAM system to delegate to, use it
+	if p.legacyIAM != nil {
+		// Create a dummy request for legacy evaluation
+		// In real implementation, this would use the actual request
+		// NOTE(review): the synthesized request has no URL, method, or auth
+		// headers, so authRequest implementations that inspect the request
+		// will not see the caller's context — confirm this is acceptable.
+		r := &http.Request{
+			Header: make(http.Header),
+		}
+
+		// Convert the action string to Action type
+		legacyAction := Action(action)
+
+		// Use legacy IAM to check permission
+		identity, errCode := p.legacyIAM.authRequest(r, legacyAction)
+		if errCode != s3err.ErrNone {
+			return false
+		}
+
+		// If we have an identity, check if it can perform the action
+		if identity != nil {
+			return identity.canDo(legacyAction, bucketName, objectName)
+		}
+	}
+
+	// No legacy IAM available, convert to policy and evaluate
+	return p.evaluateUsingPolicyConversion(action, bucketName, objectName, principal)
+}
+
+// evaluateUsingPolicyConversion is the fallback when no legacy IAM is wired
+// in. It deliberately denies everything (fail-closed) until a proper legacy
+// integration exists; the attempt is logged at verbosity 2 for diagnosis.
+func (p *PolicyBackedIAM) evaluateUsingPolicyConversion(action, bucketName, objectName, principal string) bool {
+	// For now, use a conservative approach for legacy actions
+	// In a real implementation, this would integrate with the existing identity system
+	glog.V(2).Infof("Legacy action evaluation for %s on %s/%s by %s", action, bucketName, objectName, principal)
+
+	// Return false to maintain security until proper legacy integration is implemented
+	// This ensures no unintended access is granted
+	return false
+}
+
+// ConvertIdentityToPolicy converts legacy identity actions ("Type:resource")
+// to an AWS policy document. Unconvertible actions are logged and skipped;
+// an error is returned only when no action converts at all.
+func ConvertIdentityToPolicy(identityActions []string, bucketName string) (*PolicyDocument, error) {
+	statements := make([]PolicyStatement, 0)
+
+	for _, action := range identityActions {
+		stmt, err := convertSingleAction(action, bucketName)
+		if err != nil {
+			// Best-effort: skip invalid actions rather than failing the batch.
+			glog.Warningf("Failed to convert action %s: %v", action, err)
+			continue
+		}
+		if stmt != nil {
+			statements = append(statements, *stmt)
+		}
+	}
+
+	if len(statements) == 0 {
+		return nil, fmt.Errorf("no valid statements generated")
+	}
+
+	return &PolicyDocument{
+		Version:   PolicyVersion2012_10_17,
+		Statement: statements,
+	}, nil
+}
+
+// convertSingleAction converts a single legacy "Type:resource" action to an
+// Allow policy statement, expanding the action type into its S3 action list
+// and the resource pattern into ARNs.
+// NOTE(review): the bucketName parameter is not referenced anywhere in this
+// function — the resource comes entirely from the action string; confirm
+// whether it should constrain the generated ARNs.
+func convertSingleAction(action, bucketName string) (*PolicyStatement, error) {
+	parts := strings.Split(action, ":")
+	if len(parts) != 2 {
+		return nil, fmt.Errorf("invalid action format: %s", action)
+	}
+
+	actionType := parts[0]
+	resourcePattern := parts[1]
+
+	var s3Actions []string
+	var resources []string
+
+	switch actionType {
+	case "Read":
+		s3Actions = []string{"s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"}
+		if strings.HasSuffix(resourcePattern, "/*") {
+			// Object-level read access: bucket ARN (for ListBucket) plus
+			// the object wildcard ARN.
+			bucket := strings.TrimSuffix(resourcePattern, "/*")
+			resources = []string{
+				fmt.Sprintf("arn:aws:s3:::%s", bucket),
+				fmt.Sprintf("arn:aws:s3:::%s/*", bucket),
+			}
+		} else {
+			// Bucket-level read access
+			resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)}
+		}
+
+	case "Write":
+		s3Actions = []string{"s3:PutObject", "s3:DeleteObject", "s3:PutObjectAcl"}
+		if strings.HasSuffix(resourcePattern, "/*") {
+			// Object-level write access
+			bucket := strings.TrimSuffix(resourcePattern, "/*")
+			resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)}
+		} else {
+			// Bucket-level write access
+			// NOTE(review): these object-level actions (PutObject etc.) are
+			// granted against a bucket-only ARN here, unlike the
+			// BypassGovernanceRetention case below which appends "/*" —
+			// confirm this asymmetry is intended.
+			resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)}
+		}
+
+	case "Admin":
+		s3Actions = []string{"s3:*"}
+		resources = []string{
+			fmt.Sprintf("arn:aws:s3:::%s", resourcePattern),
+			fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern),
+		}
+
+	case "List":
+		s3Actions = []string{"s3:ListBucket", "s3:ListBucketVersions"}
+		resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)}
+
+	case "Tagging":
+		s3Actions = []string{"s3:GetObjectTagging", "s3:PutObjectTagging", "s3:DeleteObjectTagging"}
+		resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)}
+
+	case "BypassGovernanceRetention":
+		s3Actions = []string{"s3:BypassGovernanceRetention"}
+		if strings.HasSuffix(resourcePattern, "/*") {
+			// Object-level bypass governance access
+			bucket := strings.TrimSuffix(resourcePattern, "/*")
+			resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)}
+		} else {
+			// Bucket-level bypass governance access
+			resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)}
+		}
+
+	default:
+		return nil, fmt.Errorf("unknown action type: %s", actionType)
+	}
+
+	return &PolicyStatement{
+		Effect:   PolicyEffectAllow,
+		Action:   NewStringOrStringSlice(s3Actions...),
+		Resource: NewStringOrStringSlice(resources...),
+	}, nil
+}
+
+// GetActionMappings returns the mapping of legacy action types to the full
+// sets of S3 actions they imply, used by ValidateActionMapping and
+// ConvertLegacyActions.
+// NOTE(review): these lists are broader than the per-statement lists
+// convertSingleAction emits (e.g. "Read" maps to 14 actions here but 3
+// there) — confirm the two are meant to differ.
+func GetActionMappings() map[string][]string {
+	return map[string][]string{
+		"Read": {
+			"s3:GetObject",
+			"s3:GetObjectVersion",
+			"s3:GetObjectAcl",
+			"s3:GetObjectVersionAcl",
+			"s3:GetObjectTagging",
+			"s3:GetObjectVersionTagging",
+			"s3:ListBucket",
+			"s3:ListBucketVersions",
+			"s3:GetBucketLocation",
+			"s3:GetBucketVersioning",
+			"s3:GetBucketAcl",
+			"s3:GetBucketCors",
+			"s3:GetBucketTagging",
+			"s3:GetBucketNotification",
+		},
+		"Write": {
+			"s3:PutObject",
+			"s3:PutObjectAcl",
+			"s3:PutObjectTagging",
+			"s3:DeleteObject",
+			"s3:DeleteObjectVersion",
+			"s3:DeleteObjectTagging",
+			"s3:AbortMultipartUpload",
+			"s3:ListMultipartUploads",
+			"s3:ListParts",
+			"s3:PutBucketAcl",
+			"s3:PutBucketCors",
+			"s3:PutBucketTagging",
+			"s3:PutBucketNotification",
+			"s3:PutBucketVersioning",
+			"s3:DeleteBucketTagging",
+			"s3:DeleteBucketCors",
+		},
+		"Admin": {
+			"s3:*",
+		},
+		"List": {
+			"s3:ListBucket",
+			"s3:ListBucketVersions",
+			"s3:ListAllMyBuckets",
+		},
+		"Tagging": {
+			"s3:GetObjectTagging",
+			"s3:PutObjectTagging",
+			"s3:DeleteObjectTagging",
+			"s3:GetBucketTagging",
+			"s3:PutBucketTagging",
+			"s3:DeleteBucketTagging",
+		},
+		"BypassGovernanceRetention": {
+			"s3:BypassGovernanceRetention",
+		},
+	}
+}
+
+// ValidateActionMapping checks that a legacy action string is of the form
+// "ActionType:Resource" with a known action type and a non-empty resource.
+func ValidateActionMapping(action string) error {
+	parts := strings.Split(action, ":")
+	if len(parts) != 2 {
+		return fmt.Errorf("invalid action format: %s, expected format: 'ActionType:Resource'", action)
+	}
+
+	if _, known := GetActionMappings()[parts[0]]; !known {
+		return fmt.Errorf("unknown action type: %s", parts[0])
+	}
+
+	if parts[1] == "" {
+		return fmt.Errorf("resource cannot be empty")
+	}
+
+	return nil
+}
+
+// ConvertLegacyActions converts an array of legacy actions to S3 actions
+func ConvertLegacyActions(legacyActions []string) ([]string, error) {
+ mappings := GetActionMappings()
+ s3Actions := make([]string, 0)
+
+ for _, legacyAction := range legacyActions {
+ if err := ValidateActionMapping(legacyAction); err != nil {
+ return nil, err
+ }
+
+ parts := strings.Split(legacyAction, ":")
+ actionType := parts[0]
+
+ if actionType == "Admin" {
+ // Admin gives all permissions, so we can just return s3:*
+ return []string{"s3:*"}, nil
+ }
+
+ if mapped, exists := mappings[actionType]; exists {
+ s3Actions = append(s3Actions, mapped...)
+ }
+ }
+
+ // Remove duplicates
+ uniqueActions := make([]string, 0)
+ seen := make(map[string]bool)
+ for _, action := range s3Actions {
+ if !seen[action] {
+ uniqueActions = append(uniqueActions, action)
+ seen[action] = true
+ }
+ }
+
+ return uniqueActions, nil
+}
+
// GetResourcesFromLegacyAction derives the ARN resource list for a legacy
// "ActionType:ResourcePattern" action. A pattern ending in "/*" yields both
// the bucket ARN and the object wildcard ARN; any other pattern yields only
// the bucket ARN.
func GetResourcesFromLegacyAction(legacyAction string) ([]string, error) {
	parts := strings.Split(legacyAction, ":")
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid action format: %s", legacyAction)
	}

	pattern := parts[1]
	if strings.HasSuffix(pattern, "/*") {
		// Object-level access: include the bucket itself plus all objects.
		bucket := strings.TrimSuffix(pattern, "/*")
		return []string{
			fmt.Sprintf("arn:aws:s3:::%s", bucket),
			fmt.Sprintf("arn:aws:s3:::%s/*", bucket),
		}, nil
	}

	// Bucket-level access only.
	return []string{fmt.Sprintf("arn:aws:s3:::%s", pattern)}, nil
}
+
+// CreatePolicyFromLegacyIdentity creates a policy document from legacy identity actions
+func CreatePolicyFromLegacyIdentity(identityName string, actions []string) (*PolicyDocument, error) {
+ statements := make([]PolicyStatement, 0)
+
+ // Group actions by resource pattern
+ resourceActions := make(map[string][]string)
+
+ for _, action := range actions {
+ parts := strings.Split(action, ":")
+ if len(parts) != 2 {
+ continue
+ }
+
+ resourcePattern := parts[1]
+ actionType := parts[0]
+
+ if _, exists := resourceActions[resourcePattern]; !exists {
+ resourceActions[resourcePattern] = make([]string, 0)
+ }
+ resourceActions[resourcePattern] = append(resourceActions[resourcePattern], actionType)
+ }
+
+ // Create statements for each resource pattern
+ for resourcePattern, actionTypes := range resourceActions {
+ s3Actions := make([]string, 0)
+
+ for _, actionType := range actionTypes {
+ if actionType == "Admin" {
+ s3Actions = []string{"s3:*"}
+ break
+ }
+
+ if mapped, exists := GetActionMappings()[actionType]; exists {
+ s3Actions = append(s3Actions, mapped...)
+ }
+ }
+
+ resources, err := GetResourcesFromLegacyAction(fmt.Sprintf("dummy:%s", resourcePattern))
+ if err != nil {
+ continue
+ }
+
+ statement := PolicyStatement{
+ Sid: fmt.Sprintf("%s-%s", identityName, strings.ReplaceAll(resourcePattern, "/", "-")),
+ Effect: PolicyEffectAllow,
+ Action: NewStringOrStringSlice(s3Actions...),
+ Resource: NewStringOrStringSlice(resources...),
+ }
+
+ statements = append(statements, statement)
+ }
+
+ if len(statements) == 0 {
+ return nil, fmt.Errorf("no valid statements generated for identity %s", identityName)
+ }
+
+ return &PolicyDocument{
+ Version: PolicyVersion2012_10_17,
+ Statement: statements,
+ }, nil
+}
+
// HasPolicyForBucket reports whether a bucket policy is registered for the
// given bucket, delegating to the underlying policy engine.
func (p *PolicyBackedIAM) HasPolicyForBucket(bucketName string) bool {
	return p.policyEngine.HasPolicyForBucket(bucketName)
}
+
// GetPolicyEngine returns the underlying policy engine so callers can
// register or evaluate policies directly.
func (p *PolicyBackedIAM) GetPolicyEngine() *PolicyEngine {
	return p.policyEngine
}
diff --git a/weed/s3api/policy_engine/types.go b/weed/s3api/policy_engine/types.go
new file mode 100644
index 000000000..134305183
--- /dev/null
+++ b/weed/s3api/policy_engine/types.go
@@ -0,0 +1,454 @@
+package policy_engine
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+)
+
+// Policy Engine Types
+//
+// This package provides enhanced AWS S3-compatible policy types with improved type safety.
+//
+// MIGRATION COMPLETE:
+// This is now the unified PolicyDocument type used throughout the SeaweedFS codebase.
+// The previous duplicate PolicyDocument types in iamapi and credential packages have
+// been migrated to use these enhanced types, providing:
+// - Principal specifications
+// - Complex conditions (IP, time, string patterns, etc.)
+// - Flexible string/array types with proper JSON marshaling
+// - Policy compilation for performance
+//
+// All policy operations now use this single, consistent type definition.
+
+// Constants for policy validation
+const (
+ // PolicyVersion2012_10_17 is the standard AWS policy version
+ PolicyVersion2012_10_17 = "2012-10-17"
+)
+
// StringOrStringSlice represents a JSON value that may appear either as a
// single string or as an array of strings, as AWS policy documents allow
// for fields such as Action, Resource, and Principal. The value is always
// normalized to a slice internally.
type StringOrStringSlice struct {
	values []string
}
+
+// UnmarshalJSON implements json.Unmarshaler for StringOrStringSlice
+func (s *StringOrStringSlice) UnmarshalJSON(data []byte) error {
+ // Try unmarshaling as string first
+ var str string
+ if err := json.Unmarshal(data, &str); err == nil {
+ s.values = []string{str}
+ return nil
+ }
+
+ // Try unmarshaling as []string
+ var strs []string
+ if err := json.Unmarshal(data, &strs); err == nil {
+ s.values = strs
+ return nil
+ }
+
+ return fmt.Errorf("value must be string or []string")
+}
+
+// MarshalJSON implements json.Marshaler for StringOrStringSlice
+func (s StringOrStringSlice) MarshalJSON() ([]byte, error) {
+ if len(s.values) == 1 {
+ return json.Marshal(s.values[0])
+ }
+ return json.Marshal(s.values)
+}
+
// Strings returns the normalized slice of values. The returned slice is the
// internal backing slice, so callers should not mutate it.
func (s StringOrStringSlice) Strings() []string {
	return s.values
}

// NewStringOrStringSlice creates a new StringOrStringSlice from the given values.
func NewStringOrStringSlice(values ...string) StringOrStringSlice {
	return StringOrStringSlice{values: values}
}
+
// PolicyConditions represents policy conditions keyed first by condition
// operator (e.g. "StringEquals", "IpAddress") and then by condition key
// (e.g. "aws:SourceIp"), mapping to the expected value(s).
type PolicyConditions map[string]map[string]StringOrStringSlice

// PolicyDocument represents an AWS S3 bucket policy document.
type PolicyDocument struct {
	Version   string            `json:"Version"`
	Statement []PolicyStatement `json:"Statement"`
}

// PolicyStatement represents a single policy statement.
type PolicyStatement struct {
	Sid       string               `json:"Sid,omitempty"`
	Effect    PolicyEffect         `json:"Effect"`
	Principal *StringOrStringSlice `json:"Principal,omitempty"`
	Action    StringOrStringSlice  `json:"Action"`
	Resource  StringOrStringSlice  `json:"Resource"`
	Condition PolicyConditions     `json:"Condition,omitempty"`
}

// PolicyEffect represents Allow or Deny
type PolicyEffect string

const (
	PolicyEffectAllow PolicyEffect = "Allow"
	PolicyEffectDeny  PolicyEffect = "Deny"
)

// PolicyEvaluationArgs contains the arguments for policy evaluation.
type PolicyEvaluationArgs struct {
	Action     string              // S3 action being attempted, e.g. "s3:GetObject"
	Resource   string              // ARN of the resource being accessed
	Principal  string              // identity performing the action
	Conditions map[string][]string // request context values for condition evaluation
}

// PolicyCache for caching compiled policies.
// NOTE(review): lastUpdate is never written in this file — confirm it is
// maintained elsewhere or remove it.
type PolicyCache struct {
	policies   map[string]*CompiledPolicy
	lastUpdate time.Time
}

// CompiledPolicy represents a policy that has been compiled for efficient evaluation.
type CompiledPolicy struct {
	Document   *PolicyDocument
	Statements []CompiledStatement
}

// CompiledStatement represents a compiled policy statement, holding both
// wildcard matchers (preferred) and regex patterns for each of the
// statement's action, resource, and principal values.
type CompiledStatement struct {
	Statement         *PolicyStatement
	ActionMatchers    []*WildcardMatcher
	ResourceMatchers  []*WildcardMatcher
	PrincipalMatchers []*WildcardMatcher
	// Keep regex patterns for backward compatibility
	ActionPatterns    []*regexp.Regexp
	ResourcePatterns  []*regexp.Regexp
	PrincipalPatterns []*regexp.Regexp
}
+
// NewPolicyCache creates a new, empty policy cache.
func NewPolicyCache() *PolicyCache {
	return &PolicyCache{
		policies: make(map[string]*CompiledPolicy),
	}
}
+
+// ValidatePolicy validates a policy document
+func ValidatePolicy(policyDoc *PolicyDocument) error {
+ if policyDoc.Version != PolicyVersion2012_10_17 {
+ return fmt.Errorf("unsupported policy version: %s", policyDoc.Version)
+ }
+
+ if len(policyDoc.Statement) == 0 {
+ return fmt.Errorf("policy must contain at least one statement")
+ }
+
+ for i, stmt := range policyDoc.Statement {
+ if err := validateStatement(&stmt); err != nil {
+ return fmt.Errorf("invalid statement %d: %v", i, err)
+ }
+ }
+
+ return nil
+}
+
+// validateStatement validates a single policy statement
+func validateStatement(stmt *PolicyStatement) error {
+ if stmt.Effect != PolicyEffectAllow && stmt.Effect != PolicyEffectDeny {
+ return fmt.Errorf("invalid effect: %s", stmt.Effect)
+ }
+
+ if len(stmt.Action.Strings()) == 0 {
+ return fmt.Errorf("action is required")
+ }
+
+ if len(stmt.Resource.Strings()) == 0 {
+ return fmt.Errorf("resource is required")
+ }
+
+ return nil
+}
+
+// ParsePolicy parses a policy JSON string
+func ParsePolicy(policyJSON string) (*PolicyDocument, error) {
+ var policy PolicyDocument
+ if err := json.Unmarshal([]byte(policyJSON), &policy); err != nil {
+ return nil, fmt.Errorf("failed to parse policy JSON: %v", err)
+ }
+
+ if err := ValidatePolicy(&policy); err != nil {
+ return nil, fmt.Errorf("invalid policy: %v", err)
+ }
+
+ return &policy, nil
+}
+
+// CompilePolicy compiles a policy for efficient evaluation
+func CompilePolicy(policy *PolicyDocument) (*CompiledPolicy, error) {
+ compiled := &CompiledPolicy{
+ Document: policy,
+ Statements: make([]CompiledStatement, len(policy.Statement)),
+ }
+
+ for i, stmt := range policy.Statement {
+ compiledStmt, err := compileStatement(&stmt)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile statement %d: %v", i, err)
+ }
+ compiled.Statements[i] = *compiledStmt
+ }
+
+ return compiled, nil
+}
+
+// compileStatement compiles a single policy statement
+func compileStatement(stmt *PolicyStatement) (*CompiledStatement, error) {
+ compiled := &CompiledStatement{
+ Statement: stmt,
+ }
+
+ // Compile action patterns and matchers
+ for _, action := range stmt.Action.Strings() {
+ pattern, err := compilePattern(action)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile action pattern %s: %v", action, err)
+ }
+ compiled.ActionPatterns = append(compiled.ActionPatterns, pattern)
+
+ matcher, err := NewWildcardMatcher(action)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create action matcher %s: %v", action, err)
+ }
+ compiled.ActionMatchers = append(compiled.ActionMatchers, matcher)
+ }
+
+ // Compile resource patterns and matchers
+ for _, resource := range stmt.Resource.Strings() {
+ pattern, err := compilePattern(resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile resource pattern %s: %v", resource, err)
+ }
+ compiled.ResourcePatterns = append(compiled.ResourcePatterns, pattern)
+
+ matcher, err := NewWildcardMatcher(resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create resource matcher %s: %v", resource, err)
+ }
+ compiled.ResourceMatchers = append(compiled.ResourceMatchers, matcher)
+ }
+
+ // Compile principal patterns and matchers if present
+ if stmt.Principal != nil && len(stmt.Principal.Strings()) > 0 {
+ for _, principal := range stmt.Principal.Strings() {
+ pattern, err := compilePattern(principal)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile principal pattern %s: %v", principal, err)
+ }
+ compiled.PrincipalPatterns = append(compiled.PrincipalPatterns, pattern)
+
+ matcher, err := NewWildcardMatcher(principal)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create principal matcher %s: %v", principal, err)
+ }
+ compiled.PrincipalMatchers = append(compiled.PrincipalMatchers, matcher)
+ }
+ }
+
+ return compiled, nil
+}
+
// compilePattern compiles a wildcard pattern to a regex. It is a thin alias
// for CompileWildcardPattern, kept for readability at call sites within
// this package.
func compilePattern(pattern string) (*regexp.Regexp, error) {
	return CompileWildcardPattern(pattern)
}
+
+// normalizeToStringSlice converts various types to string slice - kept for backward compatibility
+func normalizeToStringSlice(value interface{}) []string {
+ result, err := normalizeToStringSliceWithError(value)
+ if err != nil {
+ glog.Warningf("unexpected type for policy value: %T, error: %v", value, err)
+ return []string{fmt.Sprintf("%v", value)}
+ }
+ return result
+}
+
+// normalizeToStringSliceWithError converts various types to string slice with proper error handling
+func normalizeToStringSliceWithError(value interface{}) ([]string, error) {
+ switch v := value.(type) {
+ case string:
+ return []string{v}, nil
+ case []string:
+ return v, nil
+ case []interface{}:
+ result := make([]string, len(v))
+ for i, item := range v {
+ result[i] = fmt.Sprintf("%v", item)
+ }
+ return result, nil
+ case StringOrStringSlice:
+ return v.Strings(), nil
+ default:
+ return nil, fmt.Errorf("unexpected type for policy value: %T", v)
+ }
+}
+
// GetBucketFromResource extracts the bucket name from a resource ARN of the
// form "arn:aws:s3:::bucket-name[/object-path]". It returns "" when the
// resource does not carry the S3 ARN prefix.
func GetBucketFromResource(resource string) string {
	// Derive the slice offset from the prefix itself instead of the
	// previous hard-coded resource[13:].
	const arnPrefix = "arn:aws:s3:::"
	if !strings.HasPrefix(resource, arnPrefix) {
		return ""
	}
	bucket := strings.TrimPrefix(resource, arnPrefix)
	// Drop any object path after the bucket name.
	if i := strings.IndexByte(bucket, '/'); i >= 0 {
		bucket = bucket[:i]
	}
	return bucket
}
+
// IsObjectResource reports whether the resource ARN refers to objects,
// i.e. contains a "/" separating the bucket name from an object key.
func IsObjectResource(resource string) bool {
	return strings.IndexByte(resource, '/') >= 0
}
+
// S3Actions maps short S3 action names to their fully qualified "s3:"-
// prefixed form, covering common object, bucket, multipart, tagging, and
// object-lock operations.
var S3Actions = map[string]string{
	"GetObject":                        "s3:GetObject",
	"PutObject":                        "s3:PutObject",
	"DeleteObject":                     "s3:DeleteObject",
	"GetObjectVersion":                 "s3:GetObjectVersion",
	"DeleteObjectVersion":              "s3:DeleteObjectVersion",
	"ListBucket":                       "s3:ListBucket",
	"ListBucketVersions":               "s3:ListBucketVersions",
	"GetBucketLocation":                "s3:GetBucketLocation",
	"GetBucketVersioning":              "s3:GetBucketVersioning",
	"PutBucketVersioning":              "s3:PutBucketVersioning",
	"GetBucketAcl":                     "s3:GetBucketAcl",
	"PutBucketAcl":                     "s3:PutBucketAcl",
	"GetObjectAcl":                     "s3:GetObjectAcl",
	"PutObjectAcl":                     "s3:PutObjectAcl",
	"GetBucketPolicy":                  "s3:GetBucketPolicy",
	"PutBucketPolicy":                  "s3:PutBucketPolicy",
	"DeleteBucketPolicy":               "s3:DeleteBucketPolicy",
	"GetBucketCors":                    "s3:GetBucketCors",
	"PutBucketCors":                    "s3:PutBucketCors",
	"DeleteBucketCors":                 "s3:DeleteBucketCors",
	"GetBucketNotification":            "s3:GetBucketNotification",
	"PutBucketNotification":            "s3:PutBucketNotification",
	"GetBucketTagging":                 "s3:GetBucketTagging",
	"PutBucketTagging":                 "s3:PutBucketTagging",
	"DeleteBucketTagging":              "s3:DeleteBucketTagging",
	"GetObjectTagging":                 "s3:GetObjectTagging",
	"PutObjectTagging":                 "s3:PutObjectTagging",
	"DeleteObjectTagging":              "s3:DeleteObjectTagging",
	"ListMultipartUploads":             "s3:ListMultipartUploads",
	"AbortMultipartUpload":             "s3:AbortMultipartUpload",
	"ListParts":                        "s3:ListParts",
	"GetObjectRetention":               "s3:GetObjectRetention",
	"PutObjectRetention":               "s3:PutObjectRetention",
	"GetObjectLegalHold":               "s3:GetObjectLegalHold",
	"PutObjectLegalHold":               "s3:PutObjectLegalHold",
	"GetBucketObjectLockConfiguration": "s3:GetBucketObjectLockConfiguration",
	"PutBucketObjectLockConfiguration": "s3:PutBucketObjectLockConfiguration",
	"BypassGovernanceRetention":        "s3:BypassGovernanceRetention",
}
+
+// MatchesAction checks if an action matches any of the compiled action matchers
+func (cs *CompiledStatement) MatchesAction(action string) bool {
+ for _, matcher := range cs.ActionMatchers {
+ if matcher.Match(action) {
+ return true
+ }
+ }
+ return false
+}
+
+// MatchesResource checks if a resource matches any of the compiled resource matchers
+func (cs *CompiledStatement) MatchesResource(resource string) bool {
+ for _, matcher := range cs.ResourceMatchers {
+ if matcher.Match(resource) {
+ return true
+ }
+ }
+ return false
+}
+
+// MatchesPrincipal checks if a principal matches any of the compiled principal matchers
+func (cs *CompiledStatement) MatchesPrincipal(principal string) bool {
+ // If no principals specified, match all
+ if len(cs.PrincipalMatchers) == 0 {
+ return true
+ }
+
+ for _, matcher := range cs.PrincipalMatchers {
+ if matcher.Match(principal) {
+ return true
+ }
+ }
+ return false
+}
+
+// EvaluateStatement evaluates a compiled statement against the given arguments
+func (cs *CompiledStatement) EvaluateStatement(args *PolicyEvaluationArgs) bool {
+ // Check if action matches
+ if !cs.MatchesAction(args.Action) {
+ return false
+ }
+
+ // Check if resource matches
+ if !cs.MatchesResource(args.Resource) {
+ return false
+ }
+
+ // Check if principal matches
+ if !cs.MatchesPrincipal(args.Principal) {
+ return false
+ }
+
+ // TODO: Add condition evaluation if needed
+ // if !cs.evaluateConditions(args.Conditions) {
+ // return false
+ // }
+
+ return true
+}
+
+// EvaluatePolicy evaluates a compiled policy against the given arguments
+func (cp *CompiledPolicy) EvaluatePolicy(args *PolicyEvaluationArgs) (bool, PolicyEffect) {
+ var explicitAllow, explicitDeny bool
+
+ // Evaluate each statement
+ for _, stmt := range cp.Statements {
+ if stmt.EvaluateStatement(args) {
+ if stmt.Statement.Effect == PolicyEffectAllow {
+ explicitAllow = true
+ } else if stmt.Statement.Effect == PolicyEffectDeny {
+ explicitDeny = true
+ }
+ }
+ }
+
+ // AWS policy evaluation logic: explicit deny overrides allow
+ if explicitDeny {
+ return false, PolicyEffectDeny
+ }
+ if explicitAllow {
+ return true, PolicyEffectAllow
+ }
+
+ // No matching statements - implicit deny
+ return false, PolicyEffectDeny
+}
+
// FastMatchesWildcard matches str against pattern using the process-wide
// cached WildcardMatcher for performance. If a cached matcher cannot be
// obtained, the error is logged and the uncached MatchesWildcard path is
// used instead.
func FastMatchesWildcard(pattern, str string) bool {
	matcher, err := GetCachedWildcardMatcher(pattern)
	if err != nil {
		glog.Errorf("Error getting cached WildcardMatcher for pattern %s: %v", pattern, err)
		// Fall back to the original implementation
		return MatchesWildcard(pattern, str)
	}
	return matcher.Match(str)
}
diff --git a/weed/s3api/policy_engine/wildcard_matcher.go b/weed/s3api/policy_engine/wildcard_matcher.go
new file mode 100644
index 000000000..7fd36abf9
--- /dev/null
+++ b/weed/s3api/policy_engine/wildcard_matcher.go
@@ -0,0 +1,253 @@
+package policy_engine
+
+import (
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+)
+
// WildcardMatcher provides unified wildcard matching functionality.
// Patterns containing '?' are compiled to a regex; patterns using only '*'
// are matched with plain string operations, which is faster.
type WildcardMatcher struct {
	// Use regex for complex patterns with ? wildcards
	// Use string manipulation for simple * patterns (better performance)
	useRegex bool
	regex    *regexp.Regexp
	pattern  string
}

// WildcardMatcherCache provides caching for WildcardMatcher instances with
// LRU eviction once maxSize entries are cached. All fields are guarded by mu.
type WildcardMatcherCache struct {
	mu          sync.RWMutex
	matchers    map[string]*WildcardMatcher
	maxSize     int
	accessOrder []string // For LRU eviction
}
+
+// NewWildcardMatcherCache creates a new WildcardMatcherCache with a configurable maxSize
+func NewWildcardMatcherCache(maxSize int) *WildcardMatcherCache {
+ if maxSize <= 0 {
+ maxSize = 1000 // Default value
+ }
+ return &WildcardMatcherCache{
+ matchers: make(map[string]*WildcardMatcher),
+ maxSize: maxSize,
+ }
+}
+
// wildcardMatcherCache is the process-wide shared matcher cache used by
// GetCachedWildcardMatcher.
var wildcardMatcherCache = NewWildcardMatcherCache(1000) // Default maxSize
+
// GetCachedWildcardMatcher gets or creates a cached WildcardMatcher for the
// given pattern. Cache hits take only a read lock; misses upgrade to a
// write lock, compile the matcher, and insert it, evicting the least
// recently used entry when the cache is full.
//
// NOTE(review): between releasing the read lock and updateAccessOrder
// re-acquiring the write lock, a concurrent eviction could remove the
// pattern, so the access-order update may refer to an absent entry —
// confirm this window is acceptable.
func GetCachedWildcardMatcher(pattern string) (*WildcardMatcher, error) {
	// Fast path: check if already in cache
	wildcardMatcherCache.mu.RLock()
	if matcher, exists := wildcardMatcherCache.matchers[pattern]; exists {
		wildcardMatcherCache.mu.RUnlock()
		wildcardMatcherCache.updateAccessOrder(pattern)
		return matcher, nil
	}
	wildcardMatcherCache.mu.RUnlock()

	// Slow path: create new matcher and cache it
	wildcardMatcherCache.mu.Lock()
	defer wildcardMatcherCache.mu.Unlock()

	// Double-check after acquiring write lock
	if matcher, exists := wildcardMatcherCache.matchers[pattern]; exists {
		wildcardMatcherCache.updateAccessOrderLocked(pattern)
		return matcher, nil
	}

	// Create new matcher
	matcher, err := NewWildcardMatcher(pattern)
	if err != nil {
		return nil, err
	}

	// Evict old entries if cache is full
	if len(wildcardMatcherCache.matchers) >= wildcardMatcherCache.maxSize {
		wildcardMatcherCache.evictLeastRecentlyUsed()
	}

	// Cache it
	wildcardMatcherCache.matchers[pattern] = matcher
	wildcardMatcherCache.accessOrder = append(wildcardMatcherCache.accessOrder, pattern)
	return matcher, nil
}
+
+// updateAccessOrder updates the access order for LRU eviction (with read lock)
+func (c *WildcardMatcherCache) updateAccessOrder(pattern string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.updateAccessOrderLocked(pattern)
+}
+
+// updateAccessOrderLocked updates the access order for LRU eviction (without locking)
+func (c *WildcardMatcherCache) updateAccessOrderLocked(pattern string) {
+ // Remove pattern from its current position
+ for i, p := range c.accessOrder {
+ if p == pattern {
+ c.accessOrder = append(c.accessOrder[:i], c.accessOrder[i+1:]...)
+ break
+ }
+ }
+ // Add pattern to the end (most recently used)
+ c.accessOrder = append(c.accessOrder, pattern)
+}
+
+// evictLeastRecentlyUsed removes the least recently used pattern from the cache
+func (c *WildcardMatcherCache) evictLeastRecentlyUsed() {
+ if len(c.accessOrder) == 0 {
+ return
+ }
+
+ // Remove the least recently used pattern (first in the list)
+ lruPattern := c.accessOrder[0]
+ c.accessOrder = c.accessOrder[1:]
+ delete(c.matchers, lruPattern)
+}
+
// ClearCache clears all cached patterns (useful for testing).
func (c *WildcardMatcherCache) ClearCache() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.matchers = make(map[string]*WildcardMatcher)
	c.accessOrder = c.accessOrder[:0]
}

// GetCacheStats returns the current number of cached matchers and the
// configured maximum cache size.
func (c *WildcardMatcherCache) GetCacheStats() (size int, maxSize int) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return len(c.matchers), c.maxSize
}
+
+// NewWildcardMatcher creates a new wildcard matcher for the given pattern
+func NewWildcardMatcher(pattern string) (*WildcardMatcher, error) {
+ matcher := &WildcardMatcher{
+ pattern: pattern,
+ }
+
+ // Determine if we need regex (contains ? wildcards)
+ if strings.Contains(pattern, "?") {
+ matcher.useRegex = true
+ regex, err := compileWildcardPattern(pattern)
+ if err != nil {
+ return nil, err
+ }
+ matcher.regex = regex
+ } else {
+ matcher.useRegex = false
+ }
+
+ return matcher, nil
+}
+
// Match checks if a string matches the wildcard pattern. Patterns compiled
// with '?' wildcards use the regex path; '*'-only patterns use the faster
// string-manipulation path.
func (m *WildcardMatcher) Match(str string) bool {
	if m.useRegex {
		return m.regex.MatchString(str)
	}
	return matchWildcardString(m.pattern, str)
}
+
+// MatchesWildcard provides a simple function interface for wildcard matching
+// This function consolidates the logic from the previous separate implementations
+func MatchesWildcard(pattern, str string) bool {
+ // Handle simple cases first
+ if pattern == "*" {
+ return true
+ }
+ if pattern == str {
+ return true
+ }
+
+ // Use regex for patterns with ? wildcards, string manipulation for * only
+ if strings.Contains(pattern, "?") {
+ return matchWildcardRegex(pattern, str)
+ }
+ return matchWildcardString(pattern, str)
+}
+
// CompileWildcardPattern converts a wildcard pattern to a compiled,
// fully-anchored regex. This replaces the previous compilePattern function.
func CompileWildcardPattern(pattern string) (*regexp.Regexp, error) {
	return compileWildcardPattern(pattern)
}
+
// matchWildcardString matches str against a pattern containing only '*'
// wildcards using string manipulation (more efficient than regex). The
// pattern's literal segments must appear in order: the first anchored at
// the start, the last anchored at the end, and the middle segments
// somewhere in between.
func matchWildcardString(pattern, str string) bool {
	// Handle simple cases
	if pattern == "*" || pattern == str {
		return true
	}

	// Split pattern by wildcards
	parts := strings.Split(pattern, "*")
	if len(parts) == 1 {
		// No wildcards, exact match
		return pattern == str
	}

	prefix, suffix := parts[0], parts[len(parts)-1]

	// Check that the string starts/ends with the anchored segments.
	// (HasPrefix/HasSuffix are trivially true for empty segments.)
	if !strings.HasPrefix(str, prefix) || !strings.HasSuffix(str, suffix) {
		return false
	}

	// The prefix and suffix must occupy disjoint regions of str. Without
	// this guard, a short string (e.g. pattern "ab*ba" vs "aba") made the
	// slice below use a negative index and panic.
	if len(str) < len(prefix)+len(suffix) {
		return false
	}

	// Middle parts must appear, in order, between prefix and suffix.
	searchStr := str[len(prefix) : len(str)-len(suffix)]
	for _, mid := range parts[1 : len(parts)-1] {
		if mid == "" {
			continue
		}
		index := strings.Index(searchStr, mid)
		if index == -1 {
			return false
		}
		searchStr = searchStr[index+len(mid):]
	}

	return true
}
+
// matchWildcardRegex matches patterns containing '?' wildcards via a cached
// regex-backed WildcardMatcher. If the matcher cannot be built, the error
// is logged and the '*'-only string matcher is used as a best-effort
// fallback (it will ignore '?' semantics).
func matchWildcardRegex(pattern, str string) bool {
	matcher, err := GetCachedWildcardMatcher(pattern)
	if err != nil {
		glog.Errorf("Error getting WildcardMatcher for pattern %s: %v. Falling back to matchWildcardString.", pattern, err)
		// Fallback to matchWildcardString
		return matchWildcardString(pattern, str)
	}
	return matcher.Match(str)
}
+
// compileWildcardPattern converts a wildcard pattern to a fully-anchored
// regex: '*' becomes ".*", '?' becomes ".", and every other rune is
// regex-escaped so it matches literally.
func compileWildcardPattern(pattern string) (*regexp.Regexp, error) {
	var b strings.Builder
	b.WriteString("^")
	for _, r := range pattern {
		switch r {
		case '*':
			b.WriteString(".*")
		case '?':
			b.WriteString(".")
		default:
			// Escape regex metacharacters so they match literally.
			b.WriteString(regexp.QuoteMeta(string(r)))
		}
	}
	b.WriteString("$")
	return regexp.Compile(b.String())
}
diff --git a/weed/s3api/policy_engine/wildcard_matcher_test.go b/weed/s3api/policy_engine/wildcard_matcher_test.go
new file mode 100644
index 000000000..43e16284e
--- /dev/null
+++ b/weed/s3api/policy_engine/wildcard_matcher_test.go
@@ -0,0 +1,469 @@
+package policy_engine
+
+import (
+ "testing"
+)
+
// TestMatchesWildcard covers exact matches, '*' and '?' wildcard handling,
// empty pattern/string edge cases, and regex metacharacters that must be
// treated literally.
func TestMatchesWildcard(t *testing.T) {
	tests := []struct {
		name     string
		pattern  string
		str      string
		expected bool
	}{
		// Basic functionality tests
		{
			name:     "Exact match",
			pattern:  "test",
			str:      "test",
			expected: true,
		},
		{
			name:     "Single wildcard",
			pattern:  "*",
			str:      "anything",
			expected: true,
		},
		{
			name:     "Empty string with wildcard",
			pattern:  "*",
			str:      "",
			expected: true,
		},

		// Star (*) wildcard tests
		{
			name:     "Prefix wildcard",
			pattern:  "test*",
			str:      "test123",
			expected: true,
		},
		{
			name:     "Suffix wildcard",
			pattern:  "*test",
			str:      "123test",
			expected: true,
		},
		{
			name:     "Middle wildcard",
			pattern:  "test*123",
			str:      "testABC123",
			expected: true,
		},
		{
			name:     "Multiple wildcards",
			pattern:  "test*abc*123",
			str:      "testXYZabcDEF123",
			expected: true,
		},
		{
			name:     "No match",
			pattern:  "test*",
			str:      "other",
			expected: false,
		},

		// Question mark (?) wildcard tests
		{
			name:     "Single question mark",
			pattern:  "test?",
			str:      "test1",
			expected: true,
		},
		{
			name:     "Multiple question marks",
			pattern:  "test??",
			str:      "test12",
			expected: true,
		},
		{
			name:     "Question mark no match",
			pattern:  "test?",
			str:      "test12",
			expected: false,
		},
		{
			name:     "Mixed wildcards",
			pattern:  "test*abc?def",
			str:      "testXYZabc1def",
			expected: true,
		},

		// Edge cases
		{
			name:     "Empty pattern",
			pattern:  "",
			str:      "",
			expected: true,
		},
		{
			name:     "Empty pattern with string",
			pattern:  "",
			str:      "test",
			expected: false,
		},
		{
			name:     "Pattern with string empty",
			pattern:  "test",
			str:      "",
			expected: false,
		},

		// Special characters
		{
			name:     "Pattern with regex special chars",
			pattern:  "test[abc]",
			str:      "test[abc]",
			expected: true,
		},
		{
			name:     "Pattern with dots",
			pattern:  "test.txt",
			str:      "test.txt",
			expected: true,
		},
		{
			name:     "Pattern with dots and wildcard",
			pattern:  "*.txt",
			str:      "test.txt",
			expected: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := MatchesWildcard(tt.pattern, tt.str)
			if result != tt.expected {
				t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.str, tt.expected, result)
			}
		})
	}
}
+
// TestWildcardMatcher verifies that a single compiled WildcardMatcher
// produces the expected result across several candidate strings for star,
// question-mark, and mixed patterns.
func TestWildcardMatcher(t *testing.T) {
	tests := []struct {
		name     string
		pattern  string
		strings  []string
		expected []bool
	}{
		{
			name:     "Simple star pattern",
			pattern:  "test*",
			strings:  []string{"test", "test123", "testing", "other"},
			expected: []bool{true, true, true, false},
		},
		{
			name:     "Question mark pattern",
			pattern:  "test?",
			strings:  []string{"test1", "test2", "test", "test12"},
			expected: []bool{true, true, false, false},
		},
		{
			name:     "Mixed pattern",
			pattern:  "*.txt",
			strings:  []string{"file.txt", "test.txt", "file.doc", "txt"},
			expected: []bool{true, true, false, false},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			matcher, err := NewWildcardMatcher(tt.pattern)
			if err != nil {
				t.Fatalf("Failed to create matcher: %v", err)
			}

			for i, str := range tt.strings {
				result := matcher.Match(str)
				if result != tt.expected[i] {
					t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, str, tt.expected[i], result)
				}
			}
		})
	}
}
+
// TestCompileWildcardPattern checks that wildcard patterns compile to
// regexes with the expected matching behavior for '*' and '?' wildcards.
func TestCompileWildcardPattern(t *testing.T) {
	tests := []struct {
		name    string
		pattern string
		input   string
		want    bool
	}{
		{"Star wildcard", "s3:Get*", "s3:GetObject", true},
		{"Question mark wildcard", "s3:Get?bject", "s3:GetObject", true},
		{"Mixed wildcards", "s3:*Object*", "s3:GetObjectAcl", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			regex, err := CompileWildcardPattern(tt.pattern)
			if err != nil {
				t.Errorf("CompileWildcardPattern() error = %v", err)
				return
			}
			got := regex.MatchString(tt.input)
			if got != tt.want {
				t.Errorf("CompileWildcardPattern() = %v, want %v", got, tt.want)
			}
		})
	}
}
+
// BenchmarkWildcardMatchingPerformance demonstrates the performance
// benefits of the matcher cache by comparing the uncached MatchesWildcard
// path against the cached FastMatchesWildcard path over a fixed
// pattern/input matrix.
func BenchmarkWildcardMatchingPerformance(b *testing.B) {
	patterns := []string{
		"s3:Get*",
		"s3:Put*",
		"s3:Delete*",
		"s3:List*",
		"arn:aws:s3:::bucket/*",
		"arn:aws:s3:::bucket/prefix*",
		"user:*",
		"user:admin-*",
	}

	inputs := []string{
		"s3:GetObject",
		"s3:PutObject",
		"s3:DeleteObject",
		"s3:ListBucket",
		"arn:aws:s3:::bucket/file.txt",
		"arn:aws:s3:::bucket/prefix/file.txt",
		"user:admin",
		"user:admin-john",
	}

	b.Run("WithoutCache", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			for _, pattern := range patterns {
				for _, input := range inputs {
					MatchesWildcard(pattern, input)
				}
			}
		}
	})

	b.Run("WithCache", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			for _, pattern := range patterns {
				for _, input := range inputs {
					FastMatchesWildcard(pattern, input)
				}
			}
		}
	})
}
+
// BenchmarkWildcardMatcherReuse demonstrates the performance benefits of
// reusing WildcardMatcher instances: constructing a matcher per match
// versus fetching one from the shared cache.
func BenchmarkWildcardMatcherReuse(b *testing.B) {
	pattern := "s3:Get*"
	input := "s3:GetObject"

	b.Run("NewMatcherEveryTime", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			matcher, _ := NewWildcardMatcher(pattern)
			matcher.Match(input)
		}
	})

	b.Run("CachedMatcher", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			matcher, _ := GetCachedWildcardMatcher(pattern)
			matcher.Match(input)
		}
	})
}
+
// TestWildcardMatcherCaching verifies that repeated lookups for the same
// pattern return the identical cached matcher instance (pointer equality)
// and that the cached matcher still matches correctly.
func TestWildcardMatcherCaching(t *testing.T) {
	pattern := "s3:Get*"

	// Get the first matcher
	matcher1, err := GetCachedWildcardMatcher(pattern)
	if err != nil {
		t.Fatalf("Failed to get cached matcher: %v", err)
	}

	// Get the second matcher - should be the same instance
	matcher2, err := GetCachedWildcardMatcher(pattern)
	if err != nil {
		t.Fatalf("Failed to get cached matcher: %v", err)
	}

	// Check that they're the same instance (same pointer)
	if matcher1 != matcher2 {
		t.Errorf("Expected same matcher instance, got different instances")
	}

	// Test that both matchers work correctly
	testInput := "s3:GetObject"
	if !matcher1.Match(testInput) {
		t.Errorf("First matcher failed to match %s", testInput)
	}
	if !matcher2.Match(testInput) {
		t.Errorf("Second matcher failed to match %s", testInput)
	}
}
+
+// TestFastMatchesWildcard verifies that the fast (cache-backed) matching function
+// produces correct positive and negative results for action, ARN, and principal patterns.
+func TestFastMatchesWildcard(t *testing.T) {
+	tests := []struct {
+		pattern string
+		input   string
+		want    bool
+	}{
+		{"s3:Get*", "s3:GetObject", true},
+		{"s3:Put*", "s3:GetObject", false},
+		{"arn:aws:s3:::bucket/*", "arn:aws:s3:::bucket/file.txt", true},
+		{"user:admin-*", "user:admin-john", true},
+		{"user:admin-*", "user:guest-john", false},
+	}
+
+	for _, tt := range tests {
+		// Subtest name combines pattern and input so failures are self-describing.
+		t.Run(tt.pattern+"_"+tt.input, func(t *testing.T) {
+			got := FastMatchesWildcard(tt.pattern, tt.input)
+			if got != tt.want {
+				t.Errorf("FastMatchesWildcard(%q, %q) = %v, want %v", tt.pattern, tt.input, got, tt.want)
+			}
+		})
+	}
+}
+
+// TestWildcardMatcherCacheBounding tests the bounded cache functionality: once the
+// cache is full, inserting a new pattern must evict an entry so the size stays at maxSize.
+func TestWildcardMatcherCacheBounding(t *testing.T) {
+	// Clear cache before test
+	wildcardMatcherCache.ClearCache()
+
+	// Shrink the cache for the test. Guard the maxSize read/write with the cache
+	// mutex so this does not race with any concurrent cache user (the unguarded
+	// write was flagged by -race review).
+	wildcardMatcherCache.mu.Lock()
+	originalMaxSize := wildcardMatcherCache.maxSize
+	wildcardMatcherCache.maxSize = 3
+	wildcardMatcherCache.mu.Unlock()
+	defer func() {
+		// Restore the original bound (under lock) and leave the cache empty.
+		wildcardMatcherCache.mu.Lock()
+		wildcardMatcherCache.maxSize = originalMaxSize
+		wildcardMatcherCache.mu.Unlock()
+		wildcardMatcherCache.ClearCache()
+	}()
+
+	// Add patterns up to max size
+	patterns := []string{"pattern1", "pattern2", "pattern3"}
+	for _, pattern := range patterns {
+		_, err := GetCachedWildcardMatcher(pattern)
+		if err != nil {
+			t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err)
+		}
+	}
+
+	// Verify cache size
+	size, maxSize := wildcardMatcherCache.GetCacheStats()
+	if size != 3 {
+		t.Errorf("Expected cache size 3, got %d", size)
+	}
+	if maxSize != 3 {
+		t.Errorf("Expected max size 3, got %d", maxSize)
+	}
+
+	// Add another pattern, should evict the least recently used
+	_, err := GetCachedWildcardMatcher("pattern4")
+	if err != nil {
+		t.Fatalf("Failed to get cached matcher for pattern4: %v", err)
+	}
+
+	// Cache should still be at max size
+	size, _ = wildcardMatcherCache.GetCacheStats()
+	if size != 3 {
+		t.Errorf("Expected cache size 3 after eviction, got %d", size)
+	}
+
+	// The first pattern should have been evicted (pattern1 was least recently used).
+	wildcardMatcherCache.mu.RLock()
+	if _, exists := wildcardMatcherCache.matchers["pattern1"]; exists {
+		t.Errorf("Expected pattern1 to be evicted, but it still exists")
+	}
+	if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists {
+		t.Errorf("Expected pattern4 to be in cache, but it doesn't exist")
+	}
+	wildcardMatcherCache.mu.RUnlock()
+}
+
+// TestWildcardMatcherCacheLRU tests the LRU eviction policy: re-accessing an entry
+// must refresh its recency so a later insertion evicts the true least-recently-used entry.
+func TestWildcardMatcherCacheLRU(t *testing.T) {
+	// Clear cache before test
+	wildcardMatcherCache.ClearCache()
+
+	// Shrink the cache for the test. Guard the maxSize read/write with the cache
+	// mutex so this does not race with any concurrent cache user (the unguarded
+	// write was flagged by -race review).
+	wildcardMatcherCache.mu.Lock()
+	originalMaxSize := wildcardMatcherCache.maxSize
+	wildcardMatcherCache.maxSize = 3
+	wildcardMatcherCache.mu.Unlock()
+	defer func() {
+		// Restore the original bound (under lock) and leave the cache empty.
+		wildcardMatcherCache.mu.Lock()
+		wildcardMatcherCache.maxSize = originalMaxSize
+		wildcardMatcherCache.mu.Unlock()
+		wildcardMatcherCache.ClearCache()
+	}()
+
+	// Add patterns to fill cache
+	patterns := []string{"pattern1", "pattern2", "pattern3"}
+	for _, pattern := range patterns {
+		_, err := GetCachedWildcardMatcher(pattern)
+		if err != nil {
+			t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err)
+		}
+	}
+
+	// Access pattern1 to make it most recently used
+	_, err := GetCachedWildcardMatcher("pattern1")
+	if err != nil {
+		t.Fatalf("Failed to access pattern1: %v", err)
+	}
+
+	// Add another pattern, should evict pattern2 (now least recently used)
+	_, err = GetCachedWildcardMatcher("pattern4")
+	if err != nil {
+		t.Fatalf("Failed to get cached matcher for pattern4: %v", err)
+	}
+
+	// pattern1 should still be in cache (was accessed recently)
+	// pattern2 should be evicted (was least recently used)
+	wildcardMatcherCache.mu.RLock()
+	if _, exists := wildcardMatcherCache.matchers["pattern1"]; !exists {
+		t.Errorf("Expected pattern1 to remain in cache (most recently used)")
+	}
+	if _, exists := wildcardMatcherCache.matchers["pattern2"]; exists {
+		t.Errorf("Expected pattern2 to be evicted (least recently used)")
+	}
+	if _, exists := wildcardMatcherCache.matchers["pattern3"]; !exists {
+		t.Errorf("Expected pattern3 to remain in cache")
+	}
+	if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists {
+		t.Errorf("Expected pattern4 to be in cache")
+	}
+	wildcardMatcherCache.mu.RUnlock()
+}
+
+// TestWildcardMatcherCacheClear tests the cache clearing functionality.
+func TestWildcardMatcherCacheClear(t *testing.T) {
+	// Ensure the shared cache is left empty even if the test exits early via
+	// Fatalf, so later tests in the package start from a clean state.
+	defer wildcardMatcherCache.ClearCache()
+
+	// Add some patterns to cache
+	patterns := []string{"pattern1", "pattern2", "pattern3"}
+	for _, pattern := range patterns {
+		_, err := GetCachedWildcardMatcher(pattern)
+		if err != nil {
+			t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err)
+		}
+	}
+
+	// Verify cache has patterns
+	size, _ := wildcardMatcherCache.GetCacheStats()
+	if size == 0 {
+		t.Errorf("Expected cache to have patterns before clearing")
+	}
+
+	// Clear cache
+	wildcardMatcherCache.ClearCache()
+
+	// Verify cache is empty
+	size, _ = wildcardMatcherCache.GetCacheStats()
+	if size != 0 {
+		t.Errorf("Expected cache to be empty after clearing, got size %d", size)
+	}
+}
diff --git a/weed/s3api/s3_constants/extend_key.go b/weed/s3api/s3_constants/extend_key.go
index 79fcbb239..e7eee0cc1 100644
--- a/weed/s3api/s3_constants/extend_key.go
+++ b/weed/s3api/s3_constants/extend_key.go
@@ -12,6 +12,9 @@ const (
ExtLatestVersionIdKey = "Seaweed-X-Amz-Latest-Version-Id"
ExtLatestVersionFileNameKey = "Seaweed-X-Amz-Latest-Version-File-Name"
+ // Bucket Policy
+ ExtBucketPolicyKey = "Seaweed-X-Amz-Bucket-Policy"
+
// Object Retention and Legal Hold
ExtObjectLockModeKey = "Seaweed-X-Amz-Object-Lock-Mode"
ExtRetentionUntilDateKey = "Seaweed-X-Amz-Retention-Until-Date"
diff --git a/weed/s3api/s3_constants/header.go b/weed/s3api/s3_constants/header.go
index 897931154..48e4609e0 100644
--- a/weed/s3api/s3_constants/header.go
+++ b/weed/s3api/s3_constants/header.go
@@ -65,7 +65,6 @@ const (
AmzIdentityId = "s3-identity-id"
AmzAccountId = "s3-account-id"
AmzAuthType = "s3-auth-type"
- AmzIsAdmin = "s3-is-admin" // only set to http request header as a context
)
func GetBucketAndObject(r *http.Request) (bucket, object string) {
diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go
index 864979784..a565ec115 100644
--- a/weed/s3api/s3_constants/s3_actions.go
+++ b/weed/s3api/s3_constants/s3_actions.go
@@ -1,14 +1,15 @@
package s3_constants
const (
- ACTION_READ = "Read"
- ACTION_READ_ACP = "ReadAcp"
- ACTION_WRITE = "Write"
- ACTION_WRITE_ACP = "WriteAcp"
- ACTION_ADMIN = "Admin"
- ACTION_TAGGING = "Tagging"
- ACTION_LIST = "List"
- ACTION_DELETE_BUCKET = "DeleteBucket"
+ ACTION_READ = "Read"
+ ACTION_READ_ACP = "ReadAcp"
+ ACTION_WRITE = "Write"
+ ACTION_WRITE_ACP = "WriteAcp"
+ ACTION_ADMIN = "Admin"
+ ACTION_TAGGING = "Tagging"
+ ACTION_LIST = "List"
+ ACTION_DELETE_BUCKET = "DeleteBucket"
+ ACTION_BYPASS_GOVERNANCE_RETENTION = "BypassGovernanceRetention"
SeaweedStorageDestinationHeader = "x-seaweedfs-destination"
MultipartUploadsFolder = ".uploads"
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index e5d1ec6ad..ecc6af2ac 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -225,10 +225,11 @@ func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorC
}
func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
- isAdmin := r.Header.Get(s3_constants.AmzIsAdmin) != ""
- if isAdmin {
+ // Check if user is properly authenticated as admin through IAM system
+ if s3a.isUserAdmin(r) {
return true
}
+
if entry.Extended == nil {
return true
}
@@ -243,6 +244,20 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
return true
}
+// isUserAdmin securely checks if the authenticated user is an admin.
+// This validates admin status through proper IAM authentication, not spoofable headers
+// (it replaces the removed AmzIsAdmin request-header check).
+func (s3a *S3ApiServer) isUserAdmin(r *http.Request) bool {
+	// Use a minimal admin action to authenticate and check admin status.
+	// Any authentication failure is treated as "not admin" rather than surfaced.
+	adminAction := Action("Admin")
+	identity, errCode := s3a.iam.authRequest(r, adminAction)
+	if errCode != s3err.ErrNone {
+		return false
+	}
+
+	// Check if the authenticated identity has admin privileges
+	return identity != nil && identity.isAdmin()
+}
+
// GetBucketAclHandler Get Bucket ACL
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) {
diff --git a/weed/s3api/s3api_governance_permissions_test.go b/weed/s3api/s3api_governance_permissions_test.go
new file mode 100644
index 000000000..2b8a35232
--- /dev/null
+++ b/weed/s3api/s3api_governance_permissions_test.go
@@ -0,0 +1,599 @@
+package s3api
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+)
+
+// TestCheckGovernanceBypassPermissionResourceGeneration tests that the function
+// correctly generates resource paths for the permission check. It exercises the
+// same "trim leading slash, then bucket + '/' + object" logic used in
+// checkGovernanceBypassPermission, without requiring an S3ApiServer.
+func TestCheckGovernanceBypassPermissionResourceGeneration(t *testing.T) {
+	tests := []struct {
+		name         string
+		bucket       string
+		object       string
+		expectedPath string
+		description  string
+	}{
+		{
+			name:         "simple_object",
+			bucket:       "test-bucket",
+			object:       "test-object.txt",
+			expectedPath: "test-bucket/test-object.txt",
+			description:  "Simple bucket and object should be joined with slash",
+		},
+		{
+			name:         "object_with_leading_slash",
+			bucket:       "test-bucket",
+			object:       "/test-object.txt",
+			expectedPath: "test-bucket/test-object.txt",
+			description:  "Leading slash should be trimmed from object name",
+		},
+		{
+			name:         "nested_object",
+			bucket:       "test-bucket",
+			object:       "/folder/subfolder/test-object.txt",
+			expectedPath: "test-bucket/folder/subfolder/test-object.txt",
+			description:  "Nested object path should be handled correctly",
+		},
+		{
+			name:         "empty_object",
+			bucket:       "test-bucket",
+			object:       "",
+			expectedPath: "test-bucket/",
+			description:  "Empty object should result in bucket with trailing slash",
+		},
+		{
+			name:         "root_object",
+			bucket:       "test-bucket",
+			object:       "/",
+			expectedPath: "test-bucket/",
+			description:  "Root object should result in bucket with trailing slash",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test the resource generation logic used in checkGovernanceBypassPermission
+			resource := strings.TrimPrefix(tt.object, "/")
+			actualPath := tt.bucket + "/" + resource
+
+			if actualPath != tt.expectedPath {
+				t.Errorf("Resource path generation failed. Expected: %s, Got: %s. %s",
+					tt.expectedPath, actualPath, tt.description)
+			}
+		})
+	}
+}
+
+// TestCheckGovernanceBypassPermissionActionGeneration tests that the function
+// correctly generates action strings for IAM checking. Action strings follow the
+// "<ACTION>:<bucket>/<object>" format for both the bypass and the admin action.
+func TestCheckGovernanceBypassPermissionActionGeneration(t *testing.T) {
+	tests := []struct {
+		name                 string
+		bucket               string
+		object               string
+		expectedBypassAction string
+		expectedAdminAction  string
+		description          string
+	}{
+		{
+			name:                 "bypass_action_generation",
+			bucket:               "test-bucket",
+			object:               "test-object.txt",
+			expectedBypassAction: "BypassGovernanceRetention:test-bucket/test-object.txt",
+			expectedAdminAction:  "Admin:test-bucket/test-object.txt",
+			description:          "Actions should be properly formatted with resource path",
+		},
+		{
+			name:                 "leading_slash_handling",
+			bucket:               "test-bucket",
+			object:               "/test-object.txt",
+			expectedBypassAction: "BypassGovernanceRetention:test-bucket/test-object.txt",
+			expectedAdminAction:  "Admin:test-bucket/test-object.txt",
+			description:          "Leading slash should be trimmed in action generation",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test the action generation logic used in checkGovernanceBypassPermission
+			resource := strings.TrimPrefix(tt.object, "/")
+			resourcePath := tt.bucket + "/" + resource
+
+			// Both actions share the same resource path; only the action constant differs.
+			bypassAction := s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION + ":" + resourcePath
+			adminAction := s3_constants.ACTION_ADMIN + ":" + resourcePath
+
+			if bypassAction != tt.expectedBypassAction {
+				t.Errorf("Bypass action generation failed. Expected: %s, Got: %s. %s",
+					tt.expectedBypassAction, bypassAction, tt.description)
+			}
+
+			if adminAction != tt.expectedAdminAction {
+				t.Errorf("Admin action generation failed. Expected: %s, Got: %s. %s",
+					tt.expectedAdminAction, adminAction, tt.description)
+			}
+		})
+	}
+}
+
+// TestCheckGovernanceBypassPermissionErrorHandling tests error handling scenarios.
+// It only exercises the resource-path generation for unusual inputs (empty bucket,
+// spaces, unicode) to confirm nothing panics; it does not call the real permission check.
+func TestCheckGovernanceBypassPermissionErrorHandling(t *testing.T) {
+	// Note: This test demonstrates the expected behavior for different error scenarios
+	// without requiring full IAM setup
+
+	tests := []struct {
+		name        string
+		bucket      string
+		object      string
+		description string
+	}{
+		{
+			name:        "empty_bucket",
+			bucket:      "",
+			object:      "test-object.txt",
+			description: "Empty bucket should be handled gracefully",
+		},
+		{
+			name:        "special_characters",
+			bucket:      "test-bucket",
+			object:      "test object with spaces.txt",
+			description: "Objects with special characters should be handled",
+		},
+		{
+			name:        "unicode_characters",
+			bucket:      "test-bucket",
+			object:      "测试文件.txt",
+			description: "Objects with unicode characters should be handled",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test that the function doesn't panic with various inputs
+			// This would normally call checkGovernanceBypassPermission
+			// but since we don't have a full S3ApiServer setup, we just test
+			// that the resource generation logic works without panicking
+			resource := strings.TrimPrefix(tt.object, "/")
+			resourcePath := tt.bucket + "/" + resource
+
+			// Verify the resource path is generated.
+			// NOTE(review): the "/" separator means resourcePath is never empty for
+			// these cases, so this assertion is a sanity check rather than a strong one.
+			if resourcePath == "" {
+				t.Errorf("Resource path should not be empty for test case: %s", tt.description)
+			}
+
+			t.Logf("Generated resource path for %s: %s", tt.description, resourcePath)
+		})
+	}
+}
+
+// TestCheckGovernanceBypassPermissionIntegrationBehavior documents the expected behavior
+// when integrated with a full IAM system. It is skipped unconditionally and exists
+// purely as executable documentation of the checkGovernanceBypassPermission contract.
+func TestCheckGovernanceBypassPermissionIntegrationBehavior(t *testing.T) {
+	t.Skip("Documentation test - describes expected behavior with full IAM integration")
+
+	// This test documents the expected behavior when checkGovernanceBypassPermission
+	// is called with a full IAM system:
+	//
+	// 1. Function calls s3a.iam.authRequest() with the bypass action
+	// 2. If authRequest returns errCode != s3err.ErrNone, function returns false
+	// 3. If authRequest succeeds, function checks identity.canDo() with the bypass action
+	// 4. If canDo() returns true, function returns true
+	// 5. If bypass permission fails, function checks admin action with identity.canDo()
+	// 6. If admin action succeeds, function returns true and logs admin access
+	// 7. If all checks fail, function returns false
+	//
+	// The function correctly uses:
+	// - s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION for bypass permission
+	// - s3_constants.ACTION_ADMIN for admin permission
+	// - Proper resource path generation with bucket/object format
+	// - Trimming of leading slashes from object names
+}
+
+// TestGovernanceBypassPermission was removed because it tested the old
+// insecure behavior of trusting the AmzIsAdmin header. The new implementation
+// uses proper IAM authentication instead of relying on client-provided headers.
+
+// Test specifically for users with IAM bypass permission.
+// Skipped: requires a configured IAM system; kept as documentation of the
+// expected authorization flow for non-admin users requesting governance bypass.
+func TestGovernanceBypassWithIAMPermission(t *testing.T) {
+	// This test demonstrates the expected behavior for non-admin users with bypass permission
+	// In a real implementation, this would integrate with the full IAM system
+
+	t.Skip("Integration test requires full IAM setup - demonstrates expected behavior")
+
+	// The expected behavior would be:
+	// 1. Non-admin user makes request with bypass header
+	// 2. checkGovernanceBypassPermission calls s3a.iam.authRequest
+	// 3. authRequest validates user identity and checks permissions
+	// 4. If user has s3:BypassGovernanceRetention permission, return true
+	// 5. Otherwise return false
+
+	// For now, the function correctly returns false for non-admin users
+	// when the IAM system doesn't have the user configured with bypass permission
+}
+
+// TestGovernancePermissionIntegration is a skipped placeholder documenting the
+// scenarios a full IAM-backed integration test should cover.
+func TestGovernancePermissionIntegration(t *testing.T) {
+	// Note: This test demonstrates the expected integration behavior
+	// In a real implementation, this would require setting up a proper IAM mock
+	// with identities that have the bypass governance permission
+
+	t.Skip("Integration test requires full IAM setup - demonstrates expected behavior")
+
+	// This test would verify:
+	// 1. User with BypassGovernanceRetention permission can bypass governance
+	// 2. User without permission cannot bypass governance
+	// 3. Admin users can always bypass governance
+	// 4. Anonymous users cannot bypass governance
+}
+
+// TestGovernanceBypassHeader verifies the header-parsing convention used by the
+// handlers: only the exact value "true" in x-amz-bypass-governance-retention
+// enables bypass; anything else (including absence) does not.
+func TestGovernanceBypassHeader(t *testing.T) {
+	tests := []struct {
+		name           string
+		headerValue    string
+		expectedResult bool
+		description    string
+	}{
+		{
+			name:           "bypass_header_true",
+			headerValue:    "true",
+			expectedResult: true,
+			description:    "Header with 'true' value should enable bypass",
+		},
+		{
+			name:           "bypass_header_false",
+			headerValue:    "false",
+			expectedResult: false,
+			description:    "Header with 'false' value should not enable bypass",
+		},
+		{
+			name:           "bypass_header_empty",
+			headerValue:    "",
+			expectedResult: false,
+			description:    "Empty header should not enable bypass",
+		},
+		{
+			name:           "bypass_header_invalid",
+			headerValue:    "invalid",
+			expectedResult: false,
+			description:    "Invalid header value should not enable bypass",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			req := httptest.NewRequest("DELETE", "/bucket/object", nil)
+			if tt.headerValue != "" {
+				req.Header.Set("x-amz-bypass-governance-retention", tt.headerValue)
+			}
+
+			// Mirror of the exact-equality check performed in the delete/put handlers.
+			result := req.Header.Get("x-amz-bypass-governance-retention") == "true"
+
+			if result != tt.expectedResult {
+				t.Errorf("bypass header check = %v, want %v. %s", result, tt.expectedResult, tt.description)
+			}
+		})
+	}
+}
+
+// TestGovernanceRetentionModeChecking simulates the retention-mode decision table:
+// COMPLIANCE can never be bypassed; GOVERNANCE requires both the bypass request
+// and the bypass permission. The simulation mirrors checkObjectLockPermissions
+// without invoking the real implementation.
+func TestGovernanceRetentionModeChecking(t *testing.T) {
+	tests := []struct {
+		name              string
+		retentionMode     string
+		bypassGovernance  bool
+		hasPermission     bool
+		expectedError     bool
+		expectedErrorType string
+		description       string
+	}{
+		{
+			name:              "compliance_mode_cannot_bypass",
+			retentionMode:     s3_constants.RetentionModeCompliance,
+			bypassGovernance:  true,
+			hasPermission:     true,
+			expectedError:     true,
+			expectedErrorType: "compliance mode",
+			description:       "Compliance mode should not be bypassable even with permission",
+		},
+		{
+			name:              "governance_mode_without_bypass",
+			retentionMode:     s3_constants.RetentionModeGovernance,
+			bypassGovernance:  false,
+			hasPermission:     false,
+			expectedError:     true,
+			expectedErrorType: "governance mode",
+			description:       "Governance mode should be blocked without bypass",
+		},
+		{
+			name:              "governance_mode_with_bypass_no_permission",
+			retentionMode:     s3_constants.RetentionModeGovernance,
+			bypassGovernance:  true,
+			hasPermission:     false,
+			expectedError:     true,
+			expectedErrorType: "permission",
+			description:       "Governance mode bypass should fail without permission",
+		},
+		{
+			name:              "governance_mode_with_bypass_and_permission",
+			retentionMode:     s3_constants.RetentionModeGovernance,
+			bypassGovernance:  true,
+			hasPermission:     true,
+			expectedError:     false,
+			expectedErrorType: "",
+			description:       "Governance mode bypass should succeed with permission",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test validates the logic without actually needing the full implementation
+			// This demonstrates the expected behavior patterns
+
+			var hasError bool
+			var errorType string
+
+			// Decision table: compliance always errors; governance errors unless
+			// bypass was requested AND the caller holds the bypass permission.
+			if tt.retentionMode == s3_constants.RetentionModeCompliance {
+				hasError = true
+				errorType = "compliance mode"
+			} else if tt.retentionMode == s3_constants.RetentionModeGovernance {
+				if !tt.bypassGovernance {
+					hasError = true
+					errorType = "governance mode"
+				} else if !tt.hasPermission {
+					hasError = true
+					errorType = "permission"
+				}
+			}
+
+			if hasError != tt.expectedError {
+				t.Errorf("expected error: %v, got error: %v. %s", tt.expectedError, hasError, tt.description)
+			}
+
+			if tt.expectedError && !strings.Contains(errorType, tt.expectedErrorType) {
+				t.Errorf("expected error type containing '%s', got '%s'. %s", tt.expectedErrorType, errorType, tt.description)
+			}
+		})
+	}
+}
+
+// TestGovernancePermissionActionGeneration verifies action-string construction when
+// the object already carries the "/" prefix produced by GetBucketAndObject, i.e.
+// simple concatenation (no extra separator) yields "Action:bucket/object".
+func TestGovernancePermissionActionGeneration(t *testing.T) {
+	tests := []struct {
+		name           string
+		bucket         string
+		object         string
+		expectedAction string
+		description    string
+	}{
+		{
+			name:           "bucket_and_object_action",
+			bucket:         "test-bucket",
+			object:         "/test-object", // Object has "/" prefix from GetBucketAndObject
+			expectedAction: "BypassGovernanceRetention:test-bucket/test-object",
+			description:    "Action should be generated correctly for bucket and object",
+		},
+		{
+			name:           "bucket_only_action",
+			bucket:         "test-bucket",
+			object:         "",
+			expectedAction: "BypassGovernanceRetention:test-bucket",
+			description:    "Action should be generated correctly for bucket only",
+		},
+		{
+			name:           "nested_object_action",
+			bucket:         "test-bucket",
+			object:         "/folder/subfolder/object", // Object has "/" prefix from GetBucketAndObject
+			expectedAction: "BypassGovernanceRetention:test-bucket/folder/subfolder/object",
+			description:    "Action should be generated correctly for nested objects",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// No "/" inserted here: tt.object supplies its own leading slash (or is empty).
+			action := s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION + ":" + tt.bucket + tt.object
+
+			if action != tt.expectedAction {
+				t.Errorf("generated action: %s, expected: %s. %s", action, tt.expectedAction, tt.description)
+			}
+		})
+	}
+}
+
+// TestGovernancePermissionEndToEnd tests the complete object lock permission flow.
+// Skipped: requires a full S3 API server; retained as documentation of which
+// handlers invoke checkObjectLockPermissions and with what inputs.
+func TestGovernancePermissionEndToEnd(t *testing.T) {
+	t.Skip("End-to-end testing requires full S3 API server setup - demonstrates expected behavior")
+
+	// This test demonstrates the end-to-end flow that would be tested in a full integration test
+	// The checkObjectLockPermissions method is called by:
+	// 1. DeleteObjectHandler - when versioning is enabled and object lock is configured
+	// 2. DeleteMultipleObjectsHandler - for each object in versioned buckets
+	// 3. PutObjectHandler - via checkObjectLockPermissionsForPut for versioned buckets
+	// 4. PutObjectRetentionHandler - when setting retention on objects
+	//
+	// Each handler:
+	// - Extracts bypassGovernance from "x-amz-bypass-governance-retention" header
+	// - Calls checkObjectLockPermissions with the appropriate parameters
+	// - Handles the returned errors appropriately (ErrAccessDenied, etc.)
+	//
+	// The method integrates with the IAM system through checkGovernanceBypassPermission
+	// which validates the s3:BypassGovernanceRetention permission
+}
+
+// TestGovernancePermissionHTTPFlow tests the HTTP header processing and method calls:
+// the handlers treat only the literal value "true" as a bypass request.
+func TestGovernancePermissionHTTPFlow(t *testing.T) {
+	tests := []struct {
+		name                     string
+		headerValue              string
+		expectedBypassGovernance bool
+	}{
+		{
+			name:                     "bypass_header_true",
+			headerValue:              "true",
+			expectedBypassGovernance: true,
+		},
+		{
+			name:                     "bypass_header_false",
+			headerValue:              "false",
+			expectedBypassGovernance: false,
+		},
+		{
+			name:                     "bypass_header_missing",
+			headerValue:              "",
+			expectedBypassGovernance: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create a mock HTTP request
+			req, _ := http.NewRequest("DELETE", "/bucket/test-object", nil)
+			if tt.headerValue != "" {
+				req.Header.Set("x-amz-bypass-governance-retention", tt.headerValue)
+			}
+
+			// Test the header processing logic used in handlers
+			bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true"
+
+			if bypassGovernance != tt.expectedBypassGovernance {
+				t.Errorf("Expected bypassGovernance to be %v, got %v", tt.expectedBypassGovernance, bypassGovernance)
+			}
+		})
+	}
+}
+
+// TestGovernancePermissionMethodCalls tests that the governance permission methods
+// are called correctly, by replaying the parameter-extraction steps the handlers
+// perform (bucket/object, versionId, bypass header) on synthetic requests.
+func TestGovernancePermissionMethodCalls(t *testing.T) {
+	// Test that demonstrates the method call pattern used in handlers
+
+	// This is the pattern used in DeleteObjectHandler:
+	t.Run("delete_object_handler_pattern", func(t *testing.T) {
+		req, _ := http.NewRequest("DELETE", "/bucket/test-object", nil)
+		req.Header.Set("x-amz-bypass-governance-retention", "true")
+
+		// Extract parameters as done in the handler
+		bucket, object := s3_constants.GetBucketAndObject(req)
+		versionId := req.URL.Query().Get("versionId")
+		bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true"
+
+		// Verify the parameters are extracted correctly
+		// Note: The actual bucket and object extraction depends on the URL structure
+		t.Logf("Extracted bucket: %s, object: %s", bucket, object)
+		if versionId != "" {
+			t.Errorf("Expected versionId to be empty, got %v", versionId)
+		}
+		if !bypassGovernance {
+			t.Errorf("Expected bypassGovernance to be true")
+		}
+	})
+
+	// This is the pattern used in PutObjectHandler:
+	t.Run("put_object_handler_pattern", func(t *testing.T) {
+		req, _ := http.NewRequest("PUT", "/bucket/test-object", nil)
+		req.Header.Set("x-amz-bypass-governance-retention", "true")
+
+		// Extract parameters as done in the handler
+		bucket, object := s3_constants.GetBucketAndObject(req)
+		bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true"
+		versioningEnabled := true // Would be determined by isVersioningEnabled(bucket)
+
+		// Verify the parameters are extracted correctly
+		// Note: The actual bucket and object extraction depends on the URL structure
+		t.Logf("Extracted bucket: %s, object: %s", bucket, object)
+		if !bypassGovernance {
+			t.Errorf("Expected bypassGovernance to be true")
+		}
+		if !versioningEnabled {
+			t.Errorf("Expected versioningEnabled to be true")
+		}
+	})
+}
+
+// TestGovernanceBypassNotPermittedError tests that ErrGovernanceBypassNotPermitted
+// is returned when bypass is requested but the user lacks permission. It checks the
+// sentinel error's definition and message, then simulates the error-selection logic
+// of checkObjectLockPermissions for governance-mode retention.
+func TestGovernanceBypassNotPermittedError(t *testing.T) {
+	// Defensive check that the sentinel error variable is defined (non-nil).
+	if ErrGovernanceBypassNotPermitted == nil {
+		t.Error("ErrGovernanceBypassNotPermitted should be defined")
+	}
+
+	// Verify the error message
+	expectedMessage := "user does not have permission to bypass governance retention"
+	if ErrGovernanceBypassNotPermitted.Error() != expectedMessage {
+		t.Errorf("expected error message '%s', got '%s'",
+			expectedMessage, ErrGovernanceBypassNotPermitted.Error())
+	}
+
+	// Test the scenario where this error should be returned
+	// This documents the expected behavior when:
+	// 1. Object is under governance retention
+	// 2. bypassGovernance is true
+	// 3. checkGovernanceBypassPermission returns false
+	testCases := []struct {
+		name             string
+		retentionMode    string
+		bypassGovernance bool
+		hasPermission    bool
+		expectedError    error
+		description      string
+	}{
+		{
+			name:             "governance_bypass_without_permission",
+			retentionMode:    s3_constants.RetentionModeGovernance,
+			bypassGovernance: true,
+			hasPermission:    false,
+			expectedError:    ErrGovernanceBypassNotPermitted,
+			description:      "Should return ErrGovernanceBypassNotPermitted when bypass is requested but user lacks permission",
+		},
+		{
+			name:             "governance_bypass_with_permission",
+			retentionMode:    s3_constants.RetentionModeGovernance,
+			bypassGovernance: true,
+			hasPermission:    true,
+			expectedError:    nil,
+			description:      "Should succeed when bypass is requested and user has permission",
+		},
+		{
+			name:             "governance_no_bypass",
+			retentionMode:    s3_constants.RetentionModeGovernance,
+			bypassGovernance: false,
+			hasPermission:    false,
+			expectedError:    ErrGovernanceModeActive,
+			description:      "Should return ErrGovernanceModeActive when bypass is not requested",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// This test documents the expected behavior pattern
+			// The actual checkObjectLockPermissions method implements this logic:
+			//   if retention.Mode == s3_constants.RetentionModeGovernance {
+			//       if !bypassGovernance {
+			//           return ErrGovernanceModeActive
+			//       }
+			//       if !s3a.checkGovernanceBypassPermission(request, bucket, object) {
+			//           return ErrGovernanceBypassNotPermitted
+			//       }
+			//   }
+
+			// Sentinel comparison is by identity (==), matching how callers use errors.Is.
+			var simulatedError error
+			if tc.retentionMode == s3_constants.RetentionModeGovernance {
+				if !tc.bypassGovernance {
+					simulatedError = ErrGovernanceModeActive
+				} else if !tc.hasPermission {
+					simulatedError = ErrGovernanceBypassNotPermitted
+				}
+			}
+
+			if simulatedError != tc.expectedError {
+				t.Errorf("expected error %v, got %v. %s", tc.expectedError, simulatedError, tc.description)
+			}
+
+			// Verify ErrGovernanceBypassNotPermitted is returned in the right case
+			if tc.name == "governance_bypass_without_permission" && simulatedError != ErrGovernanceBypassNotPermitted {
+				t.Errorf("Test case should return ErrGovernanceBypassNotPermitted but got %v", simulatedError)
+			}
+		})
+	}
+}
diff --git a/weed/s3api/s3api_object_handlers_delete.go b/weed/s3api/s3api_object_handlers_delete.go
index 35c842e6c..05c93a913 100644
--- a/weed/s3api/s3api_object_handlers_delete.go
+++ b/weed/s3api/s3api_object_handlers_delete.go
@@ -52,7 +52,7 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
// Check object lock permissions before deletion (only for versioned buckets)
if versioningEnabled {
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
- if err := s3a.checkObjectLockPermissions(bucket, object, versionId, bypassGovernance); err != nil {
+ if err := s3a.checkObjectLockPermissions(r, bucket, object, versionId, bypassGovernance); err != nil {
glog.V(2).Infof("DeleteObjectHandler: object lock check failed for %s/%s: %v", bucket, object, err)
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
return
@@ -218,7 +218,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
// Check object lock permissions before deletion (only for versioned buckets)
if versioningEnabled {
- if err := s3a.checkObjectLockPermissions(bucket, object.Key, object.VersionId, bypassGovernance); err != nil {
+ if err := s3a.checkObjectLockPermissions(r, bucket, object.Key, object.VersionId, bypassGovernance); err != nil {
glog.V(2).Infof("DeleteMultipleObjectsHandler: object lock check failed for %s/%s (version: %s): %v", bucket, object.Key, object.VersionId, err)
deleteErrors = append(deleteErrors, DeleteError{
Code: s3err.GetAPIError(s3err.ErrAccessDenied).Code,
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index 371ab870f..29c31b6bd 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -87,7 +87,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// Check object lock permissions before PUT operation (only for versioned buckets)
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
- if err := s3a.checkObjectLockPermissionsForPut(bucket, object, bypassGovernance, versioningEnabled); err != nil {
+ if err := s3a.checkObjectLockPermissionsForPut(r, bucket, object, bypassGovernance, versioningEnabled); err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
return
}
diff --git a/weed/s3api/s3api_object_retention.go b/weed/s3api/s3api_object_retention.go
index bedf693ef..6747ac84c 100644
--- a/weed/s3api/s3api_object_retention.go
+++ b/weed/s3api/s3api_object_retention.go
@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"strconv"
+ "strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -26,6 +27,12 @@ var (
ErrGovernanceModeActive = errors.New("object is under GOVERNANCE mode retention and cannot be deleted or modified without bypass")
)
+// Error definitions for Object Lock
+var (
+ ErrObjectUnderLegalHold = errors.New("object is under legal hold and cannot be deleted or modified")
+ ErrGovernanceBypassNotPermitted = errors.New("user does not have permission to bypass governance retention")
+)
+
const (
// Maximum retention period limits according to AWS S3 specifications
MaxRetentionDays = 36500 // Maximum number of days for object retention (100 years)
@@ -103,13 +110,13 @@ func (or *ObjectRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
// This approach is optimized for small XML payloads typical in S3 API requests
// (retention configurations, legal hold settings, etc.) where the overhead of
// streaming parsing is acceptable for the memory efficiency benefits.
-func parseXML[T any](r *http.Request, result *T) error {
- if r.Body == nil {
+func parseXML[T any](request *http.Request, result *T) error {
+ if request.Body == nil {
return fmt.Errorf("error parsing XML: empty request body")
}
- defer r.Body.Close()
+ defer request.Body.Close()
- decoder := xml.NewDecoder(r.Body)
+ decoder := xml.NewDecoder(request.Body)
if err := decoder.Decode(result); err != nil {
return fmt.Errorf("error parsing XML: %v", err)
}
@@ -118,27 +125,27 @@ func parseXML[T any](r *http.Request, result *T) error {
}
// parseObjectRetention parses XML retention configuration from request body
-func parseObjectRetention(r *http.Request) (*ObjectRetention, error) {
+func parseObjectRetention(request *http.Request) (*ObjectRetention, error) {
var retention ObjectRetention
- if err := parseXML(r, &retention); err != nil {
+ if err := parseXML(request, &retention); err != nil {
return nil, err
}
return &retention, nil
}
// parseObjectLegalHold parses XML legal hold configuration from request body
-func parseObjectLegalHold(r *http.Request) (*ObjectLegalHold, error) {
+func parseObjectLegalHold(request *http.Request) (*ObjectLegalHold, error) {
var legalHold ObjectLegalHold
- if err := parseXML(r, &legalHold); err != nil {
+ if err := parseXML(request, &legalHold); err != nil {
return nil, err
}
return &legalHold, nil
}
// parseObjectLockConfiguration parses XML object lock configuration from request body
-func parseObjectLockConfiguration(r *http.Request) (*ObjectLockConfiguration, error) {
+func parseObjectLockConfiguration(request *http.Request) (*ObjectLockConfiguration, error) {
var config ObjectLockConfiguration
- if err := parseXML(r, &config); err != nil {
+ if err := parseXML(request, &config); err != nil {
return nil, err
}
return &config, nil
@@ -514,8 +521,39 @@ func (s3a *S3ApiServer) isObjectLegalHoldActive(bucket, object, versionId string
return legalHold.Status == s3_constants.LegalHoldOn, nil
}
+// checkGovernanceBypassPermission checks if the user has permission to bypass governance retention
+func (s3a *S3ApiServer) checkGovernanceBypassPermission(request *http.Request, bucket, object string) bool {
+ // Use the existing IAM auth system to check the specific permission
+ // Create the governance bypass action with proper bucket/object concatenation
+ // Note: path.Join would drop bucket if object has leading slash, so use explicit formatting
+ resource := fmt.Sprintf("%s/%s", bucket, strings.TrimPrefix(object, "/"))
+ action := Action(fmt.Sprintf("%s:%s", s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION, resource))
+
+ // Use the IAM system to authenticate and authorize this specific action
+ identity, errCode := s3a.iam.authRequest(request, action)
+ if errCode != s3err.ErrNone {
+ glog.V(3).Infof("IAM auth failed for governance bypass: %v", errCode)
+ return false
+ }
+
+ // Verify that the authenticated identity can perform this action
+ if identity != nil && identity.canDo(action, bucket, object) {
+ return true
+ }
+
+ // Additional check: allow users with Admin action to bypass governance retention
+ // Use the proper S3 Admin action constant instead of generic isAdmin() method
+ adminAction := Action(fmt.Sprintf("%s:%s", s3_constants.ACTION_ADMIN, resource))
+ if identity != nil && identity.canDo(adminAction, bucket, object) {
+ glog.V(2).Infof("Admin user %s granted governance bypass permission for %s/%s", identity.Name, bucket, object)
+ return true
+ }
+
+ return false
+}
+
// checkObjectLockPermissions checks if an object can be deleted or modified
-func (s3a *S3ApiServer) checkObjectLockPermissions(bucket, object, versionId string, bypassGovernance bool) error {
+func (s3a *S3ApiServer) checkObjectLockPermissions(request *http.Request, bucket, object, versionId string, bypassGovernance bool) error {
// Get retention configuration and status in a single call to avoid duplicate fetches
retention, retentionActive, err := s3a.getObjectRetentionWithStatus(bucket, object, versionId)
if err != nil {
@@ -530,7 +568,7 @@ func (s3a *S3ApiServer) checkObjectLockPermissions(bucket, object, versionId str
// If object is under legal hold, it cannot be deleted or modified
if legalHoldActive {
- return fmt.Errorf("object is under legal hold and cannot be deleted or modified")
+ return ErrObjectUnderLegalHold
}
// If object is under retention, check the mode
@@ -539,8 +577,16 @@ func (s3a *S3ApiServer) checkObjectLockPermissions(bucket, object, versionId str
return ErrComplianceModeActive
}
- if retention.Mode == s3_constants.RetentionModeGovernance && !bypassGovernance {
- return ErrGovernanceModeActive
+ if retention.Mode == s3_constants.RetentionModeGovernance {
+ if !bypassGovernance {
+ return ErrGovernanceModeActive
+ }
+
+ // If bypass is requested, check if user has permission
+ if !s3a.checkGovernanceBypassPermission(request, bucket, object) {
+ glog.V(2).Infof("User does not have s3:BypassGovernanceRetention permission for %s/%s", bucket, object)
+ return ErrGovernanceBypassNotPermitted
+ }
}
}
@@ -567,14 +613,14 @@ func (s3a *S3ApiServer) isObjectLockAvailable(bucket string) error {
// checkObjectLockPermissionsForPut checks object lock permissions for PUT operations
// This is a shared helper to avoid code duplication in PUT handlers
-func (s3a *S3ApiServer) checkObjectLockPermissionsForPut(bucket, object string, bypassGovernance bool, versioningEnabled bool) error {
+func (s3a *S3ApiServer) checkObjectLockPermissionsForPut(request *http.Request, bucket, object string, bypassGovernance bool, versioningEnabled bool) error {
// Object Lock only applies to versioned buckets (AWS S3 requirement)
if !versioningEnabled {
return nil
}
// For PUT operations, we check permissions on the current object (empty versionId)
- if err := s3a.checkObjectLockPermissions(bucket, object, "", bypassGovernance); err != nil {
+ if err := s3a.checkObjectLockPermissions(request, bucket, object, "", bypassGovernance); err != nil {
glog.V(2).Infof("checkObjectLockPermissionsForPut: object lock check failed for %s/%s: %v", bucket, object, err)
return err
}
@@ -584,13 +630,13 @@ func (s3a *S3ApiServer) checkObjectLockPermissionsForPut(bucket, object string,
// handleObjectLockAvailabilityCheck is a helper function to check object lock availability
// and write the appropriate error response if not available. This reduces code duplication
// across all retention handlers.
-func (s3a *S3ApiServer) handleObjectLockAvailabilityCheck(w http.ResponseWriter, r *http.Request, bucket, handlerName string) bool {
+func (s3a *S3ApiServer) handleObjectLockAvailabilityCheck(w http.ResponseWriter, request *http.Request, bucket, handlerName string) bool {
if err := s3a.isObjectLockAvailable(bucket); err != nil {
glog.Errorf("%s: object lock not available for bucket %s: %v", handlerName, bucket, err)
if errors.Is(err, ErrBucketNotFound) {
- s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
+ s3err.WriteErrorResponse(w, request, s3err.ErrNoSuchBucket)
} else {
- s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
+ s3err.WriteErrorResponse(w, request, s3err.ErrInvalidRequest)
}
return false
}