aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/binaries_release4.yml60
-rw-r--r--.github/workflows/codeql.yml43
-rw-r--r--.github/workflows/container_release2.yml3
-rw-r--r--.github/workflows/container_release4.yml58
-rw-r--r--.github/workflows/container_release5.yml58
-rw-r--r--.github/workflows/depsreview.yml2
-rw-r--r--.github/workflows/go.yml2
-rw-r--r--README.md26
-rw-r--r--docker/Dockerfile.go_build3
-rw-r--r--docker/Dockerfile.go_build_large43
-rw-r--r--docker/Dockerfile.rocksdb_large2
-rw-r--r--docker/Makefile9
-rw-r--r--go.mod28
-rw-r--r--go.sum55
-rw-r--r--k8s/helm_charts2/Chart.yaml4
-rwxr-xr-xtest/s3/compatibility/run.sh6
-rw-r--r--unmaintained/fix_dat/fix_dat.go2
-rw-r--r--weed/command/autocomplete.go6
-rw-r--r--weed/command/backup.go2
-rw-r--r--weed/command/benchmark.go6
-rw-r--r--weed/command/filer.go1
-rw-r--r--weed/command/s3.go23
-rw-r--r--weed/command/scaffold/security.toml5
-rw-r--r--weed/command/server.go1
-rw-r--r--weed/pb/Makefile1
-rw-r--r--weed/pb/s3.proto25
-rw-r--r--weed/pb/s3_pb/s3.pb.go209
-rw-r--r--weed/pb/s3_pb/s3_grpc.pb.go101
-rw-r--r--weed/s3api/auth_credentials.go6
-rw-r--r--weed/s3api/auth_credentials_subscribe.go2
-rw-r--r--weed/s3api/http/header.go9
-rw-r--r--weed/s3api/s3api_object_copy_handlers.go124
-rw-r--r--weed/s3api/s3api_object_copy_handlers_test.go426
-rw-r--r--weed/s3api/s3api_server.go2
-rw-r--r--weed/s3api/s3api_server_grpc.go16
-rw-r--r--weed/shell/command_volume_fix_replication.go11
-rw-r--r--weed/shell/command_volume_fsck.go2
-rw-r--r--weed/util/constants.go2
38 files changed, 1250 insertions, 134 deletions
diff --git a/.github/workflows/binaries_release4.yml b/.github/workflows/binaries_release4.yml
new file mode 100644
index 000000000..7621c32e2
--- /dev/null
+++ b/.github/workflows/binaries_release4.yml
@@ -0,0 +1,60 @@
+# This is a basic workflow to help you get started with Actions
+
+name: "go: build versioned binaries for linux with all tags"
+
+on:
+ push:
+ tags:
+ - '*'
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+permissions:
+ contents: read
+
+jobs:
+
+ build-release-binaries_linux:
+ permissions:
+ contents: write # for wangyoucao577/go-release-action to upload release assets
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ goos: [linux]
+ goarch: [amd64]
+
+ # Steps represent a sequence of tasks that will be executed as part of the job
+ steps:
+ # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+ - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+ - name: Go Release Binaries Normal Volume Size
+ uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ goos: ${{ matrix.goos }}
+ goarch: ${{ matrix.goarch }}
+ overwrite: true
+ build_flags: -tags elastic,ydb,gocdk,hdfs
+ pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
+ # build_flags: -tags 5BytesOffset # optional, default is
+ ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ # Where to run `go build .`
+ project_path: weed
+ binary_name: weed
+ asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
+ - name: Go Release Large Disk Binaries
+ uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ goos: ${{ matrix.goos }}
+ goarch: ${{ matrix.goarch }}
+ overwrite: true
+ pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
+ build_flags: -tags 5BytesOffset,elastic,ydb,gocdk,hdfs
+ ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ # Where to run `go build .`
+ project_path: weed
+ binary_name: weed
+ asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full_large_disk"
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 000000000..142e4e963
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,43 @@
+name: "Code Scanning - Action"
+
+on:
+ pull_request:
+
+jobs:
+ CodeQL-Build:
+ # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
+ runs-on: ubuntu-latest
+
+ permissions:
+ # required for all workflows
+ security-events: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ # Override language selection by uncommenting this and choosing your languages
+ with:
+ languages: go
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below).
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # â„šī¸ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # âœī¸ If the Autobuild fails above, remove it and uncomment the following
+ # three lines and modify them (or add more) to build your code if your
+ # project uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/container_release2.yml b/.github/workflows/container_release2.yml
index 6da882e38..a02ab4f87 100644
--- a/.github/workflows/container_release2.yml
+++ b/.github/workflows/container_release2.yml
@@ -52,7 +52,8 @@ jobs:
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
- file: ./docker/Dockerfile.go_build_large
+ file: ./docker/Dockerfile.go_build
+ build-args: TAGS=5BytesOffset
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
diff --git a/.github/workflows/container_release4.yml b/.github/workflows/container_release4.yml
new file mode 100644
index 000000000..92d776f79
--- /dev/null
+++ b/.github/workflows/container_release4.yml
@@ -0,0 +1,58 @@
+name: "docker: build release containers for all tags"
+
+on:
+ push:
+ tags:
+ - '*'
+ workflow_dispatch: []
+
+permissions:
+ contents: read
+
+jobs:
+ build-default-release-container:
+ runs-on: [ubuntu-latest]
+
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+ -
+ name: Docker meta
+ id: docker_meta
+ uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
+ with:
+ images: |
+ chrislusf/seaweedfs
+ tags: |
+ type=ref,event=tag,suffix=_full
+ flavor: |
+ latest=false
+ labels: |
+ org.opencontainers.image.title=seaweedfs
+ org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
+ org.opencontainers.image.vendor=Chris Lu
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
+ -
+ name: Login to Docker Hub
+ if: github.event_name != 'pull_request'
+ uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+ -
+ name: Build
+ uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
+ with:
+ context: ./docker
+ push: ${{ github.event_name != 'pull_request' }}
+ file: ./docker/Dockerfile.go_build
+ build-args: TAGS=elastic,ydb,gocdk,hdfs
+ platforms: linux/amd64
+ tags: ${{ steps.docker_meta.outputs.tags }}
+ labels: ${{ steps.docker_meta.outputs.labels }}
diff --git a/.github/workflows/container_release5.yml b/.github/workflows/container_release5.yml
new file mode 100644
index 000000000..820527f5b
--- /dev/null
+++ b/.github/workflows/container_release5.yml
@@ -0,0 +1,58 @@
+name: "docker: build release containers for all tags and large volume"
+
+on:
+ push:
+ tags:
+ - '*'
+ workflow_dispatch: []
+
+permissions:
+ contents: read
+
+jobs:
+ build-default-release-container:
+ runs-on: [ubuntu-latest]
+
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+ -
+ name: Docker meta
+ id: docker_meta
+ uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
+ with:
+ images: |
+ chrislusf/seaweedfs
+ tags: |
+ type=ref,event=tag,suffix=_large_disk_full
+ flavor: |
+ latest=false
+ labels: |
+ org.opencontainers.image.title=seaweedfs
+ org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
+ org.opencontainers.image.vendor=Chris Lu
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
+ -
+ name: Login to Docker Hub
+ if: github.event_name != 'pull_request'
+ uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+ -
+ name: Build
+ uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
+ with:
+ context: ./docker
+ push: ${{ github.event_name != 'pull_request' }}
+ file: ./docker/Dockerfile.go_build
+ build-args: TAGS=5BytesOffset,elastic,ydb,gocdk,hdfs
+ platforms: linux/amd64
+ tags: ${{ steps.docker_meta.outputs.tags }}
+ labels: ${{ steps.docker_meta.outputs.labels }}
diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml
index 626c5d154..5a865ab0c 100644
--- a/.github/workflows/depsreview.yml
+++ b/.github/workflows/depsreview.yml
@@ -11,4 +11,4 @@ jobs:
- name: 'Checkout Repository'
uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748
- name: 'Dependency Review'
- uses: actions/dependency-review-action@3f943b86c9a289f4e632c632695e2e0898d9d67d
+ uses: actions/dependency-review-action@39e692fa323107ef86d8fdac0067ce647f239bd7
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index f40e759d3..cda96b954 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: Set up Go 1.x
- uses: actions/setup-go@f6164bd8c8acb4a71fb2791a8b6c4024ff038dab # v2
+ uses: actions/setup-go@fcdc43634adb5f7ae75a9d7a9b9361790f7293e2 # v2
with:
go-version: ^1.13
id: go
diff --git a/README.md b/README.md
index b04c5188c..a0fdd1492 100644
--- a/README.md
+++ b/README.md
@@ -31,13 +31,11 @@ Your support will be really appreciated by me and other supporters!
</p>
-->
-
### Gold Sponsors
- [![nodion](https://www.nodion.com/img/logo.svg)](https://www.nodion.com)
---
-
- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
@@ -61,7 +59,7 @@ Table of Contents
* [Additional Features](#additional-features)
* [Filer Features](#filer-features)
* [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store)
-* [Architecture](#architecture)
+* [Architecture](#Object-Store-Architecture)
* [Compared to Other File Systems](#compared-to-other-file-systems)
* [Compared to HDFS](#compared-to-hdfs)
* [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
@@ -127,7 +125,7 @@ Faster and Cheaper than direct cloud storage!
## Additional Features ##
* Can choose no replication or different replication levels, rack and data center aware.
* Automatic master servers failover - no single point of failure (SPOF).
-* Automatic Gzip compression depending on file mime type.
+* Automatic Gzip compression depending on file MIME type.
* Automatic compaction to reclaim disk space after deletion or update.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk spaces can add to the total storage space.
@@ -206,7 +204,7 @@ SeaweedFS uses HTTP REST operations to read, write, and delete. The responses ar
### Write File ###
-To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server url:
+To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:
```
> curl http://localhost:9333/dir/assign
@@ -255,7 +253,7 @@ First look up the volume server's URLs by the file's volumeId:
Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
-Now you can take the public url, render the url or directly read from the volume server via url:
+Now you can take the public URL, render the URL or directly read from the volume server via URL:
```
http://localhost:8080/3,01637037d6.jpg
@@ -356,9 +354,9 @@ On each write request, the master server also generates a file key, which is a g
### Write and Read files ###
-When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node url) for the file. The client then contacts the volume node and POSTs the file content.
+When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the file. The client then contacts the volume node and POSTs the file content.
-When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node url, volume node public url), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
+When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
Please see the example for details on the write-read process.
@@ -412,7 +410,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd, YDB etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd, YDB etc, and is easy to customize.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
| System | File Metadata | File Content Read| POSIX | REST API | Optimized for large number of small files |
@@ -448,9 +446,9 @@ Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more com
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
-Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.
+Ceph, like SeaweedFS, is based on the object store RADOS. Ceph is rather complicated with mixed reviews.
-Ceph uses CRUSH hashing to automatically manage the data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning them to any writable volumes. If writes to one volume failed, just pick another volume to write. Adding more volumes are also as simple as it can be.
+Ceph uses CRUSH hashing to automatically manage data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning them to any writable volumes. If writes to one volume failed, just pick another volume to write. Adding more volumes is also as simple as it can be.
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
@@ -499,7 +497,7 @@ Step 1: install go on your machine and setup the environment by following the in
https://golang.org/doc/install
-make sure you set up your $GOPATH
+make sure to define your $GOPATH
Step 2: checkout this repo:
@@ -536,7 +534,7 @@ Write 1 million 1KB file:
```
Concurrency Level: 16
Time taken for tests: 66.753 seconds
-Complete requests: 1048576
+Completed requests: 1048576
Failed requests: 0
Total transferred: 1106789009 bytes
Requests per second: 15708.23 [#/sec]
@@ -562,7 +560,7 @@ Randomly read 1 million files:
```
Concurrency Level: 16
Time taken for tests: 22.301 seconds
-Complete requests: 1048576
+Completed requests: 1048576
Failed requests: 0
Total transferred: 1106812873 bytes
Requests per second: 47019.38 [#/sec]
diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build
index b90c65069..c917ec556 100644
--- a/docker/Dockerfile.go_build
+++ b/docker/Dockerfile.go_build
@@ -3,10 +3,11 @@ RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
+ARG TAGS
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
&& export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
- && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}"
+ && CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"
FROM alpine AS final
LABEL author="Chris Lu"
diff --git a/docker/Dockerfile.go_build_large b/docker/Dockerfile.go_build_large
deleted file mode 100644
index 5c5e84233..000000000
--- a/docker/Dockerfile.go_build_large
+++ /dev/null
@@ -1,43 +0,0 @@
-FROM golang:1.18-alpine as builder
-RUN apk add git g++ fuse
-RUN mkdir -p /go/src/github.com/chrislusf/
-RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
-ARG BRANCH=${BRANCH:-master}
-RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
-RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
- && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
- && CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}"
-
-FROM alpine AS final
-LABEL author="Chris Lu"
-COPY --from=builder /go/bin/weed /usr/bin/
-RUN mkdir -p /etc/seaweedfs
-COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
-COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
-RUN apk add fuse # for weed mount
-
-# volume server gprc port
-EXPOSE 18080
-# volume server http port
-EXPOSE 8080
-# filer server gprc port
-EXPOSE 18888
-# filer server http port
-EXPOSE 8888
-# master server shared gprc port
-EXPOSE 19333
-# master server shared http port
-EXPOSE 9333
-# s3 server http port
-EXPOSE 8333
-# webdav server http port
-EXPOSE 7333
-
-RUN mkdir -p /data/filerldb2
-
-VOLUME /data
-WORKDIR /data
-
-RUN chmod +x /entrypoint.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/Dockerfile.rocksdb_large b/docker/Dockerfile.rocksdb_large
index a1a84f884..0025eb116 100644
--- a/docker/Dockerfile.rocksdb_large
+++ b/docker/Dockerfile.rocksdb_large
@@ -3,7 +3,7 @@ FROM golang:1.18-buster as builder
RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
-ENV ROCKSDB_VERSION v7.0.4
+ENV ROCKSDB_VERSION v7.2.2
# build RocksDB
RUN cd /tmp && \
diff --git a/docker/Makefile b/docker/Makefile
index c8603309d..dbc82fde4 100644
--- a/docker/Makefile
+++ b/docker/Makefile
@@ -7,14 +7,17 @@ gen: dev
binary:
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
- cd ../weed; CGO_ENABLED=0 GOOS=linux go build --tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/
+ cd ../weed; CGO_ENABLED=0 GOOS=linux go build -tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/
build: binary
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
rm ./weed
-build_gorocksdb:
- docker build --no-cache -t chrislusf/gorocksdb -f Dockerfile.go_rocksdb .
+go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset
+ docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .
+
+go_build_large_disk:
+ docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build .
build_rocksdb:
docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
diff --git a/go.mod b/go.mod
index 38f50e7b4..688a68049 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,8 @@ require (
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/OneOfOne/xxhash v1.2.8
- github.com/Shopify/sarama v1.32.0
- github.com/aws/aws-sdk-go v1.44.9
+ github.com/Shopify/sarama v1.33.0
+ github.com/aws/aws-sdk-go v1.44.14
github.com/beorn7/perks v1.0.1 // indirect
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/bwmarrin/snowflake v0.3.0
@@ -54,7 +54,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
- github.com/hashicorp/go-multierror v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
@@ -85,7 +85,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.1.0
- github.com/prometheus/client_golang v1.12.1
+ github.com/prometheus/client_golang v1.12.2
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
@@ -112,8 +112,8 @@ require (
github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
- github.com/xdg-go/scram v1.1.0 // indirect
- github.com/xdg-go/stringprep v1.0.2 // indirect
+ github.com/xdg-go/scram v1.1.1 // indirect
+ github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd/client/v3 v3.5.4
go.mongodb.org/mongo-driver v1.9.1
@@ -126,13 +126,13 @@ require (
golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
- golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba
+ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
- google.golang.org/api v0.78.0
+ google.golang.org/api v0.79.0
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
+ google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
google.golang.org/grpc v1.46.0
google.golang.org/protobuf v1.28.0
gopkg.in/inf.v0 v0.9.1 // indirect
@@ -144,7 +144,7 @@ require (
modernc.org/memory v1.1.1 // indirect
modernc.org/opt v0.1.1 // indirect
modernc.org/sqlite v1.17.2
- modernc.org/strutil v1.1.1 // indirect
+ modernc.org/strutil v1.1.1
modernc.org/token v1.0.0 // indirect
)
@@ -155,8 +155,8 @@ require (
github.com/hanwen/go-fuse/v2 v2.1.0
github.com/hashicorp/raft v1.3.9
github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0
- github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.0
- github.com/ydb-platform/ydb-go-sdk/v3 v3.25.0
+ github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
+ github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3
)
require (
@@ -207,8 +207,8 @@ require (
github.com/tinylib/msgp v1.1.6 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc // indirect
- github.com/ydb-platform/ydb-go-yc v0.6.1 // indirect
- github.com/ydb-platform/ydb-go-yc-metadata v0.0.9 // indirect
+ github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect
+ github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
go.etcd.io/etcd/api/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
go.uber.org/atomic v1.9.0 // indirect
diff --git a/go.sum b/go.sum
index 93b366cd9..7347090a3 100644
--- a/go.sum
+++ b/go.sum
@@ -128,8 +128,8 @@ github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/Shopify/sarama v1.32.0 h1:P+RUjEaRU0GMMbYexGMDyrMkLhbbBVUVISDywi+IlFU=
-github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs=
+github.com/Shopify/sarama v1.33.0 h1:2K4mB9M4fo46sAM7t6QTsmSO8dLX1OqznLM7vn3OjZ8=
+github.com/Shopify/sarama v1.33.0/go.mod h1:lYO7LwEBkE0iAeTl94UfPSrDaavFzSFlmn+5isARATQ=
github.com/Shopify/toxiproxy/v2 v2.3.0 h1:62YkpiP4bzdhKMH+6uC5E95y608k3zDwdzuBMsnn3uQ=
github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -150,8 +150,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.44.9 h1:s3lsEFbc8i7ghQmcEpcdyvoO/WMwyCVa9pUq3Lq//Ok=
-github.com/aws/aws-sdk-go v1.44.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.14 h1:qd7/muV1rElsbvkK9D1nHUzBoDlEw2etfeo4IE82eSQ=
+github.com/aws/aws-sdk-go v1.44.14/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA=
github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
@@ -498,8 +498,9 @@ github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
@@ -601,7 +602,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid/v2 v2.0.6 h1:dQ5ueTiftKxp0gyjKSx5+8BtPWkyQbd95m8Gys/RarI=
@@ -742,8 +743,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -857,23 +858,23 @@ github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdms
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.0 h1:d70R37I0HrDLsafRrMBXyrD4lmQbCHE873t00Vr0gm0=
-github.com/xdg-go/scram v1.1.0/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
+github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD1vqadsDQUva6t2O9MbnyvoOgo8nFNPaOIH5U8=
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc h1:xvTP0fhYNm+Ws+xC34jzF9EdorPUKkucJr0TyybqVSk=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc/go.mod h1:cc138nptTn9eKptCQl/grxP6pBKpo/bnXDiOxuVZtps=
-github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.0 h1:74zGbvLn5kwLkkVoicJWzmvWhaIGdIWLUr1tfaMul08=
-github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.0/go.mod h1:9E5eeVy08G/cu5azQVYGHduqxa6hz/dyZzJDjDTE010=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.9.0/go.mod h1:/KCidORSzHOsn+j46Iqb8Tf6UXNhPWRMcrMieh+i9Xw=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.25.0 h1:3uh36kf09NJHRex+LEDnuajNdzKwPPJIvePjtIUra1U=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.25.0/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88=
-github.com/ydb-platform/ydb-go-yc v0.6.1 h1:DBw32JwTOsfFGnMlwri2f7C2joYvFnLNYEQAopID/Qg=
-github.com/ydb-platform/ydb-go-yc v0.6.1/go.mod h1:iMalotfQEHibqPDNkwn0oT2UC5ieS5j6teIuGgPzaSE=
-github.com/ydb-platform/ydb-go-yc-metadata v0.0.9 h1:jymJK3FVUphDa5q6oyjLODXLc34AR+aFTMiybacMLy0=
-github.com/ydb-platform/ydb-go-yc-metadata v0.0.9/go.mod h1:L7zqxXrf3DXY2CWd9T9JBiPVpdooGBxJr4CPtWiMLCg=
+github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4=
+github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3 h1:yyMw+sTDSqGVDG8ivAw8V/kXfaulXsgftSG+4nCmseA=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88=
+github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo=
+github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE=
+github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg=
+github.com/ydb-platform/ydb-go-yc-metadata v0.5.2/go.mod h1:82SQ4L3PewiEmFW4oTMc1sfPjODasIYxD/SKGsbK74s=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1052,6 +1053,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220420153159-1850ba15e1be/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1180,8 +1182,8 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba h1:AyHWHCBVlIYI5rgEM3o+1PLd0sLPcIAoaUckGQMaWtw=
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1322,8 +1324,8 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.76.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.78.0 h1:5ewPyCwP43C3i8B6C2Kb+eVAevbnke2xR8VbcSWjS4I=
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.79.0 h1:vaOcm0WdXvhGkci9a0+CcQVZqSRjN8ksSBlWv99f8Pg=
+google.golang.org/api v0.79.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1424,9 +1426,8 @@ google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 h1:q1kiSVscqoDeqTF27eQ2NnLLDmqF0I373qQNXYMy0fo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
diff --git a/k8s/helm_charts2/Chart.yaml b/k8s/helm_charts2/Chart.yaml
index 479d7736c..966a00ba8 100644
--- a/k8s/helm_charts2/Chart.yaml
+++ b/k8s/helm_charts2/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
-appVersion: "3.02"
-version: "3.02"
+appVersion: "3.04"
+version: "3.04"
diff --git a/test/s3/compatibility/run.sh b/test/s3/compatibility/run.sh
index 96d630dd7..990599df5 100755
--- a/test/s3/compatibility/run.sh
+++ b/test/s3/compatibility/run.sh
@@ -8,9 +8,9 @@ mkdir tmp
docker stop s3test-instance || echo "already stopped"
ulimit -n 10000
-../../../weed/weed server -filer -s3 -volume.max 0 -master.volumeSizeLimitMB 5 -dir "$(pwd)/tmp" 1>&2>weed.log &
+../../../weed/weed server -filer -s3 -volume.max 0 -master.volumeSizeLimitMB 5 -dir "$(pwd)/tmp" 1>&2>weed.log &
-until $(curl --output /dev/null --silent --head --fail http://127.0.0.1:9333); do
+until curl --output /dev/null --silent --head --fail http://127.0.0.1:9333; do
printf '.'
sleep 5
done
@@ -18,7 +18,7 @@ sleep 3
rm -Rf logs-full.txt logs-summary.txt
# docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v `pwd`/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py:test_get_obj_tagging -v -a 'resource=object,!bucket-policy,!versioning,!encryption'
-docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v `pwd`/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py -v -a 'resource=object,!bucket-policy,!versioning,!encryption' | sed -n -e '/botocore.hooks/!p;//q' | tee logs-summary.txt
+docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v "$(pwd)"/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py -v -a 'resource=object,!bucket-policy,!versioning,!encryption' | sed -n -e '/botocore.hooks/!p;//q' | tee logs-summary.txt
docker stop s3test-instance || echo "already stopped"
killall -9 weed
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index 70bce3bf9..457c5c592 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -24,7 +24,7 @@ var (
/*
This is to resolve an one-time issue that caused inconsistency with .dat and .idx files.
-In this case, the .dat file contains all data, but some of deletion caused incorrect offset.
+In this case, the .dat file contains all data, but some deletion caused incorrect offset.
The .idx has all correct offsets.
1. fix the .dat file, a new .dat_fixed file will be generated.
diff --git a/weed/command/autocomplete.go b/weed/command/autocomplete.go
index 9a545a183..955ce4006 100644
--- a/weed/command/autocomplete.go
+++ b/weed/command/autocomplete.go
@@ -41,7 +41,7 @@ func AutocompleteMain(commands []*Command) bool {
func installAutoCompletion() bool {
if runtime.GOOS == "windows" {
- fmt.Println("windows is not supported")
+ fmt.Println("Windows is not supported")
return false
}
@@ -56,7 +56,7 @@ func installAutoCompletion() bool {
func uninstallAutoCompletion() bool {
if runtime.GOOS == "windows" {
- fmt.Println("windows is not supported")
+ fmt.Println("Windows is not supported")
return false
}
@@ -65,7 +65,7 @@ func uninstallAutoCompletion() bool {
fmt.Printf("uninstall failed! %s\n", err)
return false
}
- fmt.Printf("autocompletion is disable. Please restart your shell.\n")
+ fmt.Printf("autocompletion is disabled. Please restart your shell.\n")
return true
}
diff --git a/weed/command/backup.go b/weed/command/backup.go
index ba1b0d287..c43b0d351 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -120,7 +120,7 @@ func runBackup(cmd *Command, args []string) bool {
}
if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
- if err = v.Compact2(30*1024*1024*1024, 0, nil); err != nil {
+ if err = v.Compact2(0, 0, nil); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 82821f579..9f18cc5b9 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -74,14 +74,14 @@ func init() {
var cmdBenchmark = &Command{
UsageLine: "benchmark -master=localhost:9333 -c=10 -n=100000",
- Short: "benchmark on writing millions of files and read out",
+ Short: "benchmark by writing millions of files and reading them out",
Long: `benchmark on an empty SeaweedFS file system.
Two tests during benchmark:
1) write lots of small files to the system
2) read the files out
- The file content is mostly zero, but no compression is done.
+ The file content is mostly zeros, but no compression is done.
You can choose to only benchmark read or write.
During write, the list of uploaded file ids is stored in "-list" specified file.
@@ -468,7 +468,7 @@ func (s *stats) printStats() {
timeTaken := float64(int64(s.end.Sub(s.start))) / 1000000000
fmt.Printf("\nConcurrency Level: %d\n", *b.concurrency)
fmt.Printf("Time taken for tests: %.3f seconds\n", timeTaken)
- fmt.Printf("Complete requests: %d\n", completed)
+ fmt.Printf("Completed requests: %d\n", completed)
fmt.Printf("Failed requests: %d\n", failed)
fmt.Printf("Total transferred: %d bytes\n", transferred)
fmt.Printf("Requests per second: %.2f [#/sec]\n", float64(completed)/timeTaken)
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 0935feb76..42de11f08 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -86,6 +86,7 @@ func init() {
// start s3 on filer
filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port")
+ filerS3Options.portGrpc = cmdFiler.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")
filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
diff --git a/weed/command/s3.go b/weed/command/s3.go
index c28f3016e..42e447d90 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -4,11 +4,13 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "google.golang.org/grpc/reflection"
"net/http"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/gorilla/mux"
@@ -27,6 +29,7 @@ type S3Options struct {
filer *string
bindIp *string
port *int
+ portGrpc *int
config *string
domainName *string
tlsPrivateKey *string
@@ -43,6 +46,7 @@ func init() {
s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
s3StandaloneOptions.bindIp = cmdS3.Flag.String("ip.bind", "", "ip address to bind to. Default to localhost.")
s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
+ s3StandaloneOptions.portGrpc = cmdS3.Flag.Int("port.grpc", 0, "s3 server grpc listen port")
s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
s3StandaloneOptions.auditLogConfig = cmdS3.Flag.String("auditLogConfig", "", "path to the audit log config file")
@@ -179,7 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
router := mux.NewRouter().SkipClean(true)
- _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
+ s3ApiServer, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: filerAddress,
Port: *s3opt.port,
Config: *s3opt.config,
@@ -196,6 +200,9 @@ func (s3opt *S3Options) startS3Server() bool {
httpS := &http.Server{Handler: router}
+ if *s3opt.portGrpc == 0 {
+ *s3opt.portGrpc = 10000 + *s3opt.port
+ }
if *s3opt.bindIp == "" {
*s3opt.bindIp = "localhost"
}
@@ -213,6 +220,20 @@ func (s3opt *S3Options) startS3Server() bool {
}
}
+ // starting grpc server
+ grpcPort := *s3opt.portGrpc
+ grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*s3opt.bindIp, grpcPort, 0)
+ if err != nil {
+ glog.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err)
+ }
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.s3"))
+ s3_pb.RegisterSeaweedS3Server(grpcS, s3ApiServer)
+ reflection.Register(grpcS)
+ if grpcLocalL != nil {
+ go grpcS.Serve(grpcLocalL)
+ }
+ go grpcS.Serve(grpcL)
+
if *s3opt.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if s3ApiLocalListner != nil {
diff --git a/weed/command/scaffold/security.toml b/weed/command/scaffold/security.toml
index 38a803dd6..e5452cdff 100644
--- a/weed/command/scaffold/security.toml
+++ b/weed/command/scaffold/security.toml
@@ -67,6 +67,11 @@ cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
+[grpc.s3]
+cert = ""
+key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
+
[grpc.msg_broker]
cert = ""
key = ""
diff --git a/weed/command/server.go b/weed/command/server.go
index d26376c1a..4b6b6c642 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -133,6 +133,7 @@ func init() {
serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, "<exprimental> enable tcp port")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
+ s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")
s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
diff --git a/weed/pb/Makefile b/weed/pb/Makefile
index 954b4cb98..a8992bde2 100644
--- a/weed/pb/Makefile
+++ b/weed/pb/Makefile
@@ -9,6 +9,7 @@ gen:
protoc remote.proto --go_out=./remote_pb --go-grpc_out=./remote_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
protoc iam.proto --go_out=./iam_pb --go-grpc_out=./iam_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
protoc mount.proto --go_out=./mount_pb --go-grpc_out=./mount_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
+ protoc s3.proto --go_out=./s3_pb --go-grpc_out=./s3_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
protoc messaging.proto --go_out=./messaging_pb --go-grpc_out=./messaging_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
# protoc filer.proto --java_out=../../other/java/client/src/main/java
cp filer.proto ../../other/java/client/src/main/proto
diff --git a/weed/pb/s3.proto b/weed/pb/s3.proto
new file mode 100644
index 000000000..4f129b817
--- /dev/null
+++ b/weed/pb/s3.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+
+package messaging_pb;
+
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/s3_pb";
+option java_package = "seaweedfs.client";
+option java_outer_classname = "S3Proto";
+
+//////////////////////////////////////////////////
+
+service SeaweedS3 {
+
+ rpc Configure (S3ConfigureRequest) returns (S3ConfigureResponse) {
+ }
+
+}
+
+//////////////////////////////////////////////////
+
+message S3ConfigureRequest {
+ bytes s3_configuration_file_content = 1;
+}
+
+message S3ConfigureResponse {
+}
diff --git a/weed/pb/s3_pb/s3.pb.go b/weed/pb/s3_pb/s3.pb.go
new file mode 100644
index 000000000..53f174f02
--- /dev/null
+++ b/weed/pb/s3_pb/s3.pb.go
@@ -0,0 +1,209 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: s3.proto
+
+package s3_pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type S3ConfigureRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ S3ConfigurationFileContent []byte `protobuf:"bytes,1,opt,name=s3_configuration_file_content,json=s3ConfigurationFileContent,proto3" json:"s3_configuration_file_content,omitempty"`
+}
+
+func (x *S3ConfigureRequest) Reset() {
+ *x = S3ConfigureRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_s3_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *S3ConfigureRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3ConfigureRequest) ProtoMessage() {}
+
+func (x *S3ConfigureRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_s3_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3ConfigureRequest.ProtoReflect.Descriptor instead.
+func (*S3ConfigureRequest) Descriptor() ([]byte, []int) {
+ return file_s3_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *S3ConfigureRequest) GetS3ConfigurationFileContent() []byte {
+ if x != nil {
+ return x.S3ConfigurationFileContent
+ }
+ return nil
+}
+
+type S3ConfigureResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *S3ConfigureResponse) Reset() {
+ *x = S3ConfigureResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_s3_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *S3ConfigureResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3ConfigureResponse) ProtoMessage() {}
+
+func (x *S3ConfigureResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_s3_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3ConfigureResponse.ProtoReflect.Descriptor instead.
+func (*S3ConfigureResponse) Descriptor() ([]byte, []int) {
+ return file_s3_proto_rawDescGZIP(), []int{1}
+}
+
+var File_s3_proto protoreflect.FileDescriptor
+
+var file_s3_proto_rawDesc = []byte{
+ 0x0a, 0x08, 0x73, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x57, 0x0a, 0x12, 0x53, 0x33, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
+ 0x0a, 0x1d, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1a, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77,
+ 0x65, 0x65, 0x64, 0x53, 0x33, 0x12, 0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
+ 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61,
+ 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53,
+ 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
+ 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73,
+ 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_s3_proto_rawDescOnce sync.Once
+ file_s3_proto_rawDescData = file_s3_proto_rawDesc
+)
+
+func file_s3_proto_rawDescGZIP() []byte {
+ file_s3_proto_rawDescOnce.Do(func() {
+ file_s3_proto_rawDescData = protoimpl.X.CompressGZIP(file_s3_proto_rawDescData)
+ })
+ return file_s3_proto_rawDescData
+}
+
+var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_s3_proto_goTypes = []interface{}{
+ (*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest
+ (*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse
+}
+var file_s3_proto_depIdxs = []int32{
+ 0, // 0: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest
+ 1, // 1: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_s3_proto_init() }
+func file_s3_proto_init() {
+ if File_s3_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_s3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*S3ConfigureRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_s3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*S3ConfigureResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_s3_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_s3_proto_goTypes,
+ DependencyIndexes: file_s3_proto_depIdxs,
+ MessageInfos: file_s3_proto_msgTypes,
+ }.Build()
+ File_s3_proto = out.File
+ file_s3_proto_rawDesc = nil
+ file_s3_proto_goTypes = nil
+ file_s3_proto_depIdxs = nil
+}
diff --git a/weed/pb/s3_pb/s3_grpc.pb.go b/weed/pb/s3_pb/s3_grpc.pb.go
new file mode 100644
index 000000000..1bc956be6
--- /dev/null
+++ b/weed/pb/s3_pb/s3_grpc.pb.go
@@ -0,0 +1,101 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+
+package s3_pb
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// SeaweedS3Client is the client API for SeaweedS3 service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type SeaweedS3Client interface {
+ Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error)
+}
+
+type seaweedS3Client struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSeaweedS3Client(cc grpc.ClientConnInterface) SeaweedS3Client {
+ return &seaweedS3Client{cc}
+}
+
+func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error) {
+ out := new(S3ConfigureResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedS3/Configure", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SeaweedS3Server is the server API for SeaweedS3 service.
+// All implementations must embed UnimplementedSeaweedS3Server
+// for forward compatibility
+type SeaweedS3Server interface {
+ Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error)
+ mustEmbedUnimplementedSeaweedS3Server()
+}
+
+// UnimplementedSeaweedS3Server must be embedded to have forward compatible implementations.
+type UnimplementedSeaweedS3Server struct {
+}
+
+func (UnimplementedSeaweedS3Server) Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
+}
+func (UnimplementedSeaweedS3Server) mustEmbedUnimplementedSeaweedS3Server() {}
+
+// UnsafeSeaweedS3Server may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to SeaweedS3Server will
+// result in compilation errors.
+type UnsafeSeaweedS3Server interface {
+ mustEmbedUnimplementedSeaweedS3Server()
+}
+
+func RegisterSeaweedS3Server(s grpc.ServiceRegistrar, srv SeaweedS3Server) {
+ s.RegisterService(&SeaweedS3_ServiceDesc, srv)
+}
+
+func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(S3ConfigureRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedS3Server).Configure(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedS3/Configure",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedS3Server).Configure(ctx, req.(*S3ConfigureRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// SeaweedS3_ServiceDesc is the grpc.ServiceDesc for SeaweedS3 service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var SeaweedS3_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "messaging_pb.SeaweedS3",
+ HandlerType: (*SeaweedS3Server)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Configure",
+ Handler: _SeaweedS3_Configure_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "s3.proto",
+}
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index 6a7d83919..53a55617f 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -91,7 +91,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3A
if err != nil {
return fmt.Errorf("read S3 config: %v", err)
}
- return iam.loadS3ApiConfigurationFromBytes(content)
+ return iam.LoadS3ApiConfigurationFromBytes(content)
}
func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error {
@@ -100,10 +100,10 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName str
glog.Warningf("fail to read %s : %v", fileName, readErr)
return fmt.Errorf("fail to read %s : %v", fileName, readErr)
}
- return iam.loadS3ApiConfigurationFromBytes(content)
+ return iam.LoadS3ApiConfigurationFromBytes(content)
}
-func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromBytes(content []byte) error {
+func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []byte) error {
s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil {
glog.Warningf("unmarshal error: %v", err)
diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go
index bd0b1016d..2cea739c6 100644
--- a/weed/s3api/auth_credentials_subscribe.go
+++ b/weed/s3api/auth_credentials_subscribe.go
@@ -23,7 +23,7 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
dir = message.NewParentPath
}
if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile {
- if err := s3a.iam.loadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
+ if err := s3a.iam.LoadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
return err
}
glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile)
diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go
index d63d50443..30fc8eefa 100644
--- a/weed/s3api/http/header.go
+++ b/weed/s3api/http/header.go
@@ -28,11 +28,14 @@ const (
AmzStorageClass = "x-amz-storage-class"
// S3 user-defined metadata
- AmzUserMetaPrefix = "X-Amz-Meta-"
+ AmzUserMetaPrefix = "X-Amz-Meta-"
+ AmzUserMetaDirective = "X-Amz-Metadata-Directive"
// S3 object tagging
- AmzObjectTagging = "X-Amz-Tagging"
- AmzTagCount = "x-amz-tagging-count"
+ AmzObjectTagging = "X-Amz-Tagging"
+ AmzObjectTaggingPrefix = "X-Amz-Tagging-"
+ AmzObjectTaggingDirective = "X-Amz-Tagging-Directive"
+ AmzTagCount = "x-amz-tagging-count"
)
// Non-Standard S3 HTTP request constants
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index f62db9c31..c44ca7ddf 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -3,9 +3,10 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
+ headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
- weed_server "github.com/chrislusf/seaweedfs/weed/server"
+ "modernc.org/strutil"
"net/http"
"net/url"
"strconv"
@@ -15,6 +16,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
+const (
+ DirectiveCopy = "COPY"
+ DirectiveReplace = "REPLACE"
+)
+
func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
dstBucket, dstObject := xhttp.GetBucketAndObject(r)
@@ -30,7 +36,9 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject)
- if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) {
+ replaceMeta, replaceTagging := replaceDirective(r.Header)
+
+ if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) {
fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
dir, name := fullPath.DirAndName()
entry, err := s3a.getEntry(dir, name)
@@ -38,7 +46,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
return
}
- entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r))
+ entry.Extended = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
err = s3a.touch(dir, name, entry)
if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
@@ -80,6 +88,11 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
}
defer util.CloseResponse(resp)
+ tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
+ if tagErr != nil {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
+ return
+ }
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
@@ -182,6 +195,107 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
}
-func isReplace(r *http.Request) bool {
- return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE"
+func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
+ return reqHeader.Get(headers.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(headers.AmzObjectTaggingDirective) == DirectiveReplace
+}
+
+func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
+ if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) == 0 {
+ if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
+ reqHeader[xhttp.AmzStorageClass] = sc
+ }
+ }
+
+ if !replaceMeta {
+ for header, _ := range reqHeader {
+ if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+ delete(reqHeader, header)
+ }
+ }
+ for k, v := range existing {
+ if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+ reqHeader[k] = v
+ }
+ }
+ }
+
+ if !replaceTagging {
+ for header, _ := range reqHeader {
+ if strings.HasPrefix(header, xhttp.AmzObjectTagging) {
+ delete(reqHeader, header)
+ }
+ }
+
+ found := false
+ for k, _ := range existing {
+ if strings.HasPrefix(k, xhttp.AmzObjectTaggingPrefix) {
+ found = true
+ break
+ }
+ }
+
+ if found {
+ tags, err := getTags(dir, name)
+ if err != nil {
+ return err
+ }
+
+ var tagArr []string
+ for k, v := range tags {
+ tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
+ }
+ tagStr := strutil.JoinFields(tagArr, "&")
+ reqHeader.Set(xhttp.AmzObjectTagging, tagStr)
+ }
+ }
+ return
+}
+
+func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte) {
+ metadata = make(map[string][]byte)
+
+ if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
+ metadata[xhttp.AmzStorageClass] = sc
+ }
+ if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) > 0 {
+ metadata[xhttp.AmzStorageClass] = []byte(sc)
+ }
+
+ if replaceMeta {
+ for header, values := range reqHeader {
+ if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+ for _, value := range values {
+ metadata[header] = []byte(value)
+ }
+ }
+ }
+ } else {
+ for k, v := range existing {
+ if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+ metadata[k] = v
+ }
+ }
+ }
+
+ if replaceTagging {
+ if tags := reqHeader.Get(xhttp.AmzObjectTagging); tags != "" {
+ for _, v := range strings.Split(tags, "&") {
+ tag := strings.Split(v, "=")
+ if len(tag) == 2 {
+ metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+ } else if len(tag) == 1 {
+ metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil
+ }
+ }
+ }
+ } else {
+ for k, v := range existing {
+ if strings.HasPrefix(k, xhttp.AmzObjectTagging) {
+ metadata[k] = v
+ }
+ }
+ delete(metadata, xhttp.AmzTagCount)
+ }
+
+ return
}
diff --git a/weed/s3api/s3api_object_copy_handlers_test.go b/weed/s3api/s3api_object_copy_handlers_test.go
new file mode 100644
index 000000000..d2c8e488b
--- /dev/null
+++ b/weed/s3api/s3api_object_copy_handlers_test.go
@@ -0,0 +1,426 @@
+package s3api
+
+import (
+ "fmt"
+ headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+)
+
+type H map[string]string
+
+func (h H) String() string {
+ pairs := make([]string, 0, len(h))
+ for k, v := range h {
+ pairs = append(pairs, fmt.Sprintf("%s : %s", k, v))
+ }
+ sort.Strings(pairs)
+ join := strings.Join(pairs, "\n")
+ return "\n" + join + "\n"
+}
+
+var processMetadataTestCases = []struct {
+ caseId int
+ request H
+ existing H
+ getTags H
+ want H
+}{
+ {
+ 201,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging": "A=B&a=b&type=existing",
+ },
+ },
+ {
+ 202,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=existing",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ },
+ },
+
+ {
+ 203,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ },
+
+ {
+ 204,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ },
+
+ {
+ 205,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{},
+ H{},
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ },
+
+ {
+ 206,
+ H{
+ "User-Agent": "firefox",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ },
+
+ {
+ 207,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-Type": "existing",
+ },
+ H{
+ "A": "B",
+ "a": "b",
+ "type": "existing",
+ },
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ },
+}
+var processMetadataBytesTestCases = []struct {
+ caseId int
+ request H
+ existing H
+ want H
+}{
+ {
+ 101,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ },
+
+ {
+ 102,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ },
+
+ {
+ 103,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "request",
+ },
+ },
+
+ {
+ 104,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "request",
+ },
+ },
+
+ {
+ 105,
+ H{
+ "User-Agent": "firefox",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{
+ "X-Amz-Meta-My-Meta": "existing",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "existing",
+ },
+ H{},
+ },
+
+ {
+ 107,
+ H{
+ "User-Agent": "firefox",
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging": "A=B&a=b&type=request",
+ headers.AmzUserMetaDirective: DirectiveReplace,
+ headers.AmzObjectTaggingDirective: DirectiveReplace,
+ },
+ H{},
+ H{
+ "X-Amz-Meta-My-Meta": "request",
+ "X-Amz-Tagging-A": "B",
+ "X-Amz-Tagging-a": "b",
+ "X-Amz-Tagging-type": "request",
+ },
+ },
+}
+
+func TestProcessMetadata(t *testing.T) {
+ for _, tc := range processMetadataTestCases {
+ reqHeader := transferHToHeader(tc.request)
+ existing := transferHToHeader(tc.existing)
+ replaceMeta, replaceTagging := replaceDirective(reqHeader)
+
+ err := processMetadata(reqHeader, existing, replaceMeta, replaceTagging, func(_ string, _ string) (tags map[string]string, err error) {
+ return tc.getTags, nil
+ }, "", "")
+ if err != nil {
+ t.Error(err)
+ }
+
+ result := transferHeaderToH(reqHeader)
+ fmtTagging(result, tc.want)
+
+ if !reflect.DeepEqual(result, tc.want) {
+ t.Error(fmt.Errorf("\n### CaseID: %d ###"+
+ "\nRequest:%v"+
+ "\nExisting:%v"+
+ "\nGetTags:%v"+
+ "\nWant:%v"+
+ "\nActual:%v",
+ tc.caseId, tc.request, tc.existing, tc.getTags, tc.want, result))
+ }
+ }
+}
+
+func TestProcessMetadataBytes(t *testing.T) {
+ for _, tc := range processMetadataBytesTestCases {
+ reqHeader := transferHToHeader(tc.request)
+ existing := transferHToBytesArr(tc.existing)
+ replaceMeta, replaceTagging := replaceDirective(reqHeader)
+ extends := processMetadataBytes(reqHeader, existing, replaceMeta, replaceTagging)
+
+ result := transferBytesArrToH(extends)
+ fmtTagging(result, tc.want)
+
+ if !reflect.DeepEqual(result, tc.want) {
+ t.Error(fmt.Errorf("\n### CaseID: %d ###"+
+ "\nRequest:%v"+
+ "\nExisting:%v"+
+ "\nWant:%v"+
+ "\nActual:%v",
+ tc.caseId, tc.request, tc.existing, tc.want, result))
+ }
+ }
+}
+
+func fmtTagging(maps ...map[string]string) {
+ for _, m := range maps {
+ if tagging := m[headers.AmzObjectTagging]; len(tagging) > 0 {
+ split := strings.Split(tagging, "&")
+ sort.Strings(split)
+ m[headers.AmzObjectTagging] = strings.Join(split, "&")
+ }
+ }
+}
+
+func transferHToHeader(data map[string]string) http.Header {
+ header := http.Header{}
+ for k, v := range data {
+ header.Add(k, v)
+ }
+ return header
+}
+
+func transferHToBytesArr(data map[string]string) map[string][]byte {
+ m := make(map[string][]byte, len(data))
+ for k, v := range data {
+ m[k] = []byte(v)
+ }
+ return m
+}
+
+func transferBytesArrToH(data map[string][]byte) H {
+ m := make(map[string]string, len(data))
+ for k, v := range data {
+ m[k] = string(v)
+ }
+ return m
+}
+
+func transferHeaderToH(data map[string][]string) H {
+ m := make(map[string]string, len(data))
+ for k, v := range data {
+ m[k] = v[len(v)-1]
+ }
+ return m
+}
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index b0b8e27e4..657fa8171 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -3,6 +3,7 @@ package s3api
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"net"
"net/http"
"strings"
@@ -31,6 +32,7 @@ type S3ApiServerOption struct {
}
type S3ApiServer struct {
+ s3_pb.UnimplementedSeaweedS3Server
option *S3ApiServerOption
iam *IdentityAccessManagement
randomClientId int32
diff --git a/weed/s3api/s3api_server_grpc.go b/weed/s3api/s3api_server_grpc.go
new file mode 100644
index 000000000..e93d0056f
--- /dev/null
+++ b/weed/s3api/s3api_server_grpc.go
@@ -0,0 +1,16 @@
+package s3api
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
+)
+
+func (s3a *S3ApiServer) Configure(ctx context.Context, request *s3_pb.S3ConfigureRequest) (*s3_pb.S3ConfigureResponse, error) {
+
+ if err := s3a.iam.LoadS3ApiConfigurationFromBytes(request.S3ConfigurationFileContent); err != nil {
+ return nil, err
+ }
+
+ return &s3_pb.S3ConfigureResponse{}, nil
+
+}
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index c4bef5925..2afcd9cba 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -200,6 +200,17 @@ func (c *commandVolumeFixReplication) deleteOneVolume(commandEnv *CommandEnv, wr
}
}
+ collectionIsMismatch := false
+ for _, volumeReplica := range replicas {
+ if volumeReplica.info.Collection != replica.info.Collection {
+ fmt.Fprintf(writer, "skip deleting volume %d as its collection %s is mismatched with %s\n", replica.info.Id, replica.info.Collection, volumeReplica.info.Collection)
+ collectionIsMismatch = true
+ }
+ }
+ if collectionIsMismatch {
+ continue
+ }
+
fmt.Fprintf(writer, "deleting volume %d from %s ...\n", replica.info.Id, replica.location.dataNode.Id)
if !takeAction {
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 584ce722b..2b1daf97c 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -68,7 +68,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
findMissingChunksInVolumeId := fsckCommand.Int("findMissingChunksInVolumeId", 0, "used together with findMissingChunksInFiler")
- applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer")
+ applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer. Currently this only works with default filerGroup.")
c.forcePurging = fsckCommand.Bool("forcePurging", false, "delete missing data from volumes in one replica used together with applyPurging")
purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 66bfca982..03d2b395e 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,7 +5,7 @@ import (
)
var (
- VERSION_NUMBER = fmt.Sprintf("%.02f", 3.02)
+ VERSION_NUMBER = fmt.Sprintf("%.02f", 3.04)
VERSION = sizeLimit + " " + VERSION_NUMBER
COMMIT = ""
)