25 files changed, 336 insertions, 162 deletions
diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml
index b84b27d15..e13bacb2a 100644
--- a/.github/workflows/depsreview.yml
+++ b/.github/workflows/depsreview.yml
@@ -11,4 +11,4 @@ jobs:
     - name: 'Checkout Repository'
       uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748
     - name: 'Dependency Review'
-      uses: actions/dependency-review-action@1c59cdf2a9c7f29c90e8da32237eb04b81bad9f0
+      uses: actions/dependency-review-action@94145f3150bfabdc97540cbd5f7e926306ea7744
diff --git a/go.mod b/go.mod
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/Azure/azure-storage-blob-go v0.15.0
 	github.com/OneOfOne/xxhash v1.2.8
 	github.com/Shopify/sarama v1.34.1
-	github.com/aws/aws-sdk-go v1.44.51
+	github.com/aws/aws-sdk-go v1.44.56
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/bwmarrin/snowflake v0.3.0
@@ -37,7 +37,6 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/go-redsync/redsync/v4 v4.5.1
 	github.com/go-sql-driver/mysql v1.6.0
-	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/go-zookeeper/zk v1.0.2 // indirect
 	github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
 	github.com/golang-jwt/jwt v3.2.2+incompatible
@@ -113,12 +112,12 @@ require (
 	github.com/xdg-go/stringprep v1.0.3 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
 	go.etcd.io/etcd/client/v3 v3.5.4
-	go.mongodb.org/mongo-driver v1.9.1
+	go.mongodb.org/mongo-driver v1.10.0
 	go.opencensus.io v0.23.0 // indirect
 	gocloud.dev v0.25.0
 	gocloud.dev/pubsub/natspubsub v0.25.0
 	gocloud.dev/pubsub/rabbitpubsub v0.25.0
-	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
+	golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
 	golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1
 	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
@@ -127,10 +126,10 @@ require (
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023
 	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
-	google.golang.org/api v0.86.0
+	google.golang.org/api v0.87.0
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
-	google.golang.org/grpc v1.47.0
+	google.golang.org/grpc v1.48.0
 	google.golang.org/protobuf v1.28.0
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	modernc.org/b v1.0.0 // indirect
@@ -154,7 +153,7 @@ require (
 	github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0
 	github.com/tikv/client-go/v2 v2.0.1
 	github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
-	github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0
+	github.com/ydb-platform/ydb-go-sdk/v3 v3.28.3
 	google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
 )
@@ -200,6 +199,7 @@ require (
 	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-runewidth v0.0.7 // indirect
 	github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
+	github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
 	github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d // indirect
 	github.com/nats-io/nkeys v0.3.0 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
diff --git a/go.sum b/go.sum
--- a/go.sum
+++ b/go.sum
@@ -157,8 +157,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb
 github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.44.51 h1:jO9hoLynZOrMM4dj0KjeKIK+c6PA+HQbKoHOkAEye2Y=
-github.com/aws/aws-sdk-go v1.44.51/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.56 h1:bT+lExwagH7djxb6InKUVkEKGPAj5aAPnV85/m1fKro=
+github.com/aws/aws-sdk-go v1.44.56/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA=
 github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
@@ -346,8 +346,6 @@ github.com/go-redsync/redsync/v4 v4.5.1/go.mod h1:AfhgO1E6W3rlUTs6Zmz/B6qBZJFasV
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
-github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
 github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -703,6 +701,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -910,10 +909,8 @@ github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlV
 github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
 github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
 github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
 github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD1vqadsDQUva6t2O9MbnyvoOgo8nFNPaOIH5U8=
@@ -924,8 +921,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7/go.mod
 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4=
 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4=
 github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0 h1:F394Kkl+QPLrl0+fWpoSafdfgGqQCZOyzvXLxzVUWfs=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0/go.mod h1:vXjmbeEAWlkVE5/ym3XHhtnWk7aDGGqFMKrfgwbRUkQ=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.28.3 h1:bD3Xfj8XUby/rbl6hUye96eKApSmNQ8/vYMImd6W9Dc=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.28.3/go.mod h1:bsYHcRuCdelVeIwNsJicIz60flewCwp8Kg9gfwMPR/Q=
 github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo=
 github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE=
 github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg=
@@ -947,8 +944,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3
 go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
 go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4=
 go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
-go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c=
-go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
+go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg=
+go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
 go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -1006,7 +1003,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@@ -1017,8 +1013,8 @@ golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
-golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1296,7 +1292,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
 golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -1412,8 +1407,8 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
 google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
-google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U=
-google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.87.0 h1:pUQVF/F+X7Tl1lo4LJoJf5BOpjtmINU80p9XpYTU2p4=
+google.golang.org/api v0.87.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1554,8 +1549,9 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
 google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b h1:NuxyvVZoDfHZwYW9LD4GJiF5/nhiSyP4/InTrvw9Ibk=
 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b/go.mod h1:IBqQ7wSUJ2Ep09a8rMWFsg4fmI2r38zwsq8a0GgxXpM=
diff --git a/k8s/helm_charts2/Chart.yaml b/k8s/helm_charts2/Chart.yaml
index 2ce8c3db9..a92ba313e 100644
--- a/k8s/helm_charts2/Chart.yaml
+++ b/k8s/helm_charts2/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.15"
-version: "3.15"
+appVersion: "3.16"
+version: "3.16"
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 86827c50e..15fe69116 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -79,7 +79,7 @@ func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*
 		return
 	}
 
-	glog.V(0).Infof("bootstrap from %v", earliestNode.Address)
+	glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFileId)
 	err = pb.FollowMetadata(pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, "bootstrap", int32(f.UniqueFileId), "/", nil, 0, snapshotTime.UnixNano(), f.Signature, func(resp *filer_pb.SubscribeMetadataResponse) error {
 		return Replay(f.Store, resp)
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
index 1a805bde3..5799e247e 100644
--- a/weed/filer/meta_aggregator.go
+++ b/weed/filer/meta_aggregator.go
@@ -7,6 +7,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"io"
+	"strings"
 	"sync"
 	"time"
@@ -57,7 +58,7 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, star
 	if update.IsAdd {
 		// every filer should subscribe to a new filer
 		if ma.setActive(address, true) {
-			go ma.loopSubscribeToOnefiler(ma.filer, ma.self, address, startFrom)
+			go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom)
 		}
 	} else {
 		ma.setActive(address, false)
@@ -89,17 +90,21 @@ func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
 	return count > 0 && isActive
 }
 
-func (ma *MetaAggregator) loopSubscribeToOnefiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) {
+func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) {
 	lastTsNs := startFrom.UnixNano()
 	for {
-		glog.V(0).Infof("loopSubscribeToOnefiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
+		glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
 		nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)
 		if !ma.isActive(peer) {
 			glog.V(0).Infof("stop subscribing remote %s meta change", peer)
 			return
 		}
 		if err != nil {
-			glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+			errLvl := glog.Level(0)
+			if strings.Contains(err.Error(), "duplicated local subscription detected") {
+				errLvl = glog.Level(1)
+			}
+			glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err)
 		}
 		if lastTsNs < nextLastTsNs {
 			lastTsNs = nextLastTsNs
@@ -185,7 +190,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
 			return nil
 		}
 
-	glog.V(4).Infof("subscribing remote %s meta change: %v", peer, time.Unix(0, lastTsNs))
+	glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFileId)
 	err = pb.WithFilerClient(true, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index c319f6c78..4f375e764 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -18,7 +18,7 @@ type ReaderCache struct {
 }
 
 type SingleChunkCacher struct {
-	sync.RWMutex
+	sync.Mutex
 	cond        *sync.Cond
 	parent      *ReaderCache
 	chunkFileId string
@@ -76,7 +76,9 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt
 	rc.Lock()
 	defer rc.Unlock()
 	if cacher, found := rc.downloaders[fileId]; found {
-		return cacher.readChunkAt(buffer, offset)
+		if n, err := cacher.readChunkAt(buffer, offset); n != 0 && err == nil {
+			return n, err
+		}
 	}
 	if shouldCache || rc.lookupFileIdFn == nil {
 		n, err := rc.chunkCache.ReadChunkAt(buffer, fileId, uint64(offset))
@@ -176,6 +178,9 @@ func (s *SingleChunkCacher) startCaching() {
 }
 
 func (s *SingleChunkCacher) destroy() {
+	s.Lock()
+	defer s.Unlock()
+
 	if s.data != nil {
 		mem.Free(s.data)
 		s.data = nil
@@ -183,8 +188,8 @@ func (s *SingleChunkCacher) destroy() {
 }
 
 func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
-	s.RLock()
-	defer s.RUnlock()
+	s.Lock()
+	defer s.Unlock()
 
 	for s.completedTime.IsZero() {
 		s.cond.Wait()
@@ -194,6 +199,10 @@ func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
 		return 0, s.err
 	}
 
+	if len(s.data) == 0 {
+		return 0, nil
+	}
+
 	return copy(buf, s.data[offset:]), nil
 }
diff --git a/weed/filer/reader_pattern.go b/weed/filer/reader_pattern.go
index b860bc577..ec73c59a2 100644
--- a/weed/filer/reader_pattern.go
+++ b/weed/filer/reader_pattern.go
@@ -1,8 +1,8 @@
 package filer
 
 type ReaderPattern struct {
-	isStreaming    bool
-	lastReadOffset int64
+	isSequentialCounter int64
+	lastReadStopOffset  int64
 }
 
 // For streaming read: only cache the first chunk
@@ -10,29 +10,20 @@ type ReaderPattern struct {
 
 func NewReaderPattern() *ReaderPattern {
 	return &ReaderPattern{
-		isStreaming:    true,
-		lastReadOffset: -1,
+		isSequentialCounter: 0,
+		lastReadStopOffset:  0,
 	}
 }
 
 func (rp *ReaderPattern) MonitorReadAt(offset int64, size int) {
-	isStreaming := true
-	if rp.lastReadOffset > offset {
-		isStreaming = false
+	if rp.lastReadStopOffset == offset {
+		rp.isSequentialCounter++
+	} else {
+		rp.isSequentialCounter--
 	}
-	if rp.lastReadOffset == -1 {
-		if offset != 0 {
-			isStreaming = false
-		}
-	}
-	rp.lastReadOffset = offset
-	rp.isStreaming = isStreaming
-}
-
-func (rp *ReaderPattern) IsStreamingMode() bool {
-	return rp.isStreaming
+	rp.lastReadStopOffset = offset + int64(size)
 }
 
 func (rp *ReaderPattern) IsRandomMode() bool {
-	return !rp.isStreaming
+	return rp.isSequentialCounter >= 0
 }
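The reader_cache.go change above swaps SingleChunkCacher's RWMutex for a plain Mutex because the struct's sync.Cond must Wait() with its Locker held, and the same lock now also serializes destroy() against readers. A minimal self-contained sketch of that wait-for-completion pattern (names are illustrative, not from the SeaweedFS code):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // result guards its fields with the same Mutex the Cond uses,
    // mirroring SingleChunkCacher: Wait() must be called with cond.L held.
    type result struct {
        sync.Mutex
        cond *sync.Cond
        data []byte
        done bool
    }

    func newResult() *result {
        r := &result{}
        r.cond = sync.NewCond(r)
        return r
    }

    func (r *result) complete(data []byte) {
        r.Lock()
        defer r.Unlock()
        r.data = data
        r.done = true
        r.cond.Broadcast() // wake every reader blocked in read()
    }

    func (r *result) read() []byte {
        r.Lock()
        defer r.Unlock()
        for !r.done { // re-check the predicate after every wakeup
            r.cond.Wait()
        }
        return r.data
    }

    func main() {
        r := newResult()
        go func() {
            time.Sleep(10 * time.Millisecond)
            r.complete([]byte("chunk"))
        }()
        fmt.Printf("%s\n", r.read())
    }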
diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go
index 891bf925b..d8f3c2445 100644
--- a/weed/filer/s3iam_conf.go
+++ b/weed/filer/s3iam_conf.go
@@ -2,9 +2,12 @@ package filer
 
 import (
 	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
-	"io"
 )
 
 func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) error {
@@ -23,3 +26,18 @@ func ProtoToText(writer io.Writer, config proto.Message) error {
 
 	return m.Marshal(writer, config)
 }
+
+// CheckDuplicateAccessKey returns an error message when s3cfg has duplicate access keys
+func CheckDuplicateAccessKey(s3cfg *iam_pb.S3ApiConfiguration) error {
+	accessKeySet := make(map[string]string)
+	for _, ident := range s3cfg.Identities {
+		for _, cred := range ident.Credentials {
+			if userName, found := accessKeySet[cred.AccessKey]; !found {
+				accessKeySet[cred.AccessKey] = ident.Name
+			} else {
+				return fmt.Errorf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, userName)
+			}
+		}
+	}
+	return nil
+}
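The new CheckDuplicateAccessKey above records the first owner of each access key in a map, so a repeated key reports which user already holds it. A hedged usage sketch (the configuration literal and error handling are made up for illustration; assumes the filer, iam_pb, and log imports):

    cfg := &iam_pb.S3ApiConfiguration{
        Identities: []*iam_pb.Identity{
            {Name: "alice", Credentials: []*iam_pb.Credential{{AccessKey: "key-1", SecretKey: "s1"}}},
            {Name: "bob", Credentials: []*iam_pb.Credential{{AccessKey: "key-1", SecretKey: "s2"}}},
        },
    }
    if err := filer.CheckDuplicateAccessKey(cfg); err != nil {
        // err reads: duplicate accessKey[key-1], already configured in user[alice]
        log.Fatal(err)
    }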
"bytes" - . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "testing" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" "github.com/stretchr/testify/assert" @@ -55,3 +56,93 @@ func TestS3Conf(t *testing.T) { assert.Equal(t, "some_access_key1", s3ConfSaved.Identities[0].Credentials[0].AccessKey) assert.Equal(t, "some_secret_key2", s3ConfSaved.Identities[1].Credentials[0].SecretKey) } + +func TestCheckDuplicateAccessKey(t *testing.T) { + var tests = []struct { + s3cfg *iam_pb.S3ApiConfiguration + err string + }{ + { + &iam_pb.S3ApiConfiguration{ + Identities: []*iam_pb.Identity{ + { + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + }, + { + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_TAGGING, + ACTION_LIST, + }, + }, + }, + }, + "", + }, + { + &iam_pb.S3ApiConfiguration{ + Identities: []*iam_pb.Identity{ + { + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + }, + { + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_TAGGING, + ACTION_LIST, + }, + }, + }, + }, + "duplicate accessKey[some_access_key1], already configured in user[some_name]", + }, + } + for i, test := range tests { + err := CheckDuplicateAccessKey(test.s3cfg) + var errString string + if err == nil { + errString = "" + } else { + errString = err.Error() + } + if errString != test.err { + t.Errorf("[%d]: got: %s expected: %s", i, errString, test.err) + } + } +} diff --git a/weed/filer/stream.go b/weed/filer/stream.go index 7da9fd0a0..d1b66e88d 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -18,6 +18,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/wdclient" ) +var getLookupFileIdBackoffSchedule = []time.Duration{ + 150 * time.Millisecond, + 600 * time.Millisecond, + 1800 * time.Millisecond, +} + func HasData(entry *filer_pb.Entry) bool { if len(entry.Content) > 0 { @@ -69,14 +75,22 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writ fileId2Url := make(map[string][]string) for _, chunkView := range chunkViews { - - urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId) + var urlStrings []string + var err error + for _, backoff := range getLookupFileIdBackoffSchedule { + urlStrings, err = masterClient.GetLookupFileIdFunction()(chunkView.FileId) + if err == nil && len(urlStrings) > 0 { + time.Sleep(backoff) + break + } + } if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } else if len(urlStrings) == 0 { - glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) - return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + glog.Error(errUrlNotFound) + return errUrlNotFound } fileId2Url[chunkView.FileId] = urlStrings } diff --git 
diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
index e1f215bd3..8a42aa936 100644
--- a/weed/iamapi/iamapi_management_handlers.go
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -219,8 +219,16 @@ func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values
 		if userName != ident.Name {
 			continue
 		}
+
+		existedActions := make(map[string]bool, len(ident.Actions))
+		for _, action := range ident.Actions {
+			existedActions[action] = true
+		}
+
 		for _, action := range actions {
-			ident.Actions = append(ident.Actions, action)
+			if !existedActions[action] {
+				ident.Actions = append(ident.Actions, action)
+			}
 		}
 		return resp, nil
 	}
@@ -349,7 +357,8 @@ func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu
 	}
 	if !changed {
 		s3cfg.Identities = append(s3cfg.Identities,
-			&iam_pb.Identity{Name: userName,
+			&iam_pb.Identity{
+				Name: userName,
 				Credentials: []*iam_pb.Credential{
 					{
 						AccessKey: accessKeyId,
diff --git a/weed/mount/page_writer.go b/weed/mount/page_writer.go
index 016c4841a..7e3db8e28 100644
--- a/weed/mount/page_writer.go
+++ b/weed/mount/page_writer.go
@@ -29,14 +29,14 @@ func newPageWriter(fh *FileHandle, chunkSize int64) *PageWriter {
 	return pw
 }
 
-func (pw *PageWriter) AddPage(offset int64, data []byte, isSequentail bool) {
+func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool) {
 	glog.V(4).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data)))
 
 	chunkIndex := offset / pw.chunkSize
 	for i := chunkIndex; len(data) > 0; i++ {
 		writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
-		pw.addToOneChunk(i, offset, data[:writeSize], isSequentail)
+		pw.addToOneChunk(i, offset, data[:writeSize], isSequential)
 		offset += writeSize
 		data = data[writeSize:]
 	}
diff --git a/weed/mount/page_writer_pattern.go b/weed/mount/page_writer_pattern.go
index 665056b36..1ec9c9d4c 100644
--- a/weed/mount/page_writer_pattern.go
+++ b/weed/mount/page_writer_pattern.go
@@ -1,9 +1,9 @@
 package mount
 
 type WriterPattern struct {
-	isStreaming     bool
-	lastWriteOffset int64
-	chunkSize       int64
+	isSequentialCounter int64
+	lastWriteStopOffset int64
+	chunkSize           int64
 }
 
 // For streaming write: only cache the first chunk
@@ -12,33 +12,21 @@ type WriterPattern struct {
 
 func NewWriterPattern(chunkSize int64) *WriterPattern {
 	return &WriterPattern{
-		isStreaming:     true,
-		lastWriteOffset: -1,
-		chunkSize:       chunkSize,
+		isSequentialCounter: 0,
+		lastWriteStopOffset: 0,
+		chunkSize:           chunkSize,
 	}
 }
 
 func (rp *WriterPattern) MonitorWriteAt(offset int64, size int) {
-	if rp.lastWriteOffset > offset {
-		rp.isStreaming = false
+	if rp.lastWriteStopOffset == offset {
+		rp.isSequentialCounter++
+	} else {
+		rp.isSequentialCounter--
 	}
-	if rp.lastWriteOffset == -1 {
-		if offset != 0 {
-			rp.isStreaming = false
-		}
-	}
-	rp.lastWriteOffset = offset
-}
-
-func (rp *WriterPattern) IsStreamingMode() bool {
-	return rp.isStreaming
-}
-
-func (rp *WriterPattern) IsRandomMode() bool {
-	return !rp.isStreaming
+	rp.lastWriteStopOffset = offset + int64(size)
 }
 
-func (rp *WriterPattern) Reset() {
-	rp.isStreaming = true
-	rp.lastWriteOffset = -1
+func (rp *WriterPattern) IsSequentialMode() bool {
+	return rp.isSequentialCounter >= 0
 }
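ReaderPattern and WriterPattern above now share one heuristic: an access that starts exactly where the previous one stopped votes a signed counter up, any other offset votes it down, and the sign of the tally decides the mode. Condensed into a single type (illustrative names, not the SeaweedFS ones):

    type accessPattern struct {
        sequentialVotes int64 // ++ for contiguous accesses, -- otherwise
        lastStopOffset  int64 // where the previous access ended
    }

    func (p *accessPattern) monitor(offset int64, size int) {
        if p.lastStopOffset == offset {
            p.sequentialVotes++ // picks up exactly where the last one stopped
        } else {
            p.sequentialVotes--
        }
        p.lastStopOffset = offset + int64(size)
    }

    func (p *accessPattern) isSequential() bool {
        return p.sequentialVotes >= 0
    }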
diff --git a/weed/mount/weedfs_file_write.go b/weed/mount/weedfs_file_write.go
index d14680752..2b7a6cea2 100644
--- a/weed/mount/weedfs_file_write.go
+++ b/weed/mount/weedfs_file_write.go
@@ -58,7 +58,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr
 	entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize)))
 	// glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
 
-	fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsStreamingMode())
+	fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode())
 
 	written = uint32(len(data))
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index fb23d9ce9..f9e97ea22 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -109,6 +109,11 @@ func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []b
 		glog.Warningf("unmarshal error: %v", err)
 		return fmt.Errorf("unmarshal error: %v", err)
 	}
+
+	if err := filer.CheckDuplicateAccessKey(s3ApiConfiguration); err != nil {
+		return err
+	}
+
 	if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil {
 		return err
 	}
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 4ad3454ba..e49d613c4 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -445,9 +445,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
 func setEtag(w http.ResponseWriter, etag string) {
 	if etag != "" {
 		if strings.HasPrefix(etag, "\"") {
-			w.Header().Set("ETag", etag)
+			w.Header()["ETag"] = []string{etag}
 		} else {
-			w.Header().Set("ETag", "\""+etag+"\"")
+			w.Header()["ETag"] = []string{"\"" + etag + "\""}
 		}
 	}
 }
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index 745379e7c..82261ca51 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -90,14 +90,20 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
 
 	peerAddress := findClientAddress(stream.Context(), 0)
 
+	// use negative client id to differentiate from addClient()/deleteClient() used in SubscribeMetadata()
+	req.ClientId = -req.ClientId
+
 	alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId)
 	if alreadyKnown {
-		return fmt.Errorf("duplicated local subscription detected for client %s id %d", clientName, req.ClientId)
+		return fmt.Errorf("duplicated local subscription detected for client %s clientId:%d", clientName, req.ClientId)
 	}
-	defer fs.deleteClient(clientName, req.ClientId)
+	defer func() {
+		glog.V(0).Infof(" - %v local subscribe %s clientId:%d", clientName, req.PathPrefix, req.ClientId)
+		fs.deleteClient(clientName, req.ClientId)
+	}()
 
 	lastReadTime := time.Unix(0, req.SinceNs)
-	glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+	glog.V(0).Infof(" + %v local subscribe %s from %+v clientId:%d", clientName, req.PathPrefix, lastReadTime, req.ClientId)
 
 	eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName)
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index 9bf840f08..0fdc3944f 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -65,8 +65,8 @@ type MasterServer struct {
 
 	boundedLeaderChan chan int
 
-	onPeerUpdatDoneCn      chan string
-	onPeerUpdatDoneCnExist bool
+	onPeerUpdateDoneCn      chan string
+	onPeerUpdateDoneCnExist bool
 
 	// notifying clients
 	clientChansLock sync.RWMutex
@@ -118,7 +118,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se
 		Cluster: cluster.NewCluster(),
 	}
 
 	ms.boundedLeaderChan = make(chan int, 16)
-	ms.onPeerUpdatDoneCn = make(chan string)
+	ms.onPeerUpdateDoneCn = make(chan string)
 
 	ms.MasterClient.OnPeerUpdate = ms.OnPeerUpdate
@@ -366,14 +366,15 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF
 					hashicorpRaft.ServerAddress(peerAddress.ToGrpcAddress()), 0, 0)
 			}
 		}
-		if ms.onPeerUpdatDoneCnExist {
-			ms.onPeerUpdatDoneCn <- peerName
+		if ms.onPeerUpdateDoneCnExist {
+			ms.onPeerUpdateDoneCn <- peerName
 		}
 	} else if isLeader {
 		go func(peerName string) {
+			raftServerRemovalTimeAfter := time.After(RaftServerRemovalTime)
 			for {
 				select {
-				case <-time.After(RaftServerRemovalTime):
+				case <-raftServerRemovalTimeAfter:
 					err := ms.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
 						_, err := client.RaftRemoveServer(context.Background(), &master_pb.RaftRemoveServerRequest{
 							Id: peerName,
@@ -384,14 +385,16 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF
 					if err != nil {
 						glog.Warningf("failed to removing old raft server %s: %v", peerName, err)
 					}
+					glog.V(0).Infof("old raft server %s removed", peerName)
 					return
-				case peerDone := <-ms.onPeerUpdatDoneCn:
+				case peerDone := <-ms.onPeerUpdateDoneCn:
 					if peerName == peerDone {
+						glog.V(0).Infof("raft server %s remove canceled", peerName)
 						return
 					}
 				}
 			}
 		}(peerName)
-		ms.onPeerUpdatDoneCnExist = true
+		ms.onPeerUpdateDoneCnExist = true
 	}
 }
diff --git a/weed/shell/command_s3_configure.go b/weed/shell/command_s3_configure.go
index ddcafd847..422df2e75 100644
--- a/weed/shell/command_s3_configure.go
+++ b/weed/shell/command_s3_configure.go
@@ -4,11 +4,12 @@ import (
 	"bytes"
 	"flag"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/filer"
 	"io"
 	"sort"
 	"strings"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
 )
@@ -164,6 +165,10 @@ func (c *commandS3Configure) Do(args []string, commandEnv *CommandEnv, writer io
 		s3cfg.Identities = append(s3cfg.Identities, &identity)
 	}
 
+	if err = filer.CheckDuplicateAccessKey(s3cfg); err != nil {
+		return err
+	}
+
 	buf.Reset()
 	filer.ProtoToText(&buf, s3cfg)
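The OnPeerUpdate change in master_server.go above hoists time.After out of the for/select loop, so the removal deadline is armed once rather than re-created (and effectively reset) on every loop iteration, while a name sent on onPeerUpdateDoneCn cancels the pending removal. The same cancel-or-timeout shape, as a sketch with illustrative names (assumes the time import):

    // scheduleRemoval removes peer after the grace period unless the peer
    // announces itself again first (its name arrives on done).
    func scheduleRemoval(peer string, grace time.Duration, done <-chan string, remove func(string)) {
        go func() {
            deadline := time.After(grace) // armed once, not per iteration
            for {
                select {
                case <-deadline:
                    remove(peer)
                    return
                case p := <-done:
                    if p == peer {
                        return // peer came back; drop the pending removal
                    }
                }
            }
        }()
    }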
"-force") - if err = commandEnv.confirmIsLocked(args); err != nil { + if err = commandEnv.confirmIsLocked(args); err != nil && *applyChange { return } - if *volumeServer == "" { - return fmt.Errorf("need to specify volume server by -node=<host>:<port>") + if *volumeServer == "" && *volumeRack == "" { + return fmt.Errorf("need to specify volume server by -node=<host>:<port> or source rack") + } + if *targetServer != "" { + c.targetServer = *targetServer + } + if *volumeRack != "" { + c.volumeRack = *volumeRack } for i := 0; i < *retryCount+1; i++ { if err = c.volumeServerEvacuate(commandEnv, *volumeServer, *skipNonMoveable, *applyChange, writer); err == nil { @@ -80,44 +89,59 @@ func (c *commandVolumeServerEvacuate) volumeServerEvacuate(commandEnv *CommandEn // list all the volumes // collect topology information - topologyInfo, _, err := collectTopologyInfo(commandEnv, 0) + c.topologyInfo, _, err = collectTopologyInfo(commandEnv, 0) if err != nil { return err } - if err := c.evacuateNormalVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil { + if err := c.evacuateNormalVolumes(commandEnv, volumeServer, skipNonMoveable, applyChange, writer); err != nil { return err } - if err := c.evacuateEcVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil { + if err := c.evacuateEcVolumes(commandEnv, volumeServer, skipNonMoveable, applyChange, writer); err != nil { return err } return nil } -func (c *commandVolumeServerEvacuate) evacuateNormalVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { +func (c *commandVolumeServerEvacuate) evacuateNormalVolumes(commandEnv *CommandEnv, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { // find this volume server - volumeServers := collectVolumeServersByDc(topologyInfo, "") - thisNode, otherNodes := nodesOtherThan(volumeServers, volumeServer) - if thisNode == nil { + volumeServers := collectVolumeServersByDc(c.topologyInfo, "") + thisNodes, otherNodes := c.nodesOtherThan(volumeServers, volumeServer) + if len(thisNodes) == 0 { return fmt.Errorf("%s is not found in this cluster", volumeServer) } // move away normal volumes - volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo) - for _, diskInfo := range thisNode.info.DiskInfos { - for _, vol := range diskInfo.VolumeInfos { - hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange) - if err != nil { - return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err) - } - if !hasMoved { - if skipNonMoveable { - replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement)) - fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String()) + for _, thisNode := range thisNodes { + for _, diskInfo := range thisNode.info.DiskInfos { + if applyChange { + if topologyInfo, _, err := collectTopologyInfo(commandEnv, 0); err != nil { + fmt.Fprintf(writer, "update topologyInfo %v", err) } else { - return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer) + _, otherNodesNew := c.nodesOtherThan( + collectVolumeServersByDc(topologyInfo, ""), volumeServer) + if len(otherNodesNew) > 0 { + otherNodes = otherNodesNew + c.topologyInfo = topologyInfo + fmt.Fprintf(writer, "topologyInfo updated %v\n", len(otherNodes)) + } + } + } + volumeReplicas, _ := 
collectVolumeReplicaLocations(c.topologyInfo) + for _, vol := range diskInfo.VolumeInfos { + hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange) + if err != nil { + fmt.Fprintf(writer, "move away volume %d from %s: %v", vol.Id, volumeServer, err) + } + if !hasMoved { + if skipNonMoveable { + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement)) + fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String()) + } else { + return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer) + } } } } @@ -125,26 +149,28 @@ func (c *commandVolumeServerEvacuate) evacuateNormalVolumes(commandEnv *CommandE return nil } -func (c *commandVolumeServerEvacuate) evacuateEcVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { +func (c *commandVolumeServerEvacuate) evacuateEcVolumes(commandEnv *CommandEnv, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { // find this ec volume server - ecNodes, _ := collectEcVolumeServersByDc(topologyInfo, "") - thisNode, otherNodes := ecNodesOtherThan(ecNodes, volumeServer) - if thisNode == nil { + ecNodes, _ := collectEcVolumeServersByDc(c.topologyInfo, "") + thisNodes, otherNodes := c.ecNodesOtherThan(ecNodes, volumeServer) + if len(thisNodes) == 0 { return fmt.Errorf("%s is not found in this cluster\n", volumeServer) } // move away ec volumes - for _, diskInfo := range thisNode.info.DiskInfos { - for _, ecShardInfo := range diskInfo.EcShardInfos { - hasMoved, err := c.moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange) - if err != nil { - return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err) - } - if !hasMoved { - if skipNonMoveable { - fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer) - } else { - return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer) + for _, thisNode := range thisNodes { + for _, diskInfo := range thisNode.info.DiskInfos { + for _, ecShardInfo := range diskInfo.EcShardInfos { + hasMoved, err := c.moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange) + if err != nil { + fmt.Fprintf(writer, "move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err) + } + if !hasMoved { + if skipNonMoveable { + fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer) + } else { + return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer) + } } } } @@ -160,9 +186,6 @@ func (c *commandVolumeServerEvacuate) moveAwayOneEcVolume(commandEnv *CommandEnv }) for i := 0; i < len(otherNodes); i++ { emptyNode := otherNodes[i] - if c.targetServer != "" && c.targetServer != emptyNode.info.Id { - continue - } collectionPrefix := "" if ecShardInfo.Collection != "" { collectionPrefix = ecShardInfo.Collection + "_" @@ -207,10 +230,16 @@ func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][ return } -func nodesOtherThan(volumeServers []*Node, thisServer string) (thisNode *Node, otherNodes []*Node) { +func (c *commandVolumeServerEvacuate) nodesOtherThan(volumeServers []*Node, thisServer string) (thisNodes []*Node, otherNodes []*Node) { for _, node := range volumeServers { - if node.info.Id == thisServer { - thisNode = node + if 
node.info.Id == thisServer || (c.volumeRack != "" && node.rack == c.volumeRack) { + thisNodes = append(thisNodes, node) + continue + } + if c.volumeRack != "" && c.volumeRack == node.rack { + continue + } + if c.targetServer != "" && c.targetServer != node.info.Id { continue } otherNodes = append(otherNodes, node) @@ -218,10 +247,16 @@ func nodesOtherThan(volumeServers []*Node, thisServer string) (thisNode *Node, o return } -func ecNodesOtherThan(volumeServers []*EcNode, thisServer string) (thisNode *EcNode, otherNodes []*EcNode) { +func (c *commandVolumeServerEvacuate) ecNodesOtherThan(volumeServers []*EcNode, thisServer string) (thisNodes []*EcNode, otherNodes []*EcNode) { for _, node := range volumeServers { - if node.info.Id == thisServer { - thisNode = node + if node.info.Id == thisServer || (c.volumeRack != "" && string(node.rack) == c.volumeRack) { + thisNodes = append(thisNodes, node) + continue + } + if c.volumeRack != "" && c.volumeRack == string(node.rack) { + continue + } + if c.targetServer != "" && c.targetServer != node.info.Id { continue } otherNodes = append(otherNodes, node) diff --git a/weed/shell/command_volume_server_evacuate_test.go b/weed/shell/command_volume_server_evacuate_test.go index 2cdb94a60..4563f38ba 100644 --- a/weed/shell/command_volume_server_evacuate_test.go +++ b/weed/shell/command_volume_server_evacuate_test.go @@ -6,12 +6,11 @@ import ( ) func TestVolumeServerEvacuate(t *testing.T) { - topologyInfo := parseOutput(topoData) + c := commandVolumeServerEvacuate{} + c.topologyInfo = parseOutput(topoData) volumeServer := "192.168.1.4:8080" - - c := commandVolumeServerEvacuate{} - if err := c.evacuateNormalVolumes(nil, topologyInfo, volumeServer, true, false, os.Stdout); err != nil { + if err := c.evacuateNormalVolumes(nil, volumeServer, true, false, os.Stdout); err != nil { t.Errorf("evacuate: %v", err) } diff --git a/weed/util/constants.go b/weed/util/constants.go index 8438624bf..b6bb3810c 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,7 +5,7 @@ import ( ) var ( - VERSION_NUMBER = fmt.Sprintf("%.02f", 3.15) + VERSION_NUMBER = fmt.Sprintf("%.02f", 3.16) VERSION = sizeLimit + " " + VERSION_NUMBER COMMIT = "" ) diff --git a/weed/util/retry.go b/weed/util/retry.go index b7cd278b3..892341dc1 100644 --- a/weed/util/retry.go +++ b/weed/util/retry.go @@ -32,7 +32,7 @@ func Retry(name string, job func() error) (err error) { return err } -func RetryForever(name string, job func() error, onErrFn func(err error) bool) { +func RetryForever(name string, job func() error, onErrFn func(err error) (shouldContinue bool)) { waitTime := time.Second for { err := job() diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 3e76dc0c5..d6a06bb57 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -2,6 +2,7 @@ package wdclient import ( "context" + "fmt" "github.com/chrislusf/seaweedfs/weed/stats" "math/rand" "time" @@ -44,7 +45,7 @@ func (mc *MasterClient) GetLookupFileIdFunction() LookupFileIdFunctionType { func (mc *MasterClient) LookupFileIdWithFallback(fileId string) (fullUrls []string, err error) { fullUrls, err = mc.vidMap.LookupFileId(fileId) - if err == nil { + if err == nil && len(fullUrls) > 0 { return } err = pb.WithMasterClient(false, mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { @@ -52,7 +53,7 @@ func (mc *MasterClient) LookupFileIdWithFallback(fileId string) (fullUrls []stri VolumeOrFileIds: []string{fileId}, }) if err != nil { - return err + return 
fmt.Errorf("LookupVolume failed: %v", err) } for vid, vidLocation := range resp.VolumeIdLocations { for _, vidLoc := range vidLocation.Locations { @@ -65,7 +66,6 @@ func (mc *MasterClient) LookupFileIdWithFallback(fileId string) (fullUrls []stri fullUrls = append(fullUrls, "http://"+loc.Url+"/"+fileId) } } - return nil }) return |
