Fix dependencies
This commit is contained in:
parent
8d1a3f4fd7
commit
0e2449482f
27 changed files with 1117 additions and 548 deletions
7
go.mod
7
go.mod
|
@ -19,6 +19,7 @@ module code.vikunja.io/api
|
|||
require (
|
||||
4d63.com/embedfiles v1.0.0 // indirect
|
||||
4d63.com/tz v1.1.0
|
||||
cloud.google.com/go v0.37.4 // indirect
|
||||
code.vikunja.io/web v0.0.0-20200208214421-c90649369427
|
||||
gitea.com/xorm/tests v0.5.6 // indirect
|
||||
gitea.com/xorm/xorm-redis-cache v0.0.0-20191113062523-5a6a9e2ab9f2
|
||||
|
@ -32,14 +33,15 @@ require (
|
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/fzipp/gocyclo v0.0.0-20150627053110-6acd4345c835
|
||||
github.com/garyburd/redigo v1.6.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.4.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.3 // indirect
|
||||
github.com/go-openapi/spec v0.19.4 // indirect
|
||||
github.com/go-redis/redis v6.15.7+incompatible
|
||||
github.com/go-redis/redis/v7 v7.2.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.5.0
|
||||
github.com/go-testfixtures/testfixtures/v3 v3.1.1
|
||||
github.com/go-xorm/core v0.6.2 // indirect
|
||||
github.com/go-xorm/xorm v0.7.9 // indirect
|
||||
github.com/golang/protobuf v1.3.2 // indirect
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf
|
||||
github.com/imdario/mergo v0.3.9
|
||||
github.com/jgautheron/goconst v0.0.0-20200227150835-cda7ea3bf591
|
||||
|
@ -51,6 +53,8 @@ require (
|
|||
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
|
||||
github.com/olekukonko/tablewriter v0.0.4
|
||||
github.com/onsi/ginkgo v1.10.1 // indirect
|
||||
github.com/onsi/gomega v1.7.0 // indirect
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/pelletier/go-toml v1.4.0 // indirect
|
||||
github.com/prometheus/client_golang v0.9.4
|
||||
|
@ -68,6 +72,7 @@ require (
|
|||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
gopkg.in/d4l3k/messagediff.v1 v1.2.1
|
||||
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a
|
||||
|
|
60
go.sum
60
go.sum
|
@ -43,8 +43,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLM
|
|||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo=
|
||||
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
|
@ -55,7 +53,6 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe
|
|||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
|
@ -120,11 +117,8 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF
|
|||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-redis/redis v6.14.0+incompatible h1:AMPZkM7PbsJbilelrJUAyC4xQbGROTOLSuDd7fnMXCI=
|
||||
github.com/go-redis/redis v6.14.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
|
||||
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
|
||||
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
|
@ -163,9 +157,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
|
|||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc h1:cJlkeAx1QYgO5N80aF5xRGstVsRQwgLR7uA2FnP1ZjY=
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf h1:vc7Dmrk4JwS0ZPS6WZvWlwDflgDTA26jItmbSj83nug=
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
|
@ -179,8 +172,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
|||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
|
@ -188,8 +179,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
|
|||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
||||
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jgautheron/goconst v0.0.0-20170703170152-9740945f5dcb h1:D5s1HIu80AcMGcqmk7fNIVptmAubVHHaj3v5Upex6Zs=
|
||||
github.com/jgautheron/goconst v0.0.0-20170703170152-9740945f5dcb/go.mod h1:82TxjOpWQiPmywlbIaB2ZkqJoSYJdLGPgAJDvM3PbKc=
|
||||
github.com/jgautheron/goconst v0.0.0-20200227150835-cda7ea3bf591 h1:x/BpEhm6aL26o4TLtcU0loJ7B3+69jielrGc70V7Yb4=
|
||||
github.com/jgautheron/goconst v0.0.0-20200227150835-cda7ea3bf591/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
|
@ -200,6 +189,7 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
|
|||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
|
@ -218,8 +208,6 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8
|
|||
github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
|
||||
github.com/labstack/echo/v4 v4.1.7-0.20190627175217-8fb7b5be270f h1:fNJtR+TNyxTdYCZU40fc8Or8RyBqMOKYNv+Zay5gjvk=
|
||||
github.com/labstack/echo/v4 v4.1.7-0.20190627175217-8fb7b5be270f/go.mod h1:kU/7PwzgNxZH4das4XNsSpBSOD09XIF5YEPzjpkGnGE=
|
||||
github.com/labstack/echo/v4 v4.1.14 h1:h8XP66UfB3tUm+L3QPw7tmwAu3pJaA/nyfHPCcz46ic=
|
||||
github.com/labstack/echo/v4 v4.1.14/go.mod h1:Q5KZ1vD3V5FEzjM79hjwVrC3ABr7F5IdM23bXQMRDGg=
|
||||
github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
|
||||
github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
|
||||
github.com/labstack/gommon v0.2.9 h1:heVeuAYtevIQVYkGj6A41dtfT91LrvFG220lavpWhrU=
|
||||
|
@ -244,8 +232,6 @@ github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM
|
|||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
||||
|
@ -255,11 +241,8 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
|||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
|
||||
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
|
@ -280,17 +263,17 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9
|
|||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=
|
||||
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
|
@ -337,8 +320,6 @@ github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0
|
|||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnPAvcRWakIPpokB9w780/KwrNLnfPA=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
|
@ -346,7 +327,9 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
|
|||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
|
@ -356,8 +339,6 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
|||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
|
||||
github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
|
@ -368,11 +349,7 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
|||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
|
||||
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
||||
github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs=
|
||||
github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
|
@ -398,7 +375,6 @@ github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJ
|
|||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
|
||||
github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI=
|
||||
github.com/ulule/limiter/v3 v3.3.0 h1:DuMRthpkl1wW9Em6xOVw5HMHnbDumSIDydiMqP0PTXs=
|
||||
github.com/ulule/limiter/v3 v3.3.0/go.mod h1:E6sfg3hfRgW+yFvkE/rZf6YLqXYFMWTmZaZKvdEiQsA=
|
||||
|
@ -423,7 +399,6 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
|
|||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
@ -431,9 +406,6 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 h1:ydJNl0ENAG67pFbB+9tfhiL2pYqLhfoaZFw/cjLhY4A=
|
||||
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200208060501-ecb85df21340 h1:KOcEaR10tFr7gdJV2GCKw8Os5yED1u1aOqHjOAb6d2Y=
|
||||
golang.org/x/crypto v0.0.0-20200208060501-ecb85df21340/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 h1:DOmugCavvUtnUD114C1Wh+UgTgQZ4pMLzXxi1pSt+/Y=
|
||||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
@ -442,8 +414,6 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk
|
|||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
|
@ -470,10 +440,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -490,7 +456,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -503,12 +468,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
|
@ -533,11 +493,8 @@ golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgw
|
|||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628034336-212fb13d595e h1:ZlQjfVdpDxeqxRfmO30CdqWWzTvgRCj0MxaUVfxEG1k=
|
||||
golang.org/x/tools v0.0.0-20190628034336-212fb13d595e/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d h1:/iIZNFGxc/a7C3yWjGcnboV+Tkc7mxr+p6fDztwoxuM=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
|
@ -561,6 +518,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
|
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/d4l3k/messagediff.v1 v1.2.1 h1:70AthpjunwzUiarMHyED52mj9UwtAnE89l1Gmrt3EU0=
|
||||
gopkg.in/d4l3k/messagediff.v1 v1.2.1/go.mod h1:EUzikiKadqXWcD1AzJLagx0j/BeeWGtn++04Xniyg44=
|
||||
|
@ -588,10 +546,6 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
src.techknowlogick.com/xgo v0.0.0-20190507142556-a5b29ecb0ff4 h1:DZKMg4qnT7UIyB5ZaC6ZqltF2K5KhA1oQ2PdxOLZ3jg=
|
||||
src.techknowlogick.com/xgo v0.0.0-20190507142556-a5b29ecb0ff4/go.mod h1:Ood88figJtEukTnU20P1IrXhyAkbOIGi4YzmeHVtGH0=
|
||||
src.techknowlogick.com/xgo v0.0.0-20200319213703-c43d4c44a6ac h1:7yl6vmo+RWjT/qFoMlJE1smvs83j/c7x9sDNTz3n/BM=
|
||||
src.techknowlogick.com/xgo v0.0.0-20200319213703-c43d4c44a6ac/go.mod h1:31CE1YKtDOrKTk9PSnjTpe6YbO6W/0LTYZ1VskL09oU=
|
||||
src.techknowlogick.com/xgo v0.0.0-20200408234745-bb0faa361273 h1:dE6ry9rVwDn3soD4wPCXqEG60AZTuhniZzHdnj3c+74=
|
||||
src.techknowlogick.com/xgo v0.0.0-20200408234745-bb0faa361273/go.mod h1:31CE1YKtDOrKTk9PSnjTpe6YbO6W/0LTYZ1VskL09oU=
|
||||
src.techknowlogick.com/xormigrate v1.1.0 h1:Ob79c1pOO+voMB9roa2eHZByT+TODwC51+Mn9e3HoTI=
|
||||
|
|
4
vendor/github.com/go-redis/redis/.travis.yml
generated
vendored
4
vendor/github.com/go-redis/redis/.travis.yml
generated
vendored
|
@ -5,10 +5,10 @@ services:
|
|||
- redis-server
|
||||
|
||||
go:
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
|
|
4
vendor/github.com/go-redis/redis/CHANGELOG.md
generated
vendored
4
vendor/github.com/go-redis/redis/CHANGELOG.md
generated
vendored
|
@ -1,5 +1,9 @@
|
|||
# Changelog
|
||||
|
||||
## Unreleased
|
||||
|
||||
- Cluster and Ring pipelines process commands for each node in its own goroutine.
|
||||
|
||||
## 6.14
|
||||
|
||||
- Added Options.MinIdleConns.
|
||||
|
|
4
vendor/github.com/go-redis/redis/Makefile
generated
vendored
4
vendor/github.com/go-redis/redis/Makefile
generated
vendored
|
@ -3,6 +3,8 @@ all: testdeps
|
|||
go test ./... -short -race
|
||||
env GOOS=linux GOARCH=386 go test ./...
|
||||
go vet
|
||||
go get github.com/gordonklaus/ineffassign
|
||||
ineffassign .
|
||||
|
||||
testdeps: testdata/redis/src/redis-server
|
||||
|
||||
|
@ -13,7 +15,7 @@ bench: testdeps
|
|||
|
||||
testdata/redis:
|
||||
mkdir -p $@
|
||||
wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@
|
||||
wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@
|
||||
|
||||
testdata/redis/src/redis-server: testdata/redis
|
||||
sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $</src/Makefile
|
||||
|
|
4
vendor/github.com/go-redis/redis/README.md
generated
vendored
4
vendor/github.com/go-redis/redis/README.md
generated
vendored
|
@ -9,7 +9,7 @@ Supports:
|
|||
- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
|
||||
- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
|
||||
- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
|
||||
- [Transactions](https://godoc.org/github.com/go-redis/redis#Multi).
|
||||
- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
|
||||
- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
|
||||
- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
|
||||
- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
|
||||
|
@ -143,4 +143,4 @@ BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op
|
|||
|
||||
- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
|
||||
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
|
||||
- [Golang message task queue](https://github.com/go-msgqueue/msgqueue)
|
||||
- [Golang message task queue](https://github.com/vmihailenco/taskq)
|
||||
|
|
365
vendor/github.com/go-redis/redis/cluster.go
generated
vendored
365
vendor/github.com/go-redis/redis/cluster.go
generated
vendored
|
@ -3,7 +3,6 @@ package redis
|
|||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
|
@ -18,7 +17,6 @@ import (
|
|||
"github.com/go-redis/redis/internal/hashtag"
|
||||
"github.com/go-redis/redis/internal/pool"
|
||||
"github.com/go-redis/redis/internal/proto"
|
||||
"github.com/go-redis/redis/internal/singleflight"
|
||||
)
|
||||
|
||||
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
|
||||
|
@ -50,6 +48,9 @@ type ClusterOptions struct {
|
|||
// and Cluster.ReloadState to manually trigger state reloading.
|
||||
ClusterSlots func() ([]ClusterSlot, error)
|
||||
|
||||
// Optional hook that is called when a new node is created.
|
||||
OnNewNode func(*Client)
|
||||
|
||||
// Following options are copied from Options struct.
|
||||
|
||||
OnConnect func(*Conn) error
|
||||
|
@ -82,7 +83,7 @@ func (opt *ClusterOptions) init() {
|
|||
opt.MaxRedirects = 8
|
||||
}
|
||||
|
||||
if opt.RouteByLatency || opt.RouteRandomly {
|
||||
if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
|
||||
opt.ReadOnly = true
|
||||
}
|
||||
|
||||
|
@ -166,6 +167,10 @@ func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
|
|||
go node.updateLatency()
|
||||
}
|
||||
|
||||
if clOpt.OnNewNode != nil {
|
||||
clOpt.OnNewNode(node.Client)
|
||||
}
|
||||
|
||||
return &node
|
||||
}
|
||||
|
||||
|
@ -237,8 +242,6 @@ type clusterNodes struct {
|
|||
clusterAddrs []string
|
||||
closed bool
|
||||
|
||||
nodeCreateGroup singleflight.Group
|
||||
|
||||
_generation uint32 // atomic
|
||||
}
|
||||
|
||||
|
@ -341,11 +344,6 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
|
|||
return node, nil
|
||||
}
|
||||
|
||||
v, err := c.nodeCreateGroup.Do(addr, func() (interface{}, error) {
|
||||
node := newClusterNode(c.opt, addr)
|
||||
return node, nil
|
||||
})
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
@ -355,15 +353,13 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
|
|||
|
||||
node, ok := c.allNodes[addr]
|
||||
if ok {
|
||||
_ = v.(*clusterNode).Close()
|
||||
return node, err
|
||||
}
|
||||
node = v.(*clusterNode)
|
||||
|
||||
node = newClusterNode(c.opt, addr)
|
||||
|
||||
c.allAddrs = appendIfNotExists(c.allAddrs, addr)
|
||||
if err == nil {
|
||||
c.clusterAddrs = append(c.clusterAddrs, addr)
|
||||
}
|
||||
c.clusterAddrs = append(c.clusterAddrs, addr)
|
||||
c.allNodes[addr] = node
|
||||
|
||||
return node, err
|
||||
|
@ -533,10 +529,12 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
|
|||
n := rand.Intn(len(nodes)-1) + 1
|
||||
slave = nodes[n]
|
||||
if !slave.Loading() {
|
||||
break
|
||||
return slave, nil
|
||||
}
|
||||
}
|
||||
return slave, nil
|
||||
|
||||
// All slaves are loading - use master.
|
||||
return nodes[0], nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -580,23 +578,12 @@ func (c *clusterState) slotNodes(slot int) []*clusterNode {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *clusterState) IsConsistent() bool {
|
||||
if c.nodes.opt.ClusterSlots != nil {
|
||||
return true
|
||||
}
|
||||
return len(c.Masters) <= len(c.Slaves)
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type clusterStateHolder struct {
|
||||
load func() (*clusterState, error)
|
||||
|
||||
state atomic.Value
|
||||
|
||||
firstErrMu sync.RWMutex
|
||||
firstErr error
|
||||
|
||||
state atomic.Value
|
||||
reloading uint32 // atomic
|
||||
}
|
||||
|
||||
|
@ -607,24 +594,8 @@ func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder
|
|||
}
|
||||
|
||||
func (c *clusterStateHolder) Reload() (*clusterState, error) {
|
||||
state, err := c.reload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !state.IsConsistent() {
|
||||
time.AfterFunc(time.Second, c.LazyReload)
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func (c *clusterStateHolder) reload() (*clusterState, error) {
|
||||
state, err := c.load()
|
||||
if err != nil {
|
||||
c.firstErrMu.Lock()
|
||||
if c.firstErr == nil {
|
||||
c.firstErr = err
|
||||
}
|
||||
c.firstErrMu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
c.state.Store(state)
|
||||
|
@ -638,16 +609,11 @@ func (c *clusterStateHolder) LazyReload() {
|
|||
go func() {
|
||||
defer atomic.StoreUint32(&c.reloading, 0)
|
||||
|
||||
for {
|
||||
state, err := c.reload()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if state.IsConsistent() {
|
||||
return
|
||||
}
|
||||
_, err := c.Reload()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}()
|
||||
}
|
||||
|
||||
|
@ -660,15 +626,7 @@ func (c *clusterStateHolder) Get() (*clusterState, error) {
|
|||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
c.firstErrMu.RLock()
|
||||
err := c.firstErr
|
||||
c.firstErrMu.RUnlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, errors.New("redis: cluster has no state")
|
||||
return c.Reload()
|
||||
}
|
||||
|
||||
func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
|
||||
|
@ -716,10 +674,6 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
|
|||
c.processTxPipeline = c.defaultProcessTxPipeline
|
||||
|
||||
c.init()
|
||||
|
||||
_, _ = c.state.Reload()
|
||||
_, _ = c.cmdsInfoCache.Get()
|
||||
|
||||
if opt.IdleCheckFrequency > 0 {
|
||||
go c.reaper(opt.IdleCheckFrequency)
|
||||
}
|
||||
|
@ -727,17 +681,17 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
|
|||
return c
|
||||
}
|
||||
|
||||
// ReloadState reloads cluster state. It calls ClusterSlots func
|
||||
func (c *ClusterClient) init() {
|
||||
c.cmdable.setProcessor(c.Process)
|
||||
}
|
||||
|
||||
// ReloadState reloads cluster state. If available it calls ClusterSlots func
|
||||
// to get cluster slots information.
|
||||
func (c *ClusterClient) ReloadState() error {
|
||||
_, err := c.state.Reload()
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterClient) init() {
|
||||
c.cmdable.setProcessor(c.Process)
|
||||
}
|
||||
|
||||
func (c *ClusterClient) Context() context.Context {
|
||||
if c.ctx != nil {
|
||||
return c.ctx
|
||||
|
@ -749,12 +703,12 @@ func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
|
|||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
c2 := c.copy()
|
||||
c2 := c.clone()
|
||||
c2.ctx = ctx
|
||||
return c2
|
||||
}
|
||||
|
||||
func (c *ClusterClient) copy() *ClusterClient {
|
||||
func (c *ClusterClient) clone() *ClusterClient {
|
||||
cp := *c
|
||||
cp.init()
|
||||
return &cp
|
||||
|
@ -818,6 +772,11 @@ func cmdSlot(cmd Cmder, pos int) int {
|
|||
}
|
||||
|
||||
func (c *ClusterClient) cmdSlot(cmd Cmder) int {
|
||||
args := cmd.Args()
|
||||
if args[0] == "cluster" && args[1] == "getkeysinslot" {
|
||||
return args[2].(int)
|
||||
}
|
||||
|
||||
cmdInfo := c.cmdInfo(cmd.Name())
|
||||
return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
|
||||
}
|
||||
|
@ -829,9 +788,9 @@ func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) {
|
|||
}
|
||||
|
||||
cmdInfo := c.cmdInfo(cmd.Name())
|
||||
slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
|
||||
slot := c.cmdSlot(cmd)
|
||||
|
||||
if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
|
||||
if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
|
||||
if c.opt.RouteByLatency {
|
||||
node, err := state.slotClosestNode(slot)
|
||||
return slot, node, err
|
||||
|
@ -890,15 +849,12 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
|
|||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if internal.IsRetryableError(err, true) {
|
||||
if err != Nil {
|
||||
c.state.LazyReload()
|
||||
continue
|
||||
}
|
||||
|
||||
moved, ask, addr := internal.IsMovedError(err)
|
||||
if moved || ask {
|
||||
c.state.LazyReload()
|
||||
node, err = c.nodes.GetOrCreate(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -906,7 +862,7 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
|
|||
continue
|
||||
}
|
||||
|
||||
if err == pool.ErrClosed {
|
||||
if err == pool.ErrClosed || internal.IsReadOnlyError(err) {
|
||||
node, err = c.slotMasterNode(slot)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -914,6 +870,10 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
|
|||
continue
|
||||
}
|
||||
|
||||
if internal.IsRetryableError(err, true) {
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -978,16 +938,34 @@ func (c *ClusterClient) defaultProcess(cmd Cmder) error {
|
|||
if err == nil {
|
||||
break
|
||||
}
|
||||
if err != Nil {
|
||||
c.state.LazyReload()
|
||||
}
|
||||
|
||||
// If slave is loading - read from master.
|
||||
// If slave is loading - pick another node.
|
||||
if c.opt.ReadOnly && internal.IsLoadingError(err) {
|
||||
node.MarkAsLoading()
|
||||
node = nil
|
||||
continue
|
||||
}
|
||||
|
||||
var moved bool
|
||||
var addr string
|
||||
moved, ask, addr = internal.IsMovedError(err)
|
||||
if moved || ask {
|
||||
node, err = c.nodes.GetOrCreate(addr)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err == pool.ErrClosed || internal.IsReadOnlyError(err) {
|
||||
node = nil
|
||||
continue
|
||||
}
|
||||
|
||||
if internal.IsRetryableError(err, true) {
|
||||
c.state.LazyReload()
|
||||
|
||||
// First retry the same node.
|
||||
if attempt == 0 {
|
||||
continue
|
||||
|
@ -1001,24 +979,6 @@ func (c *ClusterClient) defaultProcess(cmd Cmder) error {
|
|||
continue
|
||||
}
|
||||
|
||||
var moved bool
|
||||
var addr string
|
||||
moved, ask, addr = internal.IsMovedError(err)
|
||||
if moved || ask {
|
||||
c.state.LazyReload()
|
||||
|
||||
node, err = c.nodes.GetOrCreate(addr)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err == pool.ErrClosed {
|
||||
node = nil
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
|
@ -1238,10 +1198,12 @@ func (c *ClusterClient) WrapProcessPipeline(
|
|||
fn func(oldProcess func([]Cmder) error) func([]Cmder) error,
|
||||
) {
|
||||
c.processPipeline = fn(c.processPipeline)
|
||||
c.processTxPipeline = fn(c.processTxPipeline)
|
||||
}
|
||||
|
||||
func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error {
|
||||
cmdsMap, err := c.mapCmdsByNode(cmds)
|
||||
cmdsMap := newCmdsMap()
|
||||
err := c.mapCmdsByNode(cmds, cmdsMap)
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
return err
|
||||
|
@ -1252,28 +1214,31 @@ func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error {
|
|||
time.Sleep(c.retryBackoff(attempt))
|
||||
}
|
||||
|
||||
failedCmds := make(map[*clusterNode][]Cmder)
|
||||
failedCmds := newCmdsMap()
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for node, cmds := range cmdsMap {
|
||||
cn, err := node.Client.getConn()
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
c.remapCmds(cmds, failedCmds)
|
||||
} else {
|
||||
setCmdsErr(cmds, err)
|
||||
for node, cmds := range cmdsMap.m {
|
||||
wg.Add(1)
|
||||
go func(node *clusterNode, cmds []Cmder) {
|
||||
defer wg.Done()
|
||||
|
||||
cn, err := node.Client.getConn()
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
c.mapCmdsByNode(cmds, failedCmds)
|
||||
} else {
|
||||
setCmdsErr(cmds, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = c.pipelineProcessCmds(node, cn, cmds, failedCmds)
|
||||
if err == nil || internal.IsRedisError(err) {
|
||||
node.Client.connPool.Put(cn)
|
||||
} else {
|
||||
node.Client.connPool.Remove(cn)
|
||||
}
|
||||
err = c.pipelineProcessCmds(node, cn, cmds, failedCmds)
|
||||
node.Client.releaseConnStrict(cn, err)
|
||||
}(node, cmds)
|
||||
}
|
||||
|
||||
if len(failedCmds) == 0 {
|
||||
wg.Wait()
|
||||
if len(failedCmds.m) == 0 {
|
||||
break
|
||||
}
|
||||
cmdsMap = failedCmds
|
||||
|
@ -1282,14 +1247,24 @@ func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error {
|
|||
return cmdsFirstErr(cmds)
|
||||
}
|
||||
|
||||
func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
|
||||
type cmdsMap struct {
|
||||
mu sync.Mutex
|
||||
m map[*clusterNode][]Cmder
|
||||
}
|
||||
|
||||
func newCmdsMap() *cmdsMap {
|
||||
return &cmdsMap{
|
||||
m: make(map[*clusterNode][]Cmder),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap *cmdsMap) error {
|
||||
state, err := c.state.Get()
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
cmdsMap := make(map[*clusterNode][]Cmder)
|
||||
cmdsAreReadOnly := c.cmdsAreReadOnly(cmds)
|
||||
for _, cmd := range cmds {
|
||||
var node *clusterNode
|
||||
|
@ -1301,11 +1276,13 @@ func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, e
|
|||
node, err = state.slotMasterNode(slot)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
cmdsMap[node] = append(cmdsMap[node], cmd)
|
||||
cmdsMap.mu.Lock()
|
||||
cmdsMap.m[node] = append(cmdsMap.m[node], cmd)
|
||||
cmdsMap.mu.Unlock()
|
||||
}
|
||||
return cmdsMap, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
|
||||
|
@ -1318,39 +1295,30 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) {
|
||||
remappedCmds, err := c.mapCmdsByNode(cmds)
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
return
|
||||
}
|
||||
|
||||
for node, cmds := range remappedCmds {
|
||||
failedCmds[node] = cmds
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ClusterClient) pipelineProcessCmds(
|
||||
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
|
||||
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
|
||||
) error {
|
||||
err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
|
||||
return writeCmd(wr, cmds...)
|
||||
})
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
failedCmds[node] = cmds
|
||||
failedCmds.mu.Lock()
|
||||
failedCmds.m[node] = cmds
|
||||
failedCmds.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
|
||||
return c.pipelineReadCmds(rd, cmds, failedCmds)
|
||||
return c.pipelineReadCmds(node, rd, cmds, failedCmds)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterClient) pipelineReadCmds(
|
||||
rd *proto.Reader, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
|
||||
node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
|
||||
) error {
|
||||
var firstErr error
|
||||
for _, cmd := range cmds {
|
||||
err := cmd.readReply(rd)
|
||||
if err == nil {
|
||||
|
@ -1365,13 +1333,18 @@ func (c *ClusterClient) pipelineReadCmds(
|
|||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
failedCmds.mu.Lock()
|
||||
failedCmds.m[node] = append(failedCmds.m[node], cmd)
|
||||
failedCmds.mu.Unlock()
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return firstErr
|
||||
}
|
||||
|
||||
func (c *ClusterClient) checkMovedErr(
|
||||
cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder,
|
||||
cmd Cmder, err error, failedCmds *cmdsMap,
|
||||
) bool {
|
||||
moved, ask, addr := internal.IsMovedError(err)
|
||||
|
||||
|
@ -1383,7 +1356,9 @@ func (c *ClusterClient) checkMovedErr(
|
|||
return false
|
||||
}
|
||||
|
||||
failedCmds[node] = append(failedCmds[node], cmd)
|
||||
failedCmds.mu.Lock()
|
||||
failedCmds.m[node] = append(failedCmds.m[node], cmd)
|
||||
failedCmds.mu.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -1393,7 +1368,9 @@ func (c *ClusterClient) checkMovedErr(
|
|||
return false
|
||||
}
|
||||
|
||||
failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
|
||||
failedCmds.mu.Lock()
|
||||
failedCmds.m[node] = append(failedCmds.m[node], NewCmd("ASKING"), cmd)
|
||||
failedCmds.mu.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -1433,31 +1410,34 @@ func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error {
|
|||
time.Sleep(c.retryBackoff(attempt))
|
||||
}
|
||||
|
||||
failedCmds := make(map[*clusterNode][]Cmder)
|
||||
failedCmds := newCmdsMap()
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for node, cmds := range cmdsMap {
|
||||
cn, err := node.Client.getConn()
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
c.remapCmds(cmds, failedCmds)
|
||||
} else {
|
||||
setCmdsErr(cmds, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(node *clusterNode, cmds []Cmder) {
|
||||
defer wg.Done()
|
||||
|
||||
err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
|
||||
if err == nil || internal.IsRedisError(err) {
|
||||
node.Client.connPool.Put(cn)
|
||||
} else {
|
||||
node.Client.connPool.Remove(cn)
|
||||
}
|
||||
cn, err := node.Client.getConn()
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
c.mapCmdsByNode(cmds, failedCmds)
|
||||
} else {
|
||||
setCmdsErr(cmds, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
|
||||
node.Client.releaseConnStrict(cn, err)
|
||||
}(node, cmds)
|
||||
}
|
||||
|
||||
if len(failedCmds) == 0 {
|
||||
wg.Wait()
|
||||
if len(failedCmds.m) == 0 {
|
||||
break
|
||||
}
|
||||
cmdsMap = failedCmds
|
||||
cmdsMap = failedCmds.m
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1474,14 +1454,16 @@ func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
|
|||
}
|
||||
|
||||
func (c *ClusterClient) txPipelineProcessCmds(
|
||||
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
|
||||
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
|
||||
) error {
|
||||
err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
|
||||
return txPipelineWriteMulti(wr, cmds)
|
||||
})
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
failedCmds[node] = cmds
|
||||
failedCmds.mu.Lock()
|
||||
failedCmds.m[node] = cmds
|
||||
failedCmds.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1497,7 +1479,7 @@ func (c *ClusterClient) txPipelineProcessCmds(
|
|||
}
|
||||
|
||||
func (c *ClusterClient) txPipelineReadQueued(
|
||||
rd *proto.Reader, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
|
||||
rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
|
||||
) error {
|
||||
// Parse queued replies.
|
||||
var statusCmd StatusCmd
|
||||
|
@ -1546,40 +1528,51 @@ func (c *ClusterClient) txPipelineReadQueued(
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterClient) pubSub(channels []string) *PubSub {
|
||||
func (c *ClusterClient) pubSub() *PubSub {
|
||||
var node *clusterNode
|
||||
pubsub := &PubSub{
|
||||
opt: c.opt.clientOptions(),
|
||||
|
||||
newConn: func(channels []string) (*pool.Conn, error) {
|
||||
if node == nil {
|
||||
var slot int
|
||||
if len(channels) > 0 {
|
||||
slot = hashtag.Slot(channels[0])
|
||||
} else {
|
||||
slot = -1
|
||||
}
|
||||
|
||||
masterNode, err := c.slotMasterNode(slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node = masterNode
|
||||
if node != nil {
|
||||
panic("node != nil")
|
||||
}
|
||||
return node.Client.newConn()
|
||||
|
||||
var err error
|
||||
if len(channels) > 0 {
|
||||
slot := hashtag.Slot(channels[0])
|
||||
node, err = c.slotMasterNode(slot)
|
||||
} else {
|
||||
node, err = c.nodes.Random()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cn, err := node.Client.newConn()
|
||||
if err != nil {
|
||||
node = nil
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cn, nil
|
||||
},
|
||||
closeConn: func(cn *pool.Conn) error {
|
||||
return node.Client.connPool.CloseConn(cn)
|
||||
err := node.Client.connPool.CloseConn(cn)
|
||||
node = nil
|
||||
return err
|
||||
},
|
||||
}
|
||||
pubsub.init()
|
||||
|
||||
return pubsub
|
||||
}
|
||||
|
||||
// Subscribe subscribes the client to the specified channels.
|
||||
// Channels can be omitted to create empty subscription.
|
||||
func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
|
||||
pubsub := c.pubSub(channels)
|
||||
pubsub := c.pubSub()
|
||||
if len(channels) > 0 {
|
||||
_ = pubsub.Subscribe(channels...)
|
||||
}
|
||||
|
@ -1589,7 +1582,7 @@ func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
|
|||
// PSubscribe subscribes the client to the given patterns.
|
||||
// Patterns can be omitted to create empty subscription.
|
||||
func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
|
||||
pubsub := c.pubSub(channels)
|
||||
pubsub := c.pubSub()
|
||||
if len(channels) > 0 {
|
||||
_ = pubsub.PSubscribe(channels...)
|
||||
}
|
||||
|
|
112 vendor/github.com/go-redis/redis/command.go generated vendored

@@ -183,7 +183,7 @@ func (cmd *Cmd) Int() (int, error) {
 	case string:
 		return strconv.Atoi(val)
 	default:
-		err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+		err := fmt.Errorf("redis: unexpected type=%T for Int", val)
 		return 0, err
 	}
 }
@@ -218,6 +218,25 @@ func (cmd *Cmd) Uint64() (uint64, error) {
 	}
 }
 
+func (cmd *Cmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return float32(val), nil
+	case string:
+		f, err := strconv.ParseFloat(val, 32)
+		if err != nil {
+			return 0, err
+		}
+		return float32(f), nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+		return 0, err
+	}
+}
+
 func (cmd *Cmd) Float64() (float64, error) {
 	if cmd.err != nil {
 		return 0, cmd.err
@@ -585,6 +604,17 @@ func (cmd *StringCmd) Uint64() (uint64, error) {
 	return strconv.ParseUint(cmd.Val(), 10, 64)
 }
 
+func (cmd *StringCmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	f, err := strconv.ParseFloat(cmd.Val(), 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
 func (cmd *StringCmd) Float64() (float64, error) {
 	if cmd.err != nil {
 		return 0, cmd.err
@@ -687,12 +717,12 @@ func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
 func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
 	ss := make([]string, 0, n)
 	for i := int64(0); i < n; i++ {
-		s, err := rd.ReadString()
-		if err == Nil {
+		switch s, err := rd.ReadString(); {
+		case err == Nil:
 			ss = append(ss, "")
-		} else if err != nil {
+		case err != nil:
 			return nil, err
-		} else {
+		default:
 			ss = append(ss, s)
 		}
 	}
@@ -969,14 +999,20 @@ func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
 			return nil, err
 		}
 
+		var values map[string]interface{}
+
 		v, err := rd.ReadArrayReply(stringInterfaceMapParser)
 		if err != nil {
-			return nil, err
+			if err != proto.Nil {
+				return nil, err
+			}
+		} else {
+			values = v.(map[string]interface{})
 		}
 
 		msgs = append(msgs, XMessage{
 			ID:     id,
-			Values: v.(map[string]interface{}),
+			Values: values,
 		})
 		return nil, nil
 	})
@@ -1337,6 +1373,68 @@ func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
 
 //------------------------------------------------------------------------------
 
+type ZWithKeyCmd struct {
+	baseCmd
+
+	val ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd {
+	return &ZWithKeyCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *ZWithKeyCmd) Val() ZWithKey {
+	return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (ZWithKey, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+	var v interface{}
+	v, cmd.err = rd.ReadArrayReply(zWithKeyParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(ZWithKey)
+	return nil
+}
+
+// Implements proto.MultiBulkParse
+func zWithKeyParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 3 {
+		return nil, fmt.Errorf("got %d elements, expected 3", n)
+	}
+
+	var z ZWithKey
+	var err error
+
+	z.Key, err = rd.ReadString()
+	if err != nil {
+		return nil, err
+	}
+	z.Member, err = rd.ReadString()
+	if err != nil {
+		return nil, err
+	}
+	z.Score, err = rd.ReadFloatReply()
+	if err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+//------------------------------------------------------------------------------
+
 type ScanCmd struct {
 	baseCmd
 
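The hunk above adds Float32 accessors alongside the long-standing Float64 ones. A minimal usage sketch (not part of the commit; the address and key name are assumptions for illustration):

	package main

	import (
		"fmt"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		defer client.Close()

		client.Set("ratio", "0.75", 0)

		// StringCmd.Float32 parses the stored string with 32-bit precision,
		// mirroring the existing Float64 accessor.
		f, err := client.Get("ratio").Float32()
		if err != nil {
			panic(err)
		}
		fmt.Println(f) // 0.75
	}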
146 vendor/github.com/go-redis/redis/commands.go generated vendored

@@ -8,13 +8,6 @@ import (
 	"github.com/go-redis/redis/internal"
 )
 
-func readTimeout(timeout time.Duration) time.Duration {
-	if timeout == 0 {
-		return 0
-	}
-	return timeout + 10*time.Second
-}
-
 func usePrecise(dur time.Duration) bool {
 	return dur < time.Second || dur%time.Second != 0
 }
@@ -173,6 +166,7 @@ type Cmdable interface {
 	SUnion(keys ...string) *StringSliceCmd
 	SUnionStore(destination string, keys ...string) *IntCmd
 	XAdd(a *XAddArgs) *StringCmd
+	XDel(stream string, ids ...string) *IntCmd
 	XLen(stream string) *IntCmd
 	XRange(stream, start, stop string) *XMessageSliceCmd
 	XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
@@ -181,6 +175,7 @@ type Cmdable interface {
 	XRead(a *XReadArgs) *XStreamSliceCmd
 	XReadStreams(streams ...string) *XStreamSliceCmd
 	XGroupCreate(stream, group, start string) *StatusCmd
+	XGroupCreateMkStream(stream, group, start string) *StatusCmd
 	XGroupSetID(stream, group, start string) *StatusCmd
 	XGroupDestroy(stream, group string) *IntCmd
 	XGroupDelConsumer(stream, group, consumer string) *IntCmd
@@ -192,6 +187,8 @@ type Cmdable interface {
 	XClaimJustID(a *XClaimArgs) *StringSliceCmd
 	XTrim(key string, maxLen int64) *IntCmd
 	XTrimApprox(key string, maxLen int64) *IntCmd
+	BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd
+	BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd
 	ZAdd(key string, members ...Z) *IntCmd
 	ZAddNX(key string, members ...Z) *IntCmd
 	ZAddXX(key string, members ...Z) *IntCmd
@@ -206,6 +203,8 @@ type Cmdable interface {
 	ZLexCount(key, min, max string) *IntCmd
 	ZIncrBy(key string, increment float64, member string) *FloatCmd
 	ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
+	ZPopMax(key string, count ...int64) *ZSliceCmd
+	ZPopMin(key string, count ...int64) *ZSliceCmd
 	ZRange(key string, start, stop int64) *StringSliceCmd
 	ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
 	ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
@@ -233,6 +232,7 @@ type Cmdable interface {
 	ClientKillByFilter(keys ...string) *IntCmd
 	ClientList() *StringCmd
 	ClientPause(dur time.Duration) *BoolCmd
+	ClientID() *IntCmd
 	ConfigGet(parameter string) *SliceCmd
 	ConfigResetStat() *StatusCmd
 	ConfigSet(parameter, value string) *StatusCmd
@@ -270,6 +270,7 @@ type Cmdable interface {
 	ClusterResetHard() *StatusCmd
 	ClusterInfo() *StringCmd
 	ClusterKeySlot(key string) *IntCmd
+	ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd
 	ClusterCountFailureReports(nodeID string) *IntCmd
 	ClusterCountKeysInSlot(slot int) *IntCmd
 	ClusterDelSlots(slots ...int) *StatusCmd
@@ -1342,6 +1343,16 @@ func (c *cmdable) XAdd(a *XAddArgs) *StringCmd {
 	return cmd
 }
 
+func (c *cmdable) XDel(stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
 func (c *cmdable) XLen(stream string) *IntCmd {
 	cmd := NewIntCmd("xlen", stream)
 	c.process(cmd)
@@ -1395,6 +1406,9 @@ func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
 	}
 
 	cmd := NewXStreamSliceCmd(args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
 	c.process(cmd)
 	return cmd
 }
@@ -1412,6 +1426,12 @@ func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
 	return cmd
 }
 
+func (c *cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream")
+	c.process(cmd)
+	return cmd
+}
+
 func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
 	cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
 	c.process(cmd)
@@ -1433,9 +1453,11 @@ func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
 type XReadGroupArgs struct {
 	Group    string
 	Consumer string
-	Streams  []string
-	Count    int64
-	Block    time.Duration
+	// List of streams and ids.
+	Streams []string
+	Count   int64
+	Block   time.Duration
+	NoAck   bool
 }
 
 func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
@@ -1447,12 +1469,18 @@ func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
 	if a.Block >= 0 {
 		args = append(args, "block", int64(a.Block/time.Millisecond))
 	}
+	if a.NoAck {
+		args = append(args, "noack")
+	}
 	args = append(args, "streams")
 	for _, s := range a.Streams {
 		args = append(args, s)
 	}
 
 	cmd := NewXStreamSliceCmd(args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
 	c.process(cmd)
 	return cmd
 }
@@ -1549,6 +1577,12 @@ type Z struct {
 	Member interface{}
 }
 
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
 // ZStore is used as an arg to ZInterStore and ZUnionStore.
 type ZStore struct {
 	Weights []float64
@@ -1556,6 +1590,34 @@ type ZStore struct {
 	Aggregate string
 }
 
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c *cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmax"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewZWithKeyCmd(args...)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c *cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmin"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewZWithKeyCmd(args...)
+	cmd.setReadTimeout(timeout)
+	c.process(cmd)
+	return cmd
+}
+
 func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd {
 	for i, m := range members {
 		a[n+2*i] = m.Score
@@ -1694,6 +1756,46 @@ func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string)
 	return cmd
 }
 
+func (c *cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
 func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
 	args := []interface{}{
 		"zrange",
@@ -1969,6 +2071,24 @@ func (c *cmdable) ClientPause(dur time.Duration) *BoolCmd {
 	return cmd
 }
 
+func (c *cmdable) ClientID() *IntCmd {
+	cmd := NewIntCmd("client", "id")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientUnblock(id int64) *IntCmd {
+	cmd := NewIntCmd("client", "unblock", id)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientUnblockWithError(id int64) *IntCmd {
+	cmd := NewIntCmd("client", "unblock", id, "error")
+	c.process(cmd)
+	return cmd
+}
+
 // ClientSetName assigns a name to the connection.
 func (c *statefulCmdable) ClientSetName(name string) *BoolCmd {
 	cmd := NewBoolCmd("client", "setname", name)
@@ -2284,6 +2404,12 @@ func (c *cmdable) ClusterKeySlot(key string) *IntCmd {
 	return cmd
 }
 
+func (c *cmdable) ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd("cluster", "getkeysinslot", slot, count)
+	c.process(cmd)
+	return cmd
+}
+
 func (c *cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
 	cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
 	c.process(cmd)
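The new BZPopMax/BZPopMin and ZPopMax/ZPopMin commands pop sorted-set members by score; the blocking variants also report which key fired via ZWithKey. A hedged usage sketch (key names and address are assumptions):

	package main

	import (
		"fmt"
		"time"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		defer client.Close()

		client.ZAdd("queue", redis.Z{Score: 1, Member: "low"}, redis.Z{Score: 9, Member: "high"})

		// ZPopMax removes and returns the highest-scored member.
		popped, err := client.ZPopMax("queue", 1).Result()
		if err == nil && len(popped) > 0 {
			fmt.Println(popped[0].Member) // high
		}

		// BZPopMin blocks up to the timeout and also reports which key fired.
		zk, err := client.BZPopMin(time.Second, "queue").Result()
		if err == nil {
			fmt.Println(zk.Key, zk.Member) // queue low
		}
	}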
10 vendor/github.com/go-redis/redis/internal/error.go generated vendored

@@ -9,6 +9,9 @@ import (
 )
 
 func IsRetryableError(err error, retryTimeout bool) bool {
+	if err == nil {
+		return false
+	}
 	if err == io.EOF {
 		return true
 	}
@@ -44,7 +47,8 @@ func IsBadConn(err error, allowTimeout bool) bool {
 		return false
 	}
 	if IsRedisError(err) {
-		return strings.HasPrefix(err.Error(), "READONLY ")
+		// #790
+		return IsReadOnlyError(err)
 	}
 	if allowTimeout {
 		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
@@ -79,3 +83,7 @@ func IsMovedError(err error) (moved bool, ask bool, addr string) {
 func IsLoadingError(err error) bool {
 	return strings.HasPrefix(err.Error(), "LOADING ")
 }
+
+func IsReadOnlyError(err error) bool {
+	return strings.HasPrefix(err.Error(), "READONLY ")
+}
10 vendor/github.com/go-redis/redis/internal/pool/conn.go generated vendored

@@ -17,14 +17,16 @@ type Conn struct {
 	rdLocked bool
 	wr       *proto.Writer
 
-	InitedAt time.Time
-	pooled   bool
-	usedAt   atomic.Value
+	Inited    bool
+	pooled    bool
+	createdAt time.Time
+	usedAt    atomic.Value
 }
 
 func NewConn(netConn net.Conn) *Conn {
 	cn := &Conn{
-		netConn: netConn,
+		netConn:   netConn,
+		createdAt: time.Now(),
 	}
 	cn.rd = proto.NewReader(netConn)
 	cn.wr = proto.NewWriter(netConn)
8 vendor/github.com/go-redis/redis/internal/pool/pool.go generated vendored

@@ -38,7 +38,7 @@ type Pooler interface {
 
 	Get() (*Conn, error)
 	Put(*Conn)
-	Remove(*Conn)
+	Remove(*Conn, error)
 
 	Len() int
 	IdleLen() int
@@ -289,7 +289,7 @@ func (p *ConnPool) popIdle() *Conn {
 
 func (p *ConnPool) Put(cn *Conn) {
 	if !cn.pooled {
-		p.Remove(cn)
+		p.Remove(cn, nil)
 		return
 	}
 
@@ -300,7 +300,7 @@ func (p *ConnPool) Put(cn *Conn) {
 	p.freeTurn()
 }
 
-func (p *ConnPool) Remove(cn *Conn) {
+func (p *ConnPool) Remove(cn *Conn, reason error) {
 	p.removeConn(cn)
 	p.freeTurn()
 	_ = p.closeConn(cn)
@@ -468,7 +468,7 @@ func (p *ConnPool) isStaleConn(cn *Conn) bool {
 	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
 		return true
 	}
-	if p.opt.MaxConnAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxConnAge {
+	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
 		return true
 	}
 
184 vendor/github.com/go-redis/redis/internal/pool/pool_single.go generated vendored

@@ -1,53 +1,203 @@
 package pool
 
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+const (
+	stateDefault = 0
+	stateInited  = 1
+	stateClosed  = 2
+)
+
+type BadConnError struct {
+	wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+	return "pg: Conn is in a bad state"
+}
+
+func (e BadConnError) Unwrap() error {
+	return e.wrapped
+}
+
 type SingleConnPool struct {
-	cn *Conn
+	pool  Pooler
+	level int32 // atomic
+
+	state uint32 // atomic
+	ch    chan *Conn
+
+	_badConnError atomic.Value
 }
 
 var _ Pooler = (*SingleConnPool)(nil)
 
-func NewSingleConnPool(cn *Conn) *SingleConnPool {
-	return &SingleConnPool{
-		cn: cn,
+func NewSingleConnPool(pool Pooler) *SingleConnPool {
+	p, ok := pool.(*SingleConnPool)
+	if !ok {
+		p = &SingleConnPool{
+			pool: pool,
+			ch:   make(chan *Conn, 1),
+		}
 	}
+	atomic.AddInt32(&p.level, 1)
+	return p
+}
+
+func (p *SingleConnPool) SetConn(cn *Conn) {
+	if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+		p.ch <- cn
+	} else {
+		panic("not reached")
+	}
 }
 
 func (p *SingleConnPool) NewConn() (*Conn, error) {
-	panic("not implemented")
+	return p.pool.NewConn()
 }
 
-func (p *SingleConnPool) CloseConn(*Conn) error {
-	panic("not implemented")
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
 }
 
 func (p *SingleConnPool) Get() (*Conn, error) {
-	return p.cn, nil
+	// In worst case this races with Close which is not a very common operation.
+	for i := 0; i < 1000; i++ {
+		switch atomic.LoadUint32(&p.state) {
+		case stateDefault:
+			cn, err := p.pool.Get()
+			if err != nil {
+				return nil, err
+			}
+			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+				return cn, nil
+			}
+			p.pool.Remove(cn, ErrClosed)
+		case stateInited:
+			if err := p.badConnError(); err != nil {
+				return nil, err
+			}
+			cn, ok := <-p.ch
+			if !ok {
+				return nil, ErrClosed
+			}
+			return cn, nil
+		case stateClosed:
+			return nil, ErrClosed
+		default:
+			panic("not reached")
+		}
+	}
+	return nil, fmt.Errorf("pg: SingleConnPool.Get: infinite loop")
 }
 
 func (p *SingleConnPool) Put(cn *Conn) {
-	if p.cn != cn {
-		panic("p.cn != cn")
+	defer func() {
+		if recover() != nil {
+			p.freeConn(cn)
+		}
+	}()
+	p.ch <- cn
+}
+
+func (p *SingleConnPool) freeConn(cn *Conn) {
+	if err := p.badConnError(); err != nil {
+		p.pool.Remove(cn, err)
+	} else {
+		p.pool.Put(cn)
 	}
 }
 
-func (p *SingleConnPool) Remove(cn *Conn) {
-	if p.cn != cn {
-		panic("p.cn != cn")
-	}
+func (p *SingleConnPool) Remove(cn *Conn, reason error) {
+	defer func() {
+		if recover() != nil {
+			p.pool.Remove(cn, ErrClosed)
+		}
+	}()
+	p._badConnError.Store(BadConnError{wrapped: reason})
+	p.ch <- cn
 }
 
 func (p *SingleConnPool) Len() int {
-	return 1
+	switch atomic.LoadUint32(&p.state) {
+	case stateDefault:
+		return 0
+	case stateInited:
+		return 1
+	case stateClosed:
+		return 0
+	default:
+		panic("not reached")
+	}
 }
 
 func (p *SingleConnPool) IdleLen() int {
-	return 0
+	return len(p.ch)
 }
 
 func (p *SingleConnPool) Stats() *Stats {
-	return nil
+	return &Stats{}
 }
 
 func (p *SingleConnPool) Close() error {
+	level := atomic.AddInt32(&p.level, -1)
+	if level > 0 {
+		return nil
+	}
+
+	for i := 0; i < 1000; i++ {
+		state := atomic.LoadUint32(&p.state)
+		if state == stateClosed {
+			return ErrClosed
+		}
+		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+			close(p.ch)
+			cn, ok := <-p.ch
+			if ok {
+				p.freeConn(cn)
+			}
+			return nil
+		}
+	}
+
+	return fmt.Errorf("pg: SingleConnPool.Close: infinite loop")
+}
+
+func (p *SingleConnPool) Reset() error {
+	if p.badConnError() == nil {
+		return nil
+	}
+
+	select {
+	case cn, ok := <-p.ch:
+		if !ok {
+			return ErrClosed
+		}
+		p.pool.Remove(cn, ErrClosed)
+		p._badConnError.Store(BadConnError{wrapped: nil})
+	default:
+		return fmt.Errorf("pg: SingleConnPool does not have a Conn")
+	}
+
+	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+		state := atomic.LoadUint32(&p.state)
+		return fmt.Errorf("pg: invalid SingleConnPool state: %d", state)
+	}
+
 	return nil
 }
+
+func (p *SingleConnPool) badConnError() error {
+	if v := p._badConnError.Load(); v != nil {
+		err := v.(BadConnError)
+		if err.wrapped != nil {
+			return err
+		}
+	}
+	return nil
+}
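The rewritten SingleConnPool serializes connection ownership through a one-slot channel plus a CAS-driven default→inited→closed state machine. A standalone sketch of that CAS pattern (illustrative only, not code from the diff):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	const (
		stateDefault uint32 = 0
		stateInited  uint32 = 1
		stateClosed  uint32 = 2
	)

	func main() {
		var state uint32 // starts in stateDefault

		// The first caller wins the default->inited transition; a concurrent
		// loser would observe stateInited and take the channel-receive path.
		if atomic.CompareAndSwapUint32(&state, stateDefault, stateInited) {
			fmt.Println("acquired: pool now owns a connection slot")
		}

		// Close moves the current state to stateClosed exactly once.
		if atomic.CompareAndSwapUint32(&state, stateInited, stateClosed) {
			fmt.Println("closed")
		}
		fmt.Println(state == stateClosed) // true
	}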
10 vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go generated vendored

@@ -55,13 +55,13 @@ func (p *StickyConnPool) putUpstream() {
 
 func (p *StickyConnPool) Put(cn *Conn) {}
 
-func (p *StickyConnPool) removeUpstream() {
-	p.pool.Remove(p.cn)
+func (p *StickyConnPool) removeUpstream(reason error) {
+	p.pool.Remove(p.cn, reason)
 	p.cn = nil
 }
 
-func (p *StickyConnPool) Remove(cn *Conn) {
-	p.removeUpstream()
+func (p *StickyConnPool) Remove(cn *Conn, reason error) {
+	p.removeUpstream(reason)
 }
 
 func (p *StickyConnPool) Len() int {
@@ -101,7 +101,7 @@ func (p *StickyConnPool) Close() error {
 		if p.reusable {
 			p.putUpstream()
 		} else {
-			p.removeUpstream()
+			p.removeUpstream(ErrClosed)
 		}
 	}
 
64 vendor/github.com/go-redis/redis/internal/singleflight/singleflight.go generated vendored

@@ -1,64 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package singleflight provides a duplicate function call suppression
-// mechanism.
-package singleflight
-
-import "sync"
-
-// call is an in-flight or completed Do call
-type call struct {
-	wg  sync.WaitGroup
-	val interface{}
-	err error
-}
-
-// Group represents a class of work and forms a namespace in which
-// units of work can be executed with duplicate suppression.
-type Group struct {
-	mu sync.Mutex       // protects m
-	m  map[string]*call // lazily initialized
-}
-
-// Do executes and returns the results of the given function, making
-// sure that only one execution is in-flight for a given key at a
-// time. If a duplicate comes in, the duplicate caller waits for the
-// original to complete and receives the same results.
-func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
-	g.mu.Lock()
-	if g.m == nil {
-		g.m = make(map[string]*call)
-	}
-	if c, ok := g.m[key]; ok {
-		g.mu.Unlock()
-		c.wg.Wait()
-		return c.val, c.err
-	}
-	c := new(call)
-	c.wg.Add(1)
-	g.m[key] = c
-	g.mu.Unlock()
-
-	c.val, c.err = fn()
-	c.wg.Done()
-
-	g.mu.Lock()
-	delete(g.m, key)
-	g.mu.Unlock()
-
-	return c.val, c.err
-}
10 vendor/github.com/go-redis/redis/internal/util.go generated vendored

@@ -27,3 +27,13 @@ func isLower(s string) bool {
 	}
 	return true
 }
+
+func Unwrap(err error) error {
+	u, ok := err.(interface {
+		Unwrap() error
+	})
+	if !ok {
+		return nil
+	}
+	return u.Unwrap()
+}
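internal.Unwrap mirrors the errors.Unwrap convention introduced with Go 1.13: it peels one layer off any error exposing an Unwrap() error method and returns nil otherwise. A self-contained sketch with a hypothetical wrapped type:

	package main

	import (
		"errors"
		"fmt"
	)

	// wrapped is a hypothetical error type exposing the Go 1.13-style Unwrap hook.
	type wrapped struct{ inner error }

	func (w wrapped) Error() string { return "wrapped: " + w.inner.Error() }
	func (w wrapped) Unwrap() error { return w.inner }

	// unwrap mirrors internal.Unwrap: peel one layer if the error supports it.
	func unwrap(err error) error {
		u, ok := err.(interface{ Unwrap() error })
		if !ok {
			return nil
		}
		return u.Unwrap()
	}

	func main() {
		base := errors.New("base failure")
		fmt.Println(unwrap(wrapped{inner: base}) == base) // true
		fmt.Println(unwrap(base) == nil)                  // plain errors yield nil
	}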
16 vendor/github.com/go-redis/redis/options.go generated vendored

@@ -14,6 +14,17 @@ import (
 	"github.com/go-redis/redis/internal/pool"
 )
 
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+	// Allow returns a nil if operation is allowed or an error otherwise.
+	// If operation is allowed client must report the result of operation
+	// whether is a success or a failure.
+	Allow() error
+	// ReportResult reports the result of previously allowed operation.
+	// nil indicates a success, non-nil error indicates a failure.
+	ReportResult(result error)
+}
+
 type Options struct {
 	// The network type, either tcp or unix.
 	// Default is tcp.
@@ -48,7 +59,7 @@ type Options struct {
 	// Default is 5 seconds.
 	DialTimeout time.Duration
 	// Timeout for socket reads. If reached, commands will fail
-	// with a timeout instead of blocking.
+	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
 	// Default is 3 seconds.
 	ReadTimeout time.Duration
 	// Timeout for socket writes. If reached, commands will fail
@@ -90,6 +101,9 @@ func (opt *Options) init() {
 	if opt.Network == "" {
 		opt.Network = "tcp"
 	}
+	if opt.Addr == "" {
+		opt.Addr = "localhost:6379"
+	}
 	if opt.Dialer == nil {
 		opt.Dialer = func() (net.Conn, error) {
 			netDialer := &net.Dialer{
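The new Limiter interface lets callers plug a rate limiter or circuit breaker into the client. A hedged sketch of one possible implementation (the consecutive-failure breaker is an assumption; only the Allow/ReportResult contract comes from the diff):

	package main

	import (
		"errors"
		"fmt"
		"sync/atomic"
	)

	// simpleBreaker trips after a fixed number of consecutive failures.
	type simpleBreaker struct {
		failures  int32
		threshold int32
	}

	// Allow implements the first half of the contract: reject while open.
	func (b *simpleBreaker) Allow() error {
		if atomic.LoadInt32(&b.failures) >= b.threshold {
			return errors.New("breaker open: too many consecutive failures")
		}
		return nil
	}

	// ReportResult implements the second half: failures accumulate, success resets.
	func (b *simpleBreaker) ReportResult(result error) {
		if result != nil {
			atomic.AddInt32(&b.failures, 1)
			return
		}
		atomic.StoreInt32(&b.failures, 0)
	}

	func main() {
		b := &simpleBreaker{threshold: 2}
		b.ReportResult(errors.New("boom"))
		b.ReportResult(errors.New("boom"))
		fmt.Println(b.Allow()) // breaker open: too many consecutive failures
	}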
20 vendor/github.com/go-redis/redis/pipeline.go generated vendored

@@ -8,8 +8,22 @@ import (
 
 type pipelineExecer func([]Cmder) error
 
+// Pipeliner is an mechanism to realise Redis Pipeline technique.
+//
+// Pipelining is a technique to extremely speed up processing by packing
+// operations to batches, send them at once to Redis and read a replies in a
+// singe step.
+// See https://redis.io/topics/pipelining
+//
+// Pay attention, that Pipeline is not a transaction, so you can get unexpected
+// results in case of big pipelines and small read/write timeouts.
+// Redis client has retransmission logic in case of timeouts, pipeline
+// can be retransmitted and commands can be executed more then once.
+// To avoid this: it is good idea to use reasonable bigger read/write timeouts
+// depends of your batch size and/or use TxPipeline.
 type Pipeliner interface {
 	StatefulCmdable
+	Do(args ...interface{}) *Cmd
 	Process(cmd Cmder) error
 	Close() error
 	Discard() error
@@ -31,6 +45,12 @@ type Pipeline struct {
 	closed bool
 }
 
+func (c *Pipeline) Do(args ...interface{}) *Cmd {
+	cmd := NewCmd(args...)
+	_ = c.Process(cmd)
+	return cmd
+}
+
 // Process queues the cmd for later execution.
 func (c *Pipeline) Process(cmd Cmder) error {
 	c.mu.Lock()
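Putting the Pipeliner doc comment into practice, including the Do method this hunk adds to the interface; the address and key are illustrative:

	package main

	import (
		"fmt"
		"time"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		defer client.Close()

		pipe := client.Pipeline()
		incr := pipe.Incr("pipeline_counter")
		pipe.Expire("pipeline_counter", time.Hour)
		// Do, newly part of the Pipeliner interface, queues an arbitrary command.
		pipe.Do("ping")

		// Exec sends all queued commands in one round trip and reads the replies.
		if _, err := pipe.Exec(); err != nil {
			panic(err)
		}
		fmt.Println(incr.Val())
	}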
72 vendor/github.com/go-redis/redis/pubsub.go generated vendored

@@ -1,7 +1,9 @@
 package redis
 
 import (
+	"errors"
 	"fmt"
+	"strings"
 	"sync"
 	"time"
 
@@ -10,7 +12,9 @@ import (
 	"github.com/go-redis/redis/internal/proto"
 )
 
-// PubSub implements Pub/Sub commands bas described in
+var errPingTimeout = errors.New("redis: ping timeout")
+
+// PubSub implements Pub/Sub commands as described in
 // http://redis.io/topics/pubsub. Message receiving is NOT safe
 // for concurrent use by multiple goroutines.
 //
@@ -26,8 +30,9 @@ type PubSub struct {
 	cn       *pool.Conn
 	channels map[string]struct{}
 	patterns map[string]struct{}
-	closed   bool
-	exit     chan struct{}
+
+	closed bool
+	exit   chan struct{}
 
 	cmd *Cmd
 
@@ -36,6 +41,12 @@ type PubSub struct {
 	ping chan struct{}
 }
 
+func (c *PubSub) String() string {
+	channels := mapKeys(c.channels)
+	channels = append(channels, mapKeys(c.patterns)...)
+	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
 func (c *PubSub) init() {
 	c.exit = make(chan struct{})
 }
@@ -51,7 +62,6 @@ func (c *PubSub) _conn(newChannels []string) (*pool.Conn, error) {
 	if c.closed {
 		return nil, pool.ErrClosed
 	}
-
 	if c.cn != nil {
 		return c.cn, nil
 	}
@@ -387,16 +397,39 @@ func (c *PubSub) ReceiveMessage() (*Message, error) {
 // It periodically sends Ping messages to test connection health.
 // The channel is closed with PubSub. Receive* APIs can not be used
 // after channel is created.
+//
+// If the Go channel is full for 30 seconds the message is dropped.
 func (c *PubSub) Channel() <-chan *Message {
-	c.chOnce.Do(c.initChannel)
+	return c.channel(100)
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	return c.channel(size)
+}
+
+func (c *PubSub) channel(size int) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.initChannel(size)
+	})
+	if cap(c.ch) != size {
+		err := fmt.Errorf("redis: PubSub.Channel is called with different buffer size")
+		panic(err)
+	}
 	return c.ch
 }
 
-func (c *PubSub) initChannel() {
-	c.ch = make(chan *Message, 100)
-	c.ping = make(chan struct{}, 10)
+func (c *PubSub) initChannel(size int) {
+	const timeout = 30 * time.Second
+
+	c.ch = make(chan *Message, size)
+	c.ping = make(chan struct{}, 1)
 
 	go func() {
+		timer := time.NewTimer(timeout)
+		timer.Stop()
+
 		var errCount int
 		for {
 			msg, err := c.Receive()
@@ -411,6 +444,7 @@ func (c *PubSub) initChannel() {
 				errCount++
 				continue
 			}
+
 			errCount = 0
 
 			// Any message is as good as a ping.
@@ -425,21 +459,28 @@ func (c *PubSub) initChannel() {
 			case *Pong:
 				// Ignore.
 			case *Message:
-				c.ch <- msg
+				timer.Reset(timeout)
+				select {
+				case c.ch <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logf(
+						"redis: %s channel is full for %s (message is dropped)",
+						c, timeout)
+				}
 			default:
-				internal.Logf("redis: unknown message: %T", msg)
+				internal.Logf("redis: unknown message type: %T", msg)
 			}
 		}
 	}()
 
 	go func() {
+		const timeout = 5 * time.Second
+
 		timer := time.NewTimer(timeout)
 		timer.Stop()
 
 		healthy := true
-		var pingErr error
 		for {
 			timer.Reset(timeout)
 			select {
@@ -449,10 +490,13 @@ func (c *PubSub) initChannel() {
 				<-timer.C
 			}
 		case <-timer.C:
-			pingErr = c.Ping()
+			pingErr := c.Ping()
 			if healthy {
 				healthy = false
 			} else {
+				if pingErr == nil {
+					pingErr = errPingTimeout
+				}
 				c.mu.Lock()
 				c._reconnect(pingErr)
 				c.mu.Unlock()
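The new ChannelSize lets callers pick the Go-channel buffer that Channel would otherwise fix at 100; per the hunk above, a message is dropped (and logged) if the buffer stays full for 30 seconds. A short sketch (channel name and address assumed):

	package main

	import (
		"fmt"

		"github.com/go-redis/redis"
	)

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		defer client.Close()

		sub := client.Subscribe("events")
		defer sub.Close()

		// ChannelSize is like Channel but with a caller-chosen buffer size.
		ch := sub.ChannelSize(1000)
		for msg := range ch {
			fmt.Println(msg.Channel, msg.Payload)
		}
	}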
128 vendor/github.com/go-redis/redis/redis.go generated vendored

@@ -26,6 +26,7 @@ func SetLogger(logger *log.Logger) {
 type baseClient struct {
 	opt      *Options
 	connPool pool.Pooler
+	limiter  Limiter
 
 	process         func(Cmder) error
 	processPipeline func([]Cmder) error
@@ -50,45 +51,80 @@ func (c *baseClient) newConn() (*pool.Conn, error) {
 		return nil, err
 	}
 
-	if cn.InitedAt.IsZero() {
-		if err := c.initConn(cn); err != nil {
-			_ = c.connPool.CloseConn(cn)
-			return nil, err
-		}
+	err = c.initConn(cn)
+	if err != nil {
+		_ = c.connPool.CloseConn(cn)
+		return nil, err
 	}
 
 	return cn, nil
 }
 
 func (c *baseClient) getConn() (*pool.Conn, error) {
+	if c.limiter != nil {
+		err := c.limiter.Allow()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	cn, err := c._getConn()
+	if err != nil {
+		if c.limiter != nil {
+			c.limiter.ReportResult(err)
+		}
+		return nil, err
+	}
+	return cn, nil
+}
+
+func (c *baseClient) _getConn() (*pool.Conn, error) {
 	cn, err := c.connPool.Get()
 	if err != nil {
 		return nil, err
 	}
 
-	if cn.InitedAt.IsZero() {
-		err := c.initConn(cn)
-		if err != nil {
-			c.connPool.Remove(cn)
-			return nil, err
-		}
+	err = c.initConn(cn)
+	if err != nil {
+		c.connPool.Remove(cn, err)
+		if err := internal.Unwrap(err); err != nil {
+			return nil, err
+		}
+		return nil, err
 	}
 
 	return cn, nil
 }
 
-func (c *baseClient) releaseConn(cn *pool.Conn, err error) bool {
-	if internal.IsBadConn(err, false) {
-		c.connPool.Remove(cn)
-		return false
+func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
+	if c.limiter != nil {
+		c.limiter.ReportResult(err)
 	}
 
-	c.connPool.Put(cn)
-	return true
+	if internal.IsBadConn(err, false) {
+		c.connPool.Remove(cn, err)
+	} else {
+		c.connPool.Put(cn)
+	}
+}
+
+func (c *baseClient) releaseConnStrict(cn *pool.Conn, err error) {
+	if c.limiter != nil {
+		c.limiter.ReportResult(err)
+	}
+
+	if err == nil || internal.IsRedisError(err) {
+		c.connPool.Put(cn)
+	} else {
+		c.connPool.Remove(cn, err)
+	}
 }
 
 func (c *baseClient) initConn(cn *pool.Conn) error {
-	cn.InitedAt = time.Now()
+	if cn.Inited {
+		return nil
+	}
+	cn.Inited = true
 
 	if c.opt.Password == "" &&
 		c.opt.DB == 0 &&
@@ -126,7 +162,7 @@ func (c *baseClient) initConn(cn *pool.Conn) error {
 // Do creates a Cmd from the args and processes the cmd.
 func (c *baseClient) Do(args ...interface{}) *Cmd {
 	cmd := NewCmd(args...)
-	c.Process(cmd)
+	_ = c.Process(cmd)
 	return cmd
 }
 
@@ -168,9 +204,7 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
 			return err
 		}
 
-		err = cn.WithReader(c.cmdTimeout(cmd), func(rd *proto.Reader) error {
-			return cmd.readReply(rd)
-		})
+		err = cn.WithReader(c.cmdTimeout(cmd), cmd.readReply)
 		c.releaseConn(cn, err)
 		if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) {
 			continue
@@ -188,7 +222,11 @@ func (c *baseClient) retryBackoff(attempt int) time.Duration {
 
 func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
 	if timeout := cmd.readTimeout(); timeout != nil {
-		return readTimeout(*timeout)
+		t := *timeout
+		if t == 0 {
+			return 0
+		}
+		return t + 10*time.Second
 	}
 	return c.opt.ReadTimeout
 }
@@ -200,7 +238,7 @@ func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
 func (c *baseClient) Close() error {
 	var firstErr error
 	if c.onClose != nil {
-		if err := c.onClose(); err != nil && firstErr == nil {
+		if err := c.onClose(); err != nil {
 			firstErr = err
 		}
 	}
@@ -244,12 +282,7 @@ func (c *baseClient) generalProcessPipeline(cmds []Cmder, p pipelineProcessor) e
 		}
 
 		canRetry, err := p(cn, cmds)
-
-		if err == nil || internal.IsRedisError(err) {
-			c.connPool.Put(cn)
-			break
-		}
-		c.connPool.Remove(cn)
+		c.releaseConnStrict(cn, err)
 
 		if !canRetry || !internal.IsRetryableError(err, true) {
 			break
@@ -319,7 +352,7 @@ func txPipelineReadQueued(rd *proto.Reader, cmds []Cmder) error {
 		return err
 	}
 
-	for _ = range cmds {
+	for range cmds {
 		err = statusCmd.readReply(rd)
 		if err != nil && !internal.IsRedisError(err) {
 			return err
@@ -391,12 +424,12 @@ func (c *Client) WithContext(ctx context.Context) *Client {
 	if ctx == nil {
 		panic("nil context")
 	}
-	c2 := c.copy()
+	c2 := c.clone()
 	c2.ctx = ctx
 	return c2
 }
 
-func (c *Client) copy() *Client {
+func (c *Client) clone() *Client {
 	cp := *c
 	cp.init()
 	return &cp
@@ -407,6 +440,11 @@ func (c *Client) Options() *Options {
 	return c.opt
 }
 
+func (c *Client) SetLimiter(l Limiter) *Client {
+	c.limiter = l
+	return c
+}
+
 type PoolStats pool.Stats
 
 // PoolStats returns connection pool stats.
@@ -455,6 +493,30 @@ func (c *Client) pubSub() *PubSub {
 
 // Subscribe subscribes the client to the specified channels.
 // Channels can be omitted to create empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+//    sub := client.Subscribe(queryResp)
+//    iface, err := sub.Receive()
+//    if err != nil {
+//        // handle error
+//    }
+//
+//    // Should be *Subscription, but others are possible if other actions have been
+//    // taken on sub since it was created.
+//    switch iface.(type) {
+//    case *Subscription:
+//        // subscribe succeeded
+//    case *Message:
+//        // received first message
+//    case *Pong:
+//        // pong received
+//    default:
+//        // handle error
+//    }
+//
+//    ch := sub.Channel()
 func (c *Client) Subscribe(channels ...string) *PubSub {
 	pubsub := c.pubSub()
 	if len(channels) > 0 {
@@ -482,10 +544,12 @@ type Conn struct {
 }
 
 func newConn(opt *Options, cn *pool.Conn) *Conn {
+	connPool := pool.NewSingleConnPool(nil)
+	connPool.SetConn(cn)
 	c := Conn{
 		baseClient: baseClient{
 			opt:      opt,
-			connPool: pool.NewSingleConnPool(cn),
+			connPool: connPool,
 		},
 	}
 	c.baseClient.init()
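A self-contained sketch of the new SetLimiter hook on Client; the no-op limiter and address are assumptions. Every connection checkout consults Allow first, and ReportResult is fed the outcome on release:

	package main

	import "github.com/go-redis/redis"

	// allowAll satisfies redis.Limiter but never rejects an operation.
	type allowAll struct{}

	func (allowAll) Allow() error       { return nil }
	func (allowAll) ReportResult(error) {}

	func main() {
		client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		defer client.Close()

		// Checkouts now go through the limiter; a real implementation could
		// shed load here instead of always allowing.
		client.SetLimiter(allowAll{})

		client.Ping()
	}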
143 vendor/github.com/go-redis/redis/ring.go generated vendored

@@ -273,9 +273,13 @@ func (c *ringShards) Heartbeat(frequency time.Duration) {
 
 // rebalance removes dead shards from the Ring.
 func (c *ringShards) rebalance() {
+	c.mu.RLock()
+	shards := c.shards
+	c.mu.RUnlock()
+
 	hash := newConsistentHash(c.opt)
 	var shardsNum int
-	for name, shard := range c.shards {
+	for name, shard := range shards {
 		if shard.IsUp() {
 			hash.Add(name)
 			shardsNum++
@@ -319,12 +323,12 @@ func (c *ringShards) Close() error {
 
 //------------------------------------------------------------------------------
 
-// Ring is a Redis client that uses constistent hashing to distribute
+// Ring is a Redis client that uses consistent hashing to distribute
 // keys across multiple Redis servers (shards). It's safe for
 // concurrent use by multiple goroutines.
 //
 // Ring monitors the state of each shard and removes dead shards from
-// the ring. When shard comes online it is added back to the ring. This
+// the ring. When a shard comes online it is added back to the ring. This
 // gives you maximum availability and partition tolerance, but no
 // consistency between different shards or even clients. Each client
 // uses shards that are available to the client and does not do any
@@ -342,6 +346,7 @@ type Ring struct {
 	shards        *ringShards
 	cmdsInfoCache *cmdsInfoCache
 
+	process         func(Cmder) error
 	processPipeline func([]Cmder) error
 }
 
@@ -354,8 +359,10 @@ func NewRing(opt *RingOptions) *Ring {
 	}
 	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
 
+	ring.process = ring.defaultProcess
 	ring.processPipeline = ring.defaultProcessPipeline
-	ring.cmdable.setProcessor(ring.Process)
+
+	ring.init()
 
 	for name, addr := range opt.Addrs {
 		clopt := opt.clientOptions()
@@ -368,6 +375,10 @@ func NewRing(opt *RingOptions) *Ring {
 	return ring
 }
 
+func (c *Ring) init() {
+	c.cmdable.setProcessor(c.Process)
+}
+
 func (c *Ring) Context() context.Context {
 	if c.ctx != nil {
 		return c.ctx
@@ -379,13 +390,15 @@ func (c *Ring) WithContext(ctx context.Context) *Ring {
 	if ctx == nil {
 		panic("nil context")
 	}
-	c2 := c.copy()
+	c2 := c.clone()
 	c2.ctx = ctx
 	return c2
 }
 
-func (c *Ring) copy() *Ring {
+func (c *Ring) clone() *Ring {
 	cp := *c
+	cp.init()
+
 	return &cp
 }
 
@@ -526,19 +539,34 @@ func (c *Ring) Do(args ...interface{}) *Cmd {
 func (c *Ring) WrapProcess(
 	fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error,
 ) {
-	c.ForEachShard(func(c *Client) error {
-		c.WrapProcess(fn)
-		return nil
-	})
+	c.process = fn(c.process)
 }
 
 func (c *Ring) Process(cmd Cmder) error {
-	shard, err := c.cmdShard(cmd)
-	if err != nil {
-		cmd.setErr(err)
-		return err
+	return c.process(cmd)
+}
+
+func (c *Ring) defaultProcess(cmd Cmder) error {
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			time.Sleep(c.retryBackoff(attempt))
+		}
+
+		shard, err := c.cmdShard(cmd)
+		if err != nil {
+			cmd.setErr(err)
+			return err
+		}
+
+		err = shard.Client.Process(cmd)
+		if err == nil {
+			return nil
+		}
+		if !internal.IsRetryableError(err, cmd.readTimeout() == nil) {
+			return err
+		}
 	}
-	return shard.Client.Process(cmd)
+	return cmd.Err()
 }
 
 func (c *Ring) Pipeline() Pipeliner {
@@ -575,36 +603,42 @@ func (c *Ring) defaultProcessPipeline(cmds []Cmder) error {
 			time.Sleep(c.retryBackoff(attempt))
 		}
 
+		var mu sync.Mutex
 		var failedCmdsMap map[string][]Cmder
+		var wg sync.WaitGroup
 
 		for hash, cmds := range cmdsMap {
-			shard, err := c.shards.GetByHash(hash)
-			if err != nil {
-				setCmdsErr(cmds, err)
-				continue
-			}
-
-			cn, err := shard.Client.getConn()
-			if err != nil {
-				setCmdsErr(cmds, err)
-				continue
-			}
-
-			canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds)
-			if err == nil || internal.IsRedisError(err) {
-				shard.Client.connPool.Put(cn)
-				continue
-			}
-			shard.Client.connPool.Remove(cn)
-
-			if canRetry && internal.IsRetryableError(err, true) {
-				if failedCmdsMap == nil {
-					failedCmdsMap = make(map[string][]Cmder)
-				}
-				failedCmdsMap[hash] = cmds
-			}
+			wg.Add(1)
+			go func(hash string, cmds []Cmder) {
+				defer wg.Done()
+
+				shard, err := c.shards.GetByHash(hash)
+				if err != nil {
+					setCmdsErr(cmds, err)
+					return
+				}
+
+				cn, err := shard.Client.getConn()
+				if err != nil {
+					setCmdsErr(cmds, err)
+					return
+				}
+
+				canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds)
+				shard.Client.releaseConnStrict(cn, err)
+
+				if canRetry && internal.IsRetryableError(err, true) {
+					mu.Lock()
+					if failedCmdsMap == nil {
+						failedCmdsMap = make(map[string][]Cmder)
+					}
+					failedCmdsMap[hash] = cmds
+					mu.Unlock()
+				}
+			}(hash, cmds)
 		}
 
+		wg.Wait()
 		if len(failedCmdsMap) == 0 {
 			break
 		}
@@ -630,6 +664,39 @@ func (c *Ring) Close() error {
 	return c.shards.Close()
 }
 
+func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	var shards []*ringShard
+	for _, key := range keys {
+		if key != "" {
+			shard, err := c.shards.GetByKey(hashtag.Key(key))
+			if err != nil {
+				return err
+			}
+
+			shards = append(shards, shard)
+		}
+	}
+
+	if len(shards) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one shard")
+	}
+
+	if len(shards) > 1 {
+		for _, shard := range shards[1:] {
+			if shard.Client != shards[0].Client {
+				err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+				return err
+			}
+		}
+	}
+
+	return shards[0].Client.Watch(fn, keys...)
+}
+
 func newConsistentHash(opt *RingOptions) *consistenthash.Map {
 	return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
 }
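Sketch of the new Ring.Watch: all watched keys must resolve to one shard, so hashtags are used here to co-locate them; addresses and key names are assumptions:

	package main

	import "github.com/go-redis/redis"

	func main() {
		ring := redis.NewRing(&redis.RingOptions{
			Addrs: map[string]string{
				"shard1": "localhost:6379",
				"shard2": "localhost:6380",
			},
		})
		defer ring.Close()

		// The {user:1} hashtag forces both keys onto the same shard, which
		// Ring.Watch verifies before delegating to that shard's client.
		err := ring.Watch(func(tx *redis.Tx) error {
			_, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
				pipe.Set("{user:1}.visits", 1, 0)
				pipe.Set("{user:1}.name", "anna", 0)
				return nil
			})
			return err
		}, "{user:1}.visits", "{user:1}.name")
		if err != nil {
			panic(err)
		}
	}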
272 vendor/github.com/go-redis/redis/sentinel.go generated vendored

@@ -90,9 +90,7 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
 			opt:      opt,
 			connPool: failover.Pool(),
 
-			onClose: func() error {
-				return failover.Close()
-			},
+			onClose: failover.Close,
 		},
 	}
 	c.baseClient.init()
@@ -119,7 +117,7 @@ func NewSentinelClient(opt *Options) *SentinelClient {
 	return c
 }
 
-func (c *SentinelClient) PubSub() *PubSub {
+func (c *SentinelClient) pubSub() *PubSub {
 	pubsub := &PubSub{
 		opt: c.opt,
 
@@ -132,14 +130,67 @@ func (c *SentinelClient) PubSub() *PubSub {
 	return pubsub
 }
 
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create empty subscription.
+func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create empty subscription.
+func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(channels...)
+	}
+	return pubsub
+}
+
 func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
+	cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
 	c.Process(cmd)
 	return cmd
 }
 
 func (c *SentinelClient) Sentinels(name string) *SliceCmd {
-	cmd := NewSliceCmd("SENTINEL", "sentinels", name)
+	cmd := NewSliceCmd("sentinel", "sentinels", name)
 	c.Process(cmd)
 	return cmd
 }
 
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement to other Sentinels.
+func (c *SentinelClient) Failover(name string) *StatusCmd {
+	cmd := NewStatusCmd("sentinel", "failover", name)
+	c.Process(cmd)
+	return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(pattern string) *IntCmd {
+	cmd := NewIntCmd("sentinel", "reset", pattern)
+	c.Process(cmd)
+	return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig() *StatusCmd {
+	cmd := NewStatusCmd("sentinel", "flushconfig")
+	c.Process(cmd)
+	return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(name string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd("sentinel", "master", name)
+	c.Process(cmd)
+	return cmd
+}
+
@@ -156,79 +207,92 @@ type sentinelFailover struct {
 	masterName  string
 	_masterAddr string
 	sentinel    *SentinelClient
+	pubsub      *PubSub
 }
 
-func (d *sentinelFailover) Close() error {
-	return d.resetSentinel()
+func (c *sentinelFailover) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sentinel != nil {
+		return c.closeSentinel()
+	}
+	return nil
 }
 
-func (d *sentinelFailover) Pool() *pool.ConnPool {
-	d.poolOnce.Do(func() {
-		d.opt.Dialer = d.dial
-		d.pool = newConnPool(d.opt)
+func (c *sentinelFailover) Pool() *pool.ConnPool {
+	c.poolOnce.Do(func() {
+		c.opt.Dialer = c.dial
+		c.pool = newConnPool(c.opt)
 	})
-	return d.pool
+	return c.pool
 }
 
-func (d *sentinelFailover) dial() (net.Conn, error) {
-	addr, err := d.MasterAddr()
+func (c *sentinelFailover) dial() (net.Conn, error) {
+	addr, err := c.MasterAddr()
 	if err != nil {
 		return nil, err
 	}
-	return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
+	return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
 }
 
-func (d *sentinelFailover) MasterAddr() (string, error) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	addr, err := d.masterAddr()
+func (c *sentinelFailover) MasterAddr() (string, error) {
+	addr, err := c.masterAddr()
 	if err != nil {
 		return "", err
 	}
-	d._switchMaster(addr)
-
+	c.switchMaster(addr)
 	return addr, nil
 }
 
-func (d *sentinelFailover) masterAddr() (string, error) {
-	// Try last working sentinel.
-	if d.sentinel != nil {
-		addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
-		if err == nil {
-			addr := net.JoinHostPort(addr[0], addr[1])
-			return addr, nil
-		}
-
-		internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s",
-			d.masterName, err)
-		d._resetSentinel()
+func (c *sentinelFailover) masterAddr() (string, error) {
+	c.mu.RLock()
+	addr := c.getMasterAddr()
+	c.mu.RUnlock()
+	if addr != "" {
+		return addr, nil
 	}
 
-	for i, sentinelAddr := range d.sentinelAddrs {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	addr = c.getMasterAddr()
+	if addr != "" {
+		return addr, nil
+	}
+
+	if c.sentinel != nil {
+		c.closeSentinel()
+	}
+
+	for i, sentinelAddr := range c.sentinelAddrs {
 		sentinel := NewSentinelClient(&Options{
 			Addr: sentinelAddr,
 
-			DialTimeout:  d.opt.DialTimeout,
-			ReadTimeout:  d.opt.ReadTimeout,
-			WriteTimeout: d.opt.WriteTimeout,
+			MaxRetries: c.opt.MaxRetries,
 
-			PoolSize:    d.opt.PoolSize,
-			PoolTimeout: d.opt.PoolTimeout,
-			IdleTimeout: d.opt.IdleTimeout,
+			DialTimeout:  c.opt.DialTimeout,
+			ReadTimeout:  c.opt.ReadTimeout,
+			WriteTimeout: c.opt.WriteTimeout,
+
+			PoolSize:           c.opt.PoolSize,
+			PoolTimeout:        c.opt.PoolTimeout,
+			IdleTimeout:        c.opt.IdleTimeout,
+			IdleCheckFrequency: c.opt.IdleCheckFrequency,
+
+			TLSConfig: c.opt.TLSConfig,
 		})
 
-		masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
+		masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
 		if err != nil {
 			internal.Logf("sentinel: GetMasterAddrByName master=%q failed: %s",
-				d.masterName, err)
-			sentinel.Close()
+				c.masterName, err)
+			_ = sentinel.Close()
 			continue
 		}
 
 		// Push working sentinel to the top.
-		d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
-		d.setSentinel(sentinel)
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(sentinel)
 
 		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
 		return addr, nil
@@ -237,17 +301,34 @@ func (d *sentinelFailover) masterAddr() (string, error) {
 	return "", errors.New("redis: all sentinels are unreachable")
 }
 
-func (c *sentinelFailover) switchMaster(addr string) {
-	c.mu.Lock()
-	c._switchMaster(addr)
-	c.mu.Unlock()
+func (c *sentinelFailover) getMasterAddr() string {
+	sentinel := c.sentinel
+
+	if sentinel == nil {
+		return ""
+	}
+
+	addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
+	if err != nil {
+		internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s",
+			c.masterName, err)
+		return ""
+	}
+
+	return net.JoinHostPort(addr[0], addr[1])
 }
 
-func (c *sentinelFailover) _switchMaster(addr string) {
-	if c._masterAddr == addr {
+func (c *sentinelFailover) switchMaster(addr string) {
+	c.mu.RLock()
+	masterAddr := c._masterAddr
+	c.mu.RUnlock()
+	if masterAddr == addr {
 		return
 	}
 
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
 	internal.Logf("sentinel: new master=%q addr=%q",
 		c.masterName, addr)
 	_ = c.Pool().Filter(func(cn *pool.Conn) bool {
@@ -256,32 +337,36 @@ func (c *sentinelFailover) _switchMaster(addr string) {
 	c._masterAddr = addr
 }
 
-func (d *sentinelFailover) setSentinel(sentinel *SentinelClient) {
-	d.discoverSentinels(sentinel)
-	d.sentinel = sentinel
-	go d.listen(sentinel)
+func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
+	c.discoverSentinels(sentinel)
+	c.sentinel = sentinel
+
+	c.pubsub = sentinel.Subscribe("+switch-master")
+	go c.listen(c.pubsub)
 }
 
-func (d *sentinelFailover) resetSentinel() error {
-	var err error
-	d.mu.Lock()
-	if d.sentinel != nil {
-		err = d._resetSentinel()
+func (c *sentinelFailover) closeSentinel() error {
+	var firstErr error
+
+	err := c.pubsub.Close()
+	if err != nil && firstErr == err {
+		firstErr = err
 	}
-	d.mu.Unlock()
-	return err
+	c.pubsub = nil
+
+	err = c.sentinel.Close()
+	if err != nil && firstErr == err {
+		firstErr = err
+	}
+	c.sentinel = nil
+
+	return firstErr
 }
 
-func (d *sentinelFailover) _resetSentinel() error {
-	err := d.sentinel.Close()
-	d.sentinel = nil
-	return err
-}
-
-func (d *sentinelFailover) discoverSentinels(sentinel *SentinelClient) {
-	sentinels, err := sentinel.Sentinels(d.masterName).Result()
+func (c *sentinelFailover) discoverSentinels(sentinel *SentinelClient) {
+	sentinels, err := sentinel.Sentinels(c.masterName).Result()
 	if err != nil {
-		internal.Logf("sentinel: Sentinels master=%q failed: %s", d.masterName, err)
+		internal.Logf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
 		return
 	}
 	for _, sentinel := range sentinels {
@@ -290,49 +375,32 @@ func (d *sentinelFailover) discoverSentinels(sentinel *SentinelClient) {
 			key := vals[i].(string)
 			if key == "name" {
 				sentinelAddr := vals[i+1].(string)
-				if !contains(d.sentinelAddrs, sentinelAddr) {
-					internal.Logf(
-						"sentinel: discovered new sentinel=%q for master=%q",
-						sentinelAddr, d.masterName,
-					)
-					d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
+				if !contains(c.sentinelAddrs, sentinelAddr) {
+					internal.Logf("sentinel: discovered new sentinel=%q for master=%q",
+						sentinelAddr, c.masterName)
+					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
 				}
 			}
 		}
 	}
 }
 
-func (d *sentinelFailover) listen(sentinel *SentinelClient) {
-	pubsub := sentinel.PubSub()
-	defer pubsub.Close()
-
-	err := pubsub.Subscribe("+switch-master")
-	if err != nil {
-		internal.Logf("sentinel: Subscribe failed: %s", err)
-		d.resetSentinel()
-		return
-	}
-
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+	ch := pubsub.Channel()
 	for {
-		msg, err := pubsub.ReceiveMessage()
-		if err != nil {
-			if err == pool.ErrClosed {
-				d.resetSentinel()
-				return
-			}
-			internal.Logf("sentinel: ReceiveMessage failed: %s", err)
-			continue
+		msg, ok := <-ch
+		if !ok {
+			break
 		}
 
-		switch msg.Channel {
-		case "+switch-master":
+		if msg.Channel == "+switch-master" {
 			parts := strings.Split(msg.Payload, " ")
-			if parts[0] != d.masterName {
+			if parts[0] != c.masterName {
 				internal.Logf("sentinel: ignore addr for master=%q", parts[0])
 				continue
 			}
 			addr := net.JoinHostPort(parts[3], parts[4])
-			d.switchMaster(addr)
+			c.switchMaster(addr)
 		}
 	}
 }
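For context, a minimal failover client; the master name and sentinel address are assumptions. Internally the client now subscribes to +switch-master via the refactored sentinelFailover and re-dials when Sentinel announces a new master:

	package main

	import "github.com/go-redis/redis"

	func main() {
		client := redis.NewFailoverClient(&redis.FailoverOptions{
			MasterName:    "mymaster",
			SentinelAddrs: []string{"localhost:26379"},
		})
		defer client.Close()

		// Commands transparently target whichever node Sentinel reports
		// as the current master.
		client.Ping()
	}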
4 vendor/github.com/go-redis/redis/tx.go generated vendored

@@ -29,10 +29,10 @@ func (c *Client) newTx() *Tx {
 	return &tx
 }
 
-// Watch prepares a transcaction and marks the keys to be watched
+// Watch prepares a transaction and marks the keys to be watched
 // for conditional execution if there are any keys.
 //
-// The transaction is automatically closed when the fn exits.
+// The transaction is automatically closed when fn exits.
 func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
 	tx := c.newTx()
 	if len(keys) > 0 {
1 vendor/github.com/go-redis/redis/universal.go generated vendored

@@ -155,6 +155,7 @@ type UniversalClient interface {
 	Watch(fn func(*Tx) error, keys ...string) error
 	Process(cmd Cmder) error
 	WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error)
+	WrapProcessPipeline(fn func(oldProcess func([]Cmder) error) func([]Cmder) error)
 	Subscribe(channels ...string) *PubSub
 	PSubscribe(channels ...string) *PubSub
 	Close() error
2 vendor/github.com/hashicorp/hcl/go.mod generated vendored

@@ -1,3 +1,5 @@
 module github.com/hashicorp/hcl
 
 require github.com/davecgh/go-spew v1.1.1
+
+go 1.13
2 vendor/github.com/spf13/afero/go.mod generated vendored

@@ -1,3 +1,5 @@
 module github.com/spf13/afero
 
 require golang.org/x/text v0.3.0
+
+go 1.13
3 vendor/modules.txt vendored

@@ -60,14 +60,13 @@ github.com/go-openapi/jsonreference
 github.com/go-openapi/spec
 # github.com/go-openapi/swag v0.19.5
 github.com/go-openapi/swag
-# github.com/go-redis/redis v6.14.0+incompatible
+# github.com/go-redis/redis v6.15.7+incompatible
 github.com/go-redis/redis
 github.com/go-redis/redis/internal
 github.com/go-redis/redis/internal/consistenthash
 github.com/go-redis/redis/internal/hashtag
 github.com/go-redis/redis/internal/pool
 github.com/go-redis/redis/internal/proto
-github.com/go-redis/redis/internal/singleflight
 github.com/go-redis/redis/internal/util
 # github.com/go-sql-driver/mysql v1.5.0
 github.com/go-sql-driver/mysql