[Vendor] update macaron related (#13409)

* Vendor: update gitea.com/macaron/session to a177a270

* make vendor

* Vendor: update gitea.com/macaron/macaron to 0db5d458

* make vendor

* Vendor: update gitea.com/macaron/cache to 905232fb

* make vendor

* Vendor: update gitea.com/macaron/i18n to 4ca3dd0c

* make vendor

* Vendor: update gitea.com/macaron/gzip to efa5e847

* make vendor

* Vendor: update gitea.com/macaron/captcha to e8597820

* make vendor
Authored by 6543 on 2020-11-03 07:04:09 +01:00; committed via GitHub.
Parent: b687707014
Commit: 70ea2300ca
118 changed files with 14557 additions and 6115 deletions

go.mod (18 changed lines)

@@ -7,15 +7,15 @@ require (
 	code.gitea.io/sdk/gitea v0.13.1
 	gitea.com/lunny/levelqueue v0.3.0
 	gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b
-	gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76
-	gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae
+	gitea.com/macaron/cache v0.0.0-20200924044943-905232fba10b
+	gitea.com/macaron/captcha v0.0.0-20200825161008-e8597820aaca
 	gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4
 	gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439
 	gitea.com/macaron/gzip v0.0.0-20200827120000-efa5e8477cf5
-	gitea.com/macaron/i18n v0.0.0-20200910171939-7bbf54aa4c76
+	gitea.com/macaron/i18n v0.0.0-20200911004404-4ca3dd0cbd60
 	gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a
-	gitea.com/macaron/macaron v1.5.0
-	gitea.com/macaron/session v0.0.0-20200902202411-e3a87877db6e
+	gitea.com/macaron/macaron v1.5.1-0.20201027213641-0db5d4584804
+	gitea.com/macaron/session v0.0.0-20201103015045-a177a2701dee
 	gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
 	github.com/PuerkitoBio/goquery v1.5.1
 	github.com/RoaringBitmap/roaring v0.5.1 // indirect
@@ -23,7 +23,6 @@ require (
 	github.com/andybalholm/brotli v1.0.1 // indirect
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
 	github.com/blevesearch/bleve v1.0.12
-	github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 // indirect
 	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
 	github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect
@@ -50,7 +49,6 @@ require (
 	github.com/gobwas/glob v0.2.3
 	github.com/gogs/chardet v0.0.0-20191104214054-4b6791f73a28
 	github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14
-	github.com/golang/snappy v0.0.2 // indirect
 	github.com/google/go-github/v32 v32.1.0
 	github.com/google/uuid v1.1.2
 	github.com/gorilla/context v1.1.1
@@ -64,7 +62,7 @@ require (
 	github.com/jmhodges/levigo v1.0.0 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20170619183022-cd60e84ee657
 	github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4
-	github.com/klauspost/compress v1.11.1
+	github.com/klauspost/compress v1.11.2
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/lafriks/xormstore v1.3.2
 	github.com/lib/pq v1.8.1-0.20200908161135-083382b7e6fc
@@ -111,11 +109,11 @@ require (
 	github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
 	go.jolheiser.com/hcaptcha v0.0.4
 	go.jolheiser.com/pwn v0.0.3
-	golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee
+	golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897
 	golang.org/x/net v0.0.0-20201010224723-4f7140c49acb
 	golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
 	golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211
-	golang.org/x/text v0.3.3
+	golang.org/x/text v0.3.4
 	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
 	golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f
 	google.golang.org/appengine v1.6.7 // indirect

go.sum (40 changed lines)

@ -43,20 +43,26 @@ code.gitea.io/sdk/gitea v0.13.1/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUr
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gitea.com/lunny/levelqueue v0.3.0 h1:MHn1GuSZkxvVEDMyAPqlc7A3cOW+q8RcGhRgH/xtm6I= gitea.com/lunny/levelqueue v0.3.0 h1:MHn1GuSZkxvVEDMyAPqlc7A3cOW+q8RcGhRgH/xtm6I=
gitea.com/lunny/levelqueue v0.3.0/go.mod h1:HBqmLbz56JWpfEGG0prskAV97ATNRoj5LDmPicD22hU= gitea.com/lunny/levelqueue v0.3.0/go.mod h1:HBqmLbz56JWpfEGG0prskAV97ATNRoj5LDmPicD22hU=
gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e h1:r1en/D7xJmcY24VkHkjkcJFa+7ZWubVWPBrvsHkmHxk=
gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e/go.mod h1:uJEsN4LQpeGYRCjuPXPZBClU7N5pWzGuyF4uqLpE/e0=
gitea.com/lunny/nodb v0.0.0-20200923032308-3238c4655727 h1:ZF2Bd6rqVlwhIDhYiS0uGYcT+GaVNGjuKVJkTNqWMIs=
gitea.com/lunny/nodb v0.0.0-20200923032308-3238c4655727/go.mod h1:h0OwsgcpJLSYtHcM5+Xciw9OEeuxi6ty4HDiO8C7aIY=
gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b h1:vXt85uYV17KURaUlhU7v4GbCShkqRZDSfo0TkC0YCjQ= gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b h1:vXt85uYV17KURaUlhU7v4GbCShkqRZDSfo0TkC0YCjQ=
gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b/go.mod h1:Cxadig6POWpPYYSfg23E7jo35Yf0yvsdC1lifoKWmPo= gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b/go.mod h1:Cxadig6POWpPYYSfg23E7jo35Yf0yvsdC1lifoKWmPo=
gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76 h1:mMsMEg90c5KXQgRWsH8D6GHXfZIW1RAe5S9VYIb12lM= gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76 h1:mMsMEg90c5KXQgRWsH8D6GHXfZIW1RAe5S9VYIb12lM=
gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76/go.mod h1:NFHb9Of+LUnU86bU20CiXXg6ZlgCJ4XytP14UsHOXFs= gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76/go.mod h1:NFHb9Of+LUnU86bU20CiXXg6ZlgCJ4XytP14UsHOXFs=
gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae h1:9C31eOCpMPbW9rDVq8M1UJ+5HZVYA38HHaKCVcRYDpI= gitea.com/macaron/cache v0.0.0-20200924044943-905232fba10b h1:2ZE0JE3bKVBcP1VTrWeE1jqWwCAMIzfOQm1U9EGbBKU=
gitea.com/macaron/captcha v0.0.0-20190822015246-daa973478bae/go.mod h1:J5h3N+1nKTXtU1x4GxexaQKgAz8UiWecNwi/CfX7CtQ= gitea.com/macaron/cache v0.0.0-20200924044943-905232fba10b/go.mod h1:W5hKG8T1GBfypp5CRQlgoJU4figIL0jhx02y4XA/NOA=
gitea.com/macaron/captcha v0.0.0-20200825161008-e8597820aaca h1:f5P41nXmXd/YOh8f6098Q0F1Y0QfpyRPSSIkni2XH4Q=
gitea.com/macaron/captcha v0.0.0-20200825161008-e8597820aaca/go.mod h1:J5h3N+1nKTXtU1x4GxexaQKgAz8UiWecNwi/CfX7CtQ=
gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4 h1:e2rAFDejB0qN8OrY4xP4XSu8/yT6QmWxDZpB3J7r2GU= gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4 h1:e2rAFDejB0qN8OrY4xP4XSu8/yT6QmWxDZpB3J7r2GU=
gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4/go.mod h1:rtOK4J20kpMD9XcNsnO5YA843YSTe/MUMbDj/TJ/Q7A= gitea.com/macaron/cors v0.0.0-20190826180238-95aec09ea8b4/go.mod h1:rtOK4J20kpMD9XcNsnO5YA843YSTe/MUMbDj/TJ/Q7A=
gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439 h1:88c34YM29a1GlWLrLBaG/GTT2htDdJz1u3n9+lmPolg= gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439 h1:88c34YM29a1GlWLrLBaG/GTT2htDdJz1u3n9+lmPolg=
gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439/go.mod h1:IsQPHx73HnnqFBYiVHjg87q4XBZyGXXu77xANukvZuk= gitea.com/macaron/csrf v0.0.0-20190822024205-3dc5a4474439/go.mod h1:IsQPHx73HnnqFBYiVHjg87q4XBZyGXXu77xANukvZuk=
gitea.com/macaron/gzip v0.0.0-20200827120000-efa5e8477cf5 h1:6rbhThlqfOb+sSmhrsVFz3bZoAeoloe7TZqyeiPbbWI= gitea.com/macaron/gzip v0.0.0-20200827120000-efa5e8477cf5 h1:6rbhThlqfOb+sSmhrsVFz3bZoAeoloe7TZqyeiPbbWI=
gitea.com/macaron/gzip v0.0.0-20200827120000-efa5e8477cf5/go.mod h1:z8vCjuhqDfvzPUJDowGqbsgoeYBvDbl95S5k6y43Pxo= gitea.com/macaron/gzip v0.0.0-20200827120000-efa5e8477cf5/go.mod h1:z8vCjuhqDfvzPUJDowGqbsgoeYBvDbl95S5k6y43Pxo=
gitea.com/macaron/i18n v0.0.0-20200910171939-7bbf54aa4c76 h1:r+z4ExFB3GHAXaGfWz+TMGs5q/RuOzDsTCGiXiAk5AY= gitea.com/macaron/i18n v0.0.0-20200911004404-4ca3dd0cbd60 h1:tNWNe5HBIlsfapFMtT4twTbXQmInRQWmdWNi8Di1ct0=
gitea.com/macaron/i18n v0.0.0-20200910171939-7bbf54aa4c76/go.mod h1:g5ope1b+iWhBdHzAn6EJ9u9Gp3FRESxpG+CDf7HYc/A= gitea.com/macaron/i18n v0.0.0-20200911004404-4ca3dd0cbd60/go.mod h1:g5ope1b+iWhBdHzAn6EJ9u9Gp3FRESxpG+CDf7HYc/A=
gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM= gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM=
gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a h1:aOKEXkDTnh4euoH0so/THLXeHtQuqHmDPb1xEk6Ehok= gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a h1:aOKEXkDTnh4euoH0so/THLXeHtQuqHmDPb1xEk6Ehok=
gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM= gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM=
@ -64,9 +70,11 @@ gitea.com/macaron/macaron v1.3.3-0.20190803174002-53e005ff4827/go.mod h1:/rvxMjI
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb/go.mod h1:0coI+mSPSwbsyAbOuFllVS38awuk9mevhLD52l50Gjs= gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb/go.mod h1:0coI+mSPSwbsyAbOuFllVS38awuk9mevhLD52l50Gjs=
gitea.com/macaron/macaron v1.5.0 h1:TvWEcHw1/zaHlo0GTuKEukLh3A99+QsU2mjBrXLXjVQ= gitea.com/macaron/macaron v1.5.0 h1:TvWEcHw1/zaHlo0GTuKEukLh3A99+QsU2mjBrXLXjVQ=
gitea.com/macaron/macaron v1.5.0/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY= gitea.com/macaron/macaron v1.5.0/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY=
gitea.com/macaron/macaron v1.5.1-0.20201027213641-0db5d4584804 h1:yUiJVZKzdXsBe2tumTAXHBZa1qPGoGXM3fBG4RJ5fQg=
gitea.com/macaron/macaron v1.5.1-0.20201027213641-0db5d4584804/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY=
gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705/go.mod h1:1ujH0jD6Ca4iK9NL0Q2a7fG2chvXx5hVa7hBfABwpkA= gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705/go.mod h1:1ujH0jD6Ca4iK9NL0Q2a7fG2chvXx5hVa7hBfABwpkA=
gitea.com/macaron/session v0.0.0-20200902202411-e3a87877db6e h1:BHoJ/xWNt6FrVsL54JennM9HPIQlnbmRvmaC5DO65pU= gitea.com/macaron/session v0.0.0-20201103015045-a177a2701dee h1:8/N3a56RXRJ66nnep0z+T7oHCB0bY6lpvtjv9Y9FPhE=
gitea.com/macaron/session v0.0.0-20200902202411-e3a87877db6e/go.mod h1:FanKy3WjWb5iw/iZBPk4ggoQT9FcM6bkBPvmDmsH6tY= gitea.com/macaron/session v0.0.0-20201103015045-a177a2701dee/go.mod h1:5tJCkDbrwpGv+MQUSIZSOW0wFrkh0exsonJgOvBs1Dw=
gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7 h1:N9QFoeNsUXLhl14mefLzGluqV7w2mGU3u+iZU+jCeWk= gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7 h1:N9QFoeNsUXLhl14mefLzGluqV7w2mGU3u+iZU+jCeWk=
gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7/go.mod h1:kgsbFPPS4P+acDYDOPDa3N4IWWOuDJt5/INKRUz7aks= gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7/go.mod h1:kgsbFPPS4P+acDYDOPDa3N4IWWOuDJt5/INKRUz7aks=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
@ -218,12 +226,14 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k=
github.com/couchbase/go-couchbase v0.0.0-20201026062457-7b3be89bbd89 h1:uNLXQ6QO1TocD8BaN/KkRki0Xw0brCM1PKl/ZA5pgfs=
github.com/couchbase/go-couchbase v0.0.0-20201026062457-7b3be89bbd89/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8= github.com/couchbase/gomemcached v0.1.0 h1:whUde87n8CScx8ckMp2En5liqAlcuG3aKy/BQeBPu84=
github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/gomemcached v0.1.0/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk= github.com/couchbase/goutils v0.0.0-20201030094643-5e82bb967e67 h1:NCqJ6fwen6YP0WlV/IyibaT0kPt3JEI1rA62V/UPKT4=
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/couchbase/goutils v0.0.0-20201030094643-5e82bb967e67/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw= github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw=
github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4=
@ -736,8 +746,8 @@ github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.1 h1:bPb7nMRdOZYDrpPMTA3EInUQrdgoBinqUuSwlGdKDdE= github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ=
github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
@ -1240,8 +1250,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee h1:4yd7jl+vXjalO5ztz6Vc1VADv+S/80LGJmyl1ROJ2AI= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1415,6 +1425,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=


@ -1,12 +1,14 @@
## log ## log
[![GoDoc](https://godoc.org/github.com/lunny/log?status.png)](https://godoc.org/github.com/lunny/log)
[简体中文](https://github.com/lunny/log/blob/master/README_CN.md) [![](https://goreportcard.com/badge/gitea.com/lunny/log)](https://goreportcard.com/report/gitea.com/lunny/log)
[![GoDoc](https://godoc.org/gitea.com/lunny/log?status.png)](https://godoc.org/gitea.com/lunny/log)
[简体中文](https://gitea.com/lunny/log/blob/master/README_CN.md)
# Installation # Installation
``` ```
go get github.com/lunny/log go get gitea.com/lunny/log
``` ```
# Features # Features


@ -1,12 +1,14 @@
## log ## log
[![GoDoc](https://godoc.org/github.com/lunny/log?status.png)](https://godoc.org/github.com/lunny/log)
[English](https://github.com/lunny/log/blob/master/README.md) [![](https://goreportcard.com/badge/gitea.com/lunny/log)](https://goreportcard.com/report/gitea.com/lunny/log)
[![GoDoc](https://godoc.org/gitea.com/lunny/log?status.png)](https://godoc.org/gitea.com/lunny/log)
[English](https://gitea.com/lunny/log/blob/master/README.md)
# 安装 # 安装
``` ```
go get github.com/lunny/log go get gitea.com/lunny/log
``` ```
# 特性 # 特性

vendor/gitea.com/lunny/log/go.mod (new generated vendored file, 5 lines)

@ -0,0 +1,5 @@
module gitea.com/lunny/log
go 1.12
require github.com/mattn/go-sqlite3 v1.10.0

vendor/gitea.com/lunny/log/go.sum (new generated vendored file, 2 lines)

@ -0,0 +1,2 @@
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=


@ -1,6 +1,6 @@
# NoDB # NoDB
[中文](https://github.com/lunny/nodb/blob/master/README_CN.md) [中文](https://gitea.com/lunny/nodb/blob/master/README_CN.md)
Nodb is a fork of [ledisdb](https://github.com/siddontang/ledisdb) and shrink version. It's get rid of all C or other language codes and only keep Go's. It aims to provide a nosql database library rather than a redis like server. So if you want a redis like server, ledisdb is the best choose. Nodb is a fork of [ledisdb](https://github.com/siddontang/ledisdb) and shrink version. It's get rid of all C or other language codes and only keep Go's. It aims to provide a nosql database library rather than a redis like server. So if you want a redis like server, ledisdb is the best choose.
@ -17,15 +17,15 @@ Nodb now use [goleveldb](https://github.com/syndtr/goleveldb) as backend to stor
## Install ## Install
go get github.com/lunny/nodb go get gitea.com/lunny/nodb
## Package Example ## Package Example
### Open And Select database ### Open And Select database
```go ```go
import( import(
"github.com/lunny/nodb" "gitea.com/lunny/nodb"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
) )
cfg := new(config.Config) cfg := new(config.Config)
@ -75,8 +75,7 @@ ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
## Links ## Links
+ [Ledisdb Official Website](http://ledisdb.com) + [Ledisdb Official Website](http://ledisdb.com)
+ [GoDoc](https://godoc.org/github.com/lunny/nodb) + [GoDoc](https://godoc.org/gitea.com/lunny/nodb)
+ [GoWalker](https://gowalker.org/github.com/lunny/nodb)
## Thanks ## Thanks


@ -1,6 +1,6 @@
# NoDB # NoDB
[English](https://github.com/lunny/nodb/blob/master/README.md) [English](https://gitea.com/lunny/nodb/blob/master/README.md)
Nodb 是 [ledisdb](https://github.com/siddontang/ledisdb) 的克隆和缩减版本。该版本去掉了所有C和其它语言的依赖只保留Go语言的。目标是提供一个Nosql数据库的开发库而不是提供一个像Redis那样的服务器。因此如果你想要的是一个独立服务器你可以直接选择ledisdb。 Nodb 是 [ledisdb](https://github.com/siddontang/ledisdb) 的克隆和缩减版本。该版本去掉了所有C和其它语言的依赖只保留Go语言的。目标是提供一个Nosql数据库的开发库而不是提供一个像Redis那样的服务器。因此如果你想要的是一个独立服务器你可以直接选择ledisdb。
@ -17,15 +17,15 @@ Nodb 当前底层使用 (goleveldb)[https://github.com/syndtr/goleveldb] 来存
## 安装 ## 安装
go get github.com/lunny/nodb go get gitea.com/lunny/nodb
## 例子 ## 例子
### 打开和选择数据库 ### 打开和选择数据库
```go ```go
import( import(
"github.com/lunny/nodb" "gitea.com/lunny/nodb"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
) )
cfg := new(config.Config) cfg := new(config.Config)
@ -72,8 +72,7 @@ ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
## 链接 ## 链接
+ [Ledisdb Official Website](http://ledisdb.com) + [Ledisdb Official Website](http://ledisdb.com)
+ [GoDoc](https://godoc.org/github.com/lunny/nodb) + [GoDoc](https://godoc.org/gitea.com/lunny/nodb)
+ [GoWalker](https://gowalker.org/github.com/lunny/nodb)
## 感谢 ## 感谢


@ -3,7 +3,7 @@ package nodb
import ( import (
"sync" "sync"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
type batch struct { type batch struct {


@ -13,8 +13,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/lunny/log" "gitea.com/lunny/log"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
) )
type BinLogHead struct { type BinLogHead struct {
@ -139,12 +139,12 @@ func (l *BinLog) flushIndex() error {
bakName := fmt.Sprintf("%s.bak", l.indexName) bakName := fmt.Sprintf("%s.bak", l.indexName)
f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666) f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil { if err != nil {
log.Error("create binlog bak index error %s", err.Error()) log.Errorf("create binlog bak index error %s", err.Error())
return err return err
} }
if _, err := f.WriteString(data); err != nil { if _, err := f.WriteString(data); err != nil {
log.Error("write binlog index error %s", err.Error()) log.Errorf("write binlog index error %s", err.Error())
f.Close() f.Close()
return err return err
} }
@ -152,7 +152,7 @@ func (l *BinLog) flushIndex() error {
f.Close() f.Close()
if err := os.Rename(bakName, l.indexName); err != nil { if err := os.Rename(bakName, l.indexName); err != nil {
log.Error("rename binlog bak index error %s", err.Error()) log.Errorf("rename binlog bak index error %s", err.Error())
return err return err
} }
@ -177,7 +177,7 @@ func (l *BinLog) loadIndex() error {
} }
if _, err := os.Stat(path.Join(l.path, line)); err != nil { if _, err := os.Stat(path.Join(l.path, line)); err != nil {
log.Error("load index line %s error %s", line, err.Error()) log.Errorf("load index line %s error %s", line, err.Error())
return err return err
} else { } else {
l.logNames = append(l.logNames, line) l.logNames = append(l.logNames, line)
@ -198,7 +198,7 @@ func (l *BinLog) loadIndex() error {
lastName := l.logNames[len(l.logNames)-1] lastName := l.logNames[len(l.logNames)-1]
if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil { if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil {
log.Error("invalid logfile name %s", err.Error()) log.Errorf("invalid logfile name %s", err.Error())
return err return err
} }
@ -219,7 +219,7 @@ func (l *BinLog) openNewLogFile() error {
logPath := path.Join(l.path, lastName) logPath := path.Join(l.path, lastName)
if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil { if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil {
log.Error("open new logfile error %s", err.Error()) log.Errorf("open new logfile error %s", err.Error())
return err return err
} }
@ -374,7 +374,7 @@ func (l *BinLog) Log(args ...[]byte) error {
} }
if err = l.logWb.Flush(); err != nil { if err = l.logWb.Flush(); err != nil {
log.Error("write log error %s", err.Error()) log.Errorf("write log error %s", err.Error())
return err return err
} }


@@ -3,7 +3,7 @@ package config
 import (
 	"io/ioutil"

-	"github.com/BurntSushi/toml"
+	"github.com/pelletier/go-toml"
 )

 type Size int
@@ -71,7 +71,7 @@ func NewConfigWithFile(fileName string) (*Config, error) {
 func NewConfigWithData(data []byte) (*Config, error) {
 	cfg := NewConfigDefault()

-	_, err := toml.Decode(string(data), cfg)
+	err := toml.Unmarshal(data, cfg)
 	if err != nil {
 		return nil, err
 	}
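
This hunk swaps BurntSushi's `toml.Decode` for pelletier/go-toml's `toml.Unmarshal`, which works directly on a byte slice and returns only an error. A standalone sketch of the new call; the `Config` struct and TOML key below are illustrative stand-ins, not the package's real definitions:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

// Config stands in for nodb's config.Config; only the decoding call matters here.
type Config struct {
	DataDir string `toml:"data_dir"`
}

func main() {
	data := []byte(`data_dir = "/var/lib/nodb"`)

	cfg := &Config{}
	// pelletier/go-toml takes the raw bytes, so the old
	// toml.Decode(string(data), cfg) call and its extra return value go away.
	if err := toml.Unmarshal(data, cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DataDir)
}
```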

vendor/gitea.com/lunny/nodb/go.mod (new generated vendored file, 11 lines)

@ -0,0 +1,11 @@
module gitea.com/lunny/nodb
go 1.12
require (
gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e
github.com/mattn/go-sqlite3 v1.11.0 // indirect
github.com/pelletier/go-toml v1.8.1
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d
github.com/syndtr/goleveldb v1.0.0
)

vendor/gitea.com/lunny/nodb/go.sum (new generated vendored file, 42 lines)

@ -0,0 +1,42 @@
gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e h1:r1en/D7xJmcY24VkHkjkcJFa+7ZWubVWPBrvsHkmHxk=
gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e/go.mod h1:uJEsN4LQpeGYRCjuPXPZBClU7N5pWzGuyF4uqLpE/e0=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68=
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d/go.mod h1:vq0tzqLRu6TS7Id0wMo2N5QzJoKedVeovOpHjnykSzY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@ -5,9 +5,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/lunny/log" "gitea.com/lunny/nodb/config"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/store"
"github.com/lunny/nodb/store" "gitea.com/lunny/log"
) )
type Nodb struct { type Nodb struct {
@ -83,7 +83,7 @@ func (l *Nodb) Select(index int) (*DB, error) {
func (l *Nodb) FlushAll() error { func (l *Nodb) FlushAll() error {
for index, db := range l.dbs { for index, db := range l.dbs {
if _, err := db.FlushAll(); err != nil { if _, err := db.FlushAll(); err != nil {
log.Error("flush db %d error %s", index, err.Error()) log.Errorf("flush db %d error %s", index, err.Error())
} }
} }


@ -4,7 +4,7 @@ import (
"fmt" "fmt"
"sync" "sync"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
type ibucket interface { type ibucket interface {


@ -8,8 +8,8 @@ import (
"os" "os"
"time" "time"
"github.com/lunny/log" "gitea.com/lunny/log"
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
const ( const (
@ -143,7 +143,7 @@ func (l *Nodb) ReplicateFromReader(rb io.Reader) error {
b.lastHead = head b.lastHead = head
} else if !b.lastHead.InSameBatch(head) { } else if !b.lastHead.InSameBatch(head) {
if err := b.Commit(); err != nil { if err := b.Commit(); err != nil {
log.Fatal("replication error %s, skip to next", err.Error()) log.Fatalf("replication error %s, skip to next", err.Error())
return ErrSkipEvent return ErrSkipEvent
} }
b.lastHead = head b.lastHead = head
@ -151,7 +151,7 @@ func (l *Nodb) ReplicateFromReader(rb io.Reader) error {
err := l.replicateEvent(b, event) err := l.replicateEvent(b, event)
if err != nil { if err != nil {
log.Fatal("replication error %s, skip to next", err.Error()) log.Fatalf("replication error %s, skip to next", err.Error())
return ErrSkipEvent return ErrSkipEvent
} }
return nil return nil


@ -5,7 +5,7 @@ import (
"errors" "errors"
"regexp" "regexp"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
var errDataType = errors.New("error data type") var errDataType = errors.New("error data type")


@ -1,7 +1,7 @@
package store package store
import ( import (
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
type DB struct { type DB struct {


@ -3,7 +3,7 @@ package driver
import ( import (
"fmt" "fmt"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
) )
type Store interface { type Store interface {


@ -7,8 +7,8 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/storage"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
"os" "os"
) )


@ -1,7 +1,7 @@
package goleveldb package goleveldb
import ( import (
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
) )


@ -3,7 +3,7 @@ package store
import ( import (
"bytes" "bytes"
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
const ( const (


@ -1,7 +1,7 @@
package store package store
import ( import (
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
type Snapshot struct { type Snapshot struct {


@ -4,10 +4,10 @@ import (
"fmt" "fmt"
"os" "os"
"path" "path"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
_ "github.com/lunny/nodb/store/goleveldb" _ "gitea.com/lunny/nodb/store/goleveldb"
) )
func getStorePath(cfg *config.Config) string { func getStorePath(cfg *config.Config) string {


@ -1,7 +1,7 @@
package store package store
import ( import (
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
type Tx struct { type Tx struct {


@ -1,7 +1,7 @@
package store package store
import ( import (
"github.com/lunny/nodb/store/driver" "gitea.com/lunny/nodb/store/driver"
) )
type WriteBatch interface { type WriteBatch interface {


@ -6,7 +6,7 @@ import (
"sort" "sort"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
const ( const (


@ -5,7 +5,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
type FVPair struct { type FVPair struct {


@ -5,7 +5,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
const ( const (


@ -5,7 +5,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
var errSetKey = errors.New("invalid set key") var errSetKey = errors.New("invalid set key")


@ -5,7 +5,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
var ( var (


@ -6,7 +6,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
const ( const (


@ -4,7 +4,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/lunny/nodb/store" "gitea.com/lunny/nodb/store"
) )
var ( var (


@ -4,18 +4,20 @@ go 1.11
require ( require (
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/go-redis/redis v6.15.2+incompatible github.com/go-redis/redis v6.15.2+incompatible
github.com/go-sql-driver/mysql v1.4.1 github.com/go-sql-driver/mysql v1.4.1
github.com/golang/snappy v0.0.2 // indirect
github.com/lib/pq v1.2.0 github.com/lib/pq v1.2.0
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af
github.com/mattn/go-sqlite3 v1.11.0 // indirect github.com/mattn/go-sqlite3 v1.11.0 // indirect
github.com/onsi/ginkgo v1.8.0 // indirect github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect github.com/onsi/gomega v1.5.0 // indirect
github.com/pelletier/go-toml v1.4.0 // indirect github.com/pelletier/go-toml v1.8.1 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d // indirect github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d // indirect
github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92 github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92
@ -27,4 +29,5 @@ require (
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 // indirect golang.org/x/sys v0.0.0-20190730183949-1393eb018365 // indirect
google.golang.org/appengine v1.6.1 // indirect google.golang.org/appengine v1.6.1 // indirect
gopkg.in/ini.v1 v1.44.2 gopkg.in/ini.v1 v1.44.2
gopkg.in/yaml.v2 v2.2.2 // indirect
) )


@ -23,6 +23,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -46,8 +48,8 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM= github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68= github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68=


@ -240,10 +240,10 @@ func Captchaer(options ...Options) macaron.Handler {
} }
} }
ctx.Status(200)
if _, err := NewImage([]byte(chars), cpt.StdWidth, cpt.StdHeight, cpt.ColorPalette).WriteTo(ctx.Resp); err != nil { if _, err := NewImage([]byte(chars), cpt.StdWidth, cpt.StdHeight, cpt.ColorPalette).WriteTo(ctx.Resp); err != nil {
panic(fmt.Errorf("write captcha: %v", err)) panic(fmt.Errorf("write captcha: %v", err))
} }
ctx.Status(200)
return return
} }
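
The hunk header above shows this package's entry point, `Captchaer(options ...Options) macaron.Handler`. A minimal wiring sketch, assuming the usual go-macaron pattern of registering a cache middleware before the captcha middleware (both modules are bumped by this commit); the route and its output are illustrative only:

```go
package main

import (
	"fmt"

	"gitea.com/macaron/cache"
	"gitea.com/macaron/captcha"
	"gitea.com/macaron/macaron"
)

func main() {
	m := macaron.Classic()
	// Captchaer keeps generated challenges in the cache middleware,
	// so cache.Cacher() is registered first.
	m.Use(cache.Cacher())
	m.Use(captcha.Captchaer())

	// The middleware injects *captcha.Captcha; StdWidth/StdHeight are the
	// image dimensions used by the WriteTo call in the hunk above.
	m.Get("/", func(cpt *captcha.Captcha) string {
		return fmt.Sprintf("captcha image size: %dx%d", cpt.StdWidth, cpt.StdHeight)
	})
	m.Run()
}
```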


@@ -1,9 +1,9 @@
 kind: pipeline
-name: golang-1-1
+name: golang-1-14
 steps:
 - name: test
-  image: golang:1.11
+  image: golang:1.14
   environment:
     GOPROXY: https://goproxy.cn
   commands:
@@ -12,11 +12,11 @@ steps:
 ---
 kind: pipeline
-name: golang-1-2
+name: golang-1-15
 steps:
 - name: test
-  image: golang:1.12
+  image: golang:1.15
   environment:
     GOPROXY: https://goproxy.cn
   commands:


@ -1,4 +1,5 @@
// Copyright 2014 The Macaron Authors // Copyright 2014 The Macaron Authors
// Copyright 2020 The Gitea Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"): you may // Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain // not use this file except in compliance with the License. You may obtain
@ -68,6 +69,7 @@ type Request struct {
*http.Request *http.Request
} }
// Body returns a RequestBody for the request
func (r *Request) Body() *RequestBody { func (r *Request) Body() *RequestBody {
return &RequestBody{r.Request.Body} return &RequestBody{r.Request.Body}
} }
@ -75,6 +77,7 @@ func (r *Request) Body() *RequestBody {
// ContextInvoker is an inject.FastInvoker wrapper of func(ctx *Context). // ContextInvoker is an inject.FastInvoker wrapper of func(ctx *Context).
type ContextInvoker func(ctx *Context) type ContextInvoker func(ctx *Context)
// Invoke implements inject.FastInvoker - in the context of a func(ctx *Context) this simply calls the function
func (invoke ContextInvoker) Invoke(params []interface{}) ([]reflect.Value, error) { func (invoke ContextInvoker) Invoke(params []interface{}) ([]reflect.Value, error) {
invoke(params[0].(*Context)) invoke(params[0].(*Context))
return nil, nil return nil, nil
@ -97,41 +100,43 @@ type Context struct {
Data map[string]interface{} Data map[string]interface{}
} }
func (c *Context) handler() Handler { func (ctx *Context) handler() Handler {
if c.index < len(c.handlers) { if ctx.index < len(ctx.handlers) {
return c.handlers[c.index] return ctx.handlers[ctx.index]
} }
if c.index == len(c.handlers) { if ctx.index == len(ctx.handlers) {
return c.action return ctx.action
} }
panic("invalid index for context handler") panic("invalid index for context handler")
} }
func (c *Context) Next() { // Next runs the next handler in the context chain
c.index += 1 func (ctx *Context) Next() {
c.run() ctx.index++
ctx.run()
} }
func (c *Context) Written() bool { // Written returns whether the context response has been written to
return c.Resp.Written() func (ctx *Context) Written() bool {
return ctx.Resp.Written()
} }
func (c *Context) run() { func (ctx *Context) run() {
for c.index <= len(c.handlers) { for ctx.index <= len(ctx.handlers) {
vals, err := c.Invoke(c.handler()) vals, err := ctx.Invoke(ctx.handler())
if err != nil { if err != nil {
panic(err) panic(err)
} }
c.index += 1 ctx.index++
// if the handler returned something, write it to the http response // if the handler returned something, write it to the http response
if len(vals) > 0 { if len(vals) > 0 {
ev := c.GetVal(reflect.TypeOf(ReturnHandler(nil))) ev := ctx.GetVal(reflect.TypeOf(ReturnHandler(nil)))
handleReturn := ev.Interface().(ReturnHandler) handleReturn := ev.Interface().(ReturnHandler)
handleReturn(c, vals) handleReturn(ctx, vals)
} }
if c.Written() { if ctx.Written() {
return return
} }
} }
@ -172,6 +177,7 @@ func (ctx *Context) HTMLSet(status int, setName, tplName string, data ...interfa
ctx.renderHTML(status, setName, tplName, data...) ctx.renderHTML(status, setName, tplName, data...)
} }
// Redirect sends a redirect response
func (ctx *Context) Redirect(location string, status ...int) { func (ctx *Context) Redirect(location string, status ...int) {
code := http.StatusFound code := http.StatusFound
if len(status) == 1 { if len(status) == 1 {
@ -181,7 +187,7 @@ func (ctx *Context) Redirect(location string, status ...int) {
http.Redirect(ctx.Resp, ctx.Req.Request, location, code) http.Redirect(ctx.Resp, ctx.Req.Request, location, code)
} }
// Maximum amount of memory to use when parsing a multipart form. // MaxMemory is the maximum amount of memory to use when parsing a multipart form.
// Set this to whatever value you prefer; default is 10 MB. // Set this to whatever value you prefer; default is 10 MB.
var MaxMemory = int64(1024 * 1024 * 10) var MaxMemory = int64(1024 * 1024 * 10)
@ -341,6 +347,8 @@ func (ctx *Context) SetCookie(name string, value string, others ...interface{})
cookie.MaxAge = int(v) cookie.MaxAge = int(v)
case int32: case int32:
cookie.MaxAge = int(v) cookie.MaxAge = int(v)
case func(*http.Cookie):
v(&cookie)
} }
} }
@ -348,12 +356,16 @@ func (ctx *Context) SetCookie(name string, value string, others ...interface{})
if len(others) > 1 { if len(others) > 1 {
if v, ok := others[1].(string); ok && len(v) > 0 { if v, ok := others[1].(string); ok && len(v) > 0 {
cookie.Path = v cookie.Path = v
} else if v, ok := others[1].(func(*http.Cookie)); ok {
v(&cookie)
} }
} }
if len(others) > 2 { if len(others) > 2 {
if v, ok := others[2].(string); ok && len(v) > 0 { if v, ok := others[2].(string); ok && len(v) > 0 {
cookie.Domain = v cookie.Domain = v
} else if v, ok := others[1].(func(*http.Cookie)); ok {
v(&cookie)
} }
} }
@ -361,6 +373,8 @@ func (ctx *Context) SetCookie(name string, value string, others ...interface{})
switch v := others[3].(type) { switch v := others[3].(type) {
case bool: case bool:
cookie.Secure = v cookie.Secure = v
case func(*http.Cookie):
v(&cookie)
default: default:
if others[3] != nil { if others[3] != nil {
cookie.Secure = true cookie.Secure = true
@ -371,6 +385,8 @@ func (ctx *Context) SetCookie(name string, value string, others ...interface{})
if len(others) > 4 { if len(others) > 4 {
if v, ok := others[4].(bool); ok && v { if v, ok := others[4].(bool); ok && v {
cookie.HttpOnly = true cookie.HttpOnly = true
} else if v, ok := others[1].(func(*http.Cookie)); ok {
v(&cookie)
} }
} }
@ -378,6 +394,16 @@ func (ctx *Context) SetCookie(name string, value string, others ...interface{})
if v, ok := others[5].(time.Time); ok { if v, ok := others[5].(time.Time); ok {
cookie.Expires = v cookie.Expires = v
cookie.RawExpires = v.Format(time.UnixDate) cookie.RawExpires = v.Format(time.UnixDate)
} else if v, ok := others[1].(func(*http.Cookie)); ok {
v(&cookie)
}
}
if len(others) > 6 {
for _, other := range others[6:] {
if v, ok := other.(func(*http.Cookie)); ok {
v(&cookie)
}
} }
} }
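
The new `case func(*http.Cookie)` branches make `SetCookie` accept functional options alongside the old positional ones. A minimal usage sketch, assuming the gitea.com/macaron/macaron fork keeps go-macaron's `Classic`/`Get`/`Run` entry points; the handler and cookie values are illustrative:

```go
package main

import (
	"net/http"

	"gitea.com/macaron/macaron"
)

// setLang shows the functional cookie option handled above: any
// func(*http.Cookie) passed in SetCookie's variadic argument list is applied
// to the cookie before it is written, which exposes fields (e.g. SameSite)
// that the positional options never covered.
func setLang(ctx *macaron.Context) string {
	ctx.SetCookie("lang", "en-US", func(c *http.Cookie) {
		c.MaxAge = 3600 // one hour
		c.SameSite = http.SameSiteLaxMode
	})
	return "language cookie set"
}

func main() {
	m := macaron.Classic()
	m.Get("/", setLang)
	m.Run()
}
```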


@@ -20,7 +20,7 @@ import (
 	"sync"

 	"gitea.com/macaron/session"
-	"github.com/couchbaselabs/go-couchbase"
+	"github.com/couchbase/go-couchbase"
 )

 // CouchbaseSessionStore represents a couchbase session store implementation.
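
The couchbase session provider now builds on the maintained `github.com/couchbase/go-couchbase` client instead of the old couchbaselabs fork. A minimal sketch of selecting that provider, assuming macaron/session's usual `Sessioner`/`Options` API and a blank import of the couchbase sub-package; the connection string is a placeholder, not a value taken from this commit:

```go
package main

import (
	"gitea.com/macaron/macaron"
	"gitea.com/macaron/session"
	// Blank-importing the sub-package registers the "couchbase" provider,
	// which after this change talks to github.com/couchbase/go-couchbase.
	_ "gitea.com/macaron/session/couchbase"
)

func main() {
	m := macaron.Classic()
	m.Use(session.Sessioner(session.Options{
		Provider:       "couchbase",
		ProviderConfig: "<couchbase connection string>", // placeholder
	}))
	m.Run()
}
```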


@ -3,30 +3,28 @@ module gitea.com/macaron/session
go 1.11 go 1.11
require ( require (
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb gitea.com/lunny/nodb v0.0.0-20200923032308-3238c4655727
gitea.com/macaron/macaron v1.5.0
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d // indirect github.com/couchbase/go-couchbase v0.0.0-20201026062457-7b3be89bbd89
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 // indirect github.com/couchbase/gomemcached v0.1.0 // indirect
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 github.com/couchbase/goutils v0.0.0-20201030094643-5e82bb967e67 // indirect
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/go-redis/redis v6.15.2+incompatible github.com/go-redis/redis v6.15.2+incompatible
github.com/go-sql-driver/mysql v1.4.1 github.com/go-sql-driver/mysql v1.4.1
github.com/golang/snappy v0.0.2 // indirect
github.com/lib/pq v1.2.0 github.com/lib/pq v1.2.0
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af
github.com/mattn/go-sqlite3 v1.11.0 // indirect
github.com/pelletier/go-toml v1.4.0 // indirect
github.com/pkg/errors v0.8.1 // indirect github.com/pkg/errors v0.8.1 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d // indirect
github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92 github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337
github.com/stretchr/testify v1.3.0 // indirect github.com/stretchr/testify v1.3.0 // indirect
github.com/syndtr/goleveldb v1.0.0 // indirect github.com/unknwon/com v1.0.1
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 // indirect
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect
google.golang.org/appengine v1.6.1 // indirect google.golang.org/appengine v1.6.1 // indirect
gopkg.in/ini.v1 v1.44.0 gopkg.in/ini.v1 v1.62.0
gopkg.in/yaml.v2 v2.2.2 // indirect
) )


@ -1,17 +1,17 @@
gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591 h1:UbCTjPcLrNxR9LzKDjQBMT2zoxZuEnca1pZCpgeMuhQ= gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e h1:r1en/D7xJmcY24VkHkjkcJFa+7ZWubVWPBrvsHkmHxk=
gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM= gitea.com/lunny/log v0.0.0-20190322053110-01b5df579c4e/go.mod h1:uJEsN4LQpeGYRCjuPXPZBClU7N5pWzGuyF4uqLpE/e0=
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb h1:amL0md6orTj1tXY16ANzVU9FmzQB+W7aJwp8pVDbrmA= gitea.com/lunny/nodb v0.0.0-20200923032308-3238c4655727 h1:ZF2Bd6rqVlwhIDhYiS0uGYcT+GaVNGjuKVJkTNqWMIs=
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb/go.mod h1:0coI+mSPSwbsyAbOuFllVS38awuk9mevhLD52l50Gjs= gitea.com/lunny/nodb v0.0.0-20200923032308-3238c4655727/go.mod h1:h0OwsgcpJLSYtHcM5+Xciw9OEeuxi6ty4HDiO8C7aIY=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= gitea.com/macaron/macaron v1.5.0/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM= github.com/couchbase/go-couchbase v0.0.0-20201026062457-7b3be89bbd89 h1:uNLXQ6QO1TocD8BaN/KkRki0Xw0brCM1PKl/ZA5pgfs=
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/go-couchbase v0.0.0-20201026062457-7b3be89bbd89/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk= github.com/couchbase/gomemcached v0.1.0 h1:whUde87n8CScx8ckMp2En5liqAlcuG3aKy/BQeBPu84=
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/couchbase/gomemcached v0.1.0/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8= github.com/couchbase/goutils v0.0.0-20201030094643-5e82bb967e67 h1:NCqJ6fwen6YP0WlV/IyibaT0kPt3JEI1rA62V/UPKT4=
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc= github.com/couchbase/goutils v0.0.0-20201030094643-5e82bb967e67/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ= github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY= github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -30,6 +30,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -40,10 +42,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de h1:nyxwRdWHAVxpFcDThedEgQ07DbcRc5xgNObtbTp76fk= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de/go.mod h1:3q8WtuPQsoRbatJuy3nvq/hRSvuBJrHHr+ybPPiNvHQ=
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af h1:UaWHNBdukWrSG3DRvHFR/hyfg681fceqQDYVTBncKfQ=
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VObJ7peltM+2n3PWOz7yTrfUuGbVFkzN0=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q= github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -51,8 +50,8 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -68,6 +67,7 @@ github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@ -76,12 +76,13 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM= github.com/unknwon/com v1.0.1/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -109,6 +110,7 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.44.0 h1:YRJzTUp0kSYWUVFF5XAbDFfyiqwsl0Vb9R8TVP5eRi0= gopkg.in/ini.v1 v1.44.0 h1:YRJzTUp0kSYWUVFF5XAbDFfyiqwsl0Vb9R8TVP5eRi0=
gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@ -19,8 +19,8 @@ import (
"sync" "sync"
"gitea.com/macaron/session" "gitea.com/macaron/session"
"github.com/lunny/nodb" "gitea.com/lunny/nodb"
"github.com/lunny/nodb/config" "gitea.com/lunny/nodb/config"
) )
// NodbStore represents a nodb session store implementation. // NodbStore represents a nodb session store implementation.

View file

@ -1,5 +0,0 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

View file

@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

View file

@ -1,3 +0,0 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

View file

@ -1,218 +0,0 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
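The `MetaData` value returned by `Decode` can also be used to tighten the
loose mapping, for example to report TOML keys that were not mapped into any
Go field. A rough sketch, reusing the `blob` placeholder from the snippets
above (the `serverConf` struct is illustrative):
```go
type serverConf struct {
	IP string
	DC string
}

var conf serverConf
md, err := toml.Decode(blob, &conf)
if err != nil {
	log.Fatal(err)
}
// Any key in the document that did not land in a struct field
// (for example a misspelled option) is reported as undecoded.
for _, key := range md.Undecoded() {
	fmt.Printf("undecoded key: %s\n", key)
}
```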

View file

@ -1,509 +0,0 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
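// exampleUnmarshal is a minimal usage sketch (illustrative only, not part of
// the upstream API): it fills a plain struct from an in-memory TOML document.
// Field names match TOML keys case-insensitively unless a `toml` struct tag
// says otherwise.
func exampleUnmarshal() error {
	var cfg struct {
		Title string
		Ports []int
	}
	doc := []byte("title = \"demo\"\nports = [8001, 8002]\n")
	return Unmarshal(doc, &cfg)
}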
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
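// examplePrimitive is a minimal sketch of delayed decoding (illustrative
// only): the outer document is decoded first, the part whose concrete type is
// not known up front is captured as a Primitive, and PrimitiveDecode resolves
// it once the target type is known.
func examplePrimitive() error {
	var outer struct {
		Kind    string
		Details Primitive
	}
	doc := "kind = \"server\"\n[details]\nip = \"10.0.0.1\"\n"
	md, err := Decode(doc, &outer)
	if err != nil {
		return err
	}
	var details struct{ IP string }
	return md.PrimitiveDecode(outer.Details, &details)
}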
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

View file

@ -1,121 +0,0 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically, e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}

View file

@ -1,27 +0,0 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

View file

@ -1,568 +0,0 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values into a TOML document written
// to some io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
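// exampleEncode is a minimal usage sketch (illustrative only): it writes an
// anonymous struct as a TOML document. Keys without sub-tables come first and
// map keys are sorted, so the output is deterministic.
func exampleEncode(w io.Writer) error {
	conf := struct {
		Title string
		Owner struct{ Name string }
	}{Title: "demo"}
	conf.Owner.Name = "Tom"
	enc := NewEncoder(w)
	enc.Indent = "    " // four spaces instead of the default two
	return enc.Encode(conf)
}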
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. Among other things, it is
// used to determine whether the types of array elements are mixed (which is
// forbidden).
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
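// settingsExample is an illustrative sketch of the tag options recognized by
// getOptions (not part of the upstream file): "-" skips a field entirely, a
// leading name overrides the key, and omitempty/omitzero suppress empty or
// zero values in the output.
type settingsExample struct {
	Secret  string   `toml:"-"`
	Name    string   `toml:"display_name"`
	Tags    []string `toml:",omitempty"`
	Retries int      `toml:",omitzero"`
}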
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

View file

@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

View file

@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

View file

@ -1,953 +0,0 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
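// exampleDrain is an illustrative sketch (not part of the upstream file) of
// how a caller consumes the lexer: keep asking for the next item until an
// itemEOF or itemError shows up.
func exampleDrain(input string) []item {
	lx := lex(input)
	var items []item
	for {
		it := lx.nextItem()
		items = append(items, it)
		if it.typ == itemEOF || it.typ == itemError {
			return items
		}
	}
}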
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
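Taken together, the state functions above turn raw TOML text into a stream of items. A rough in-package sketch of how that stream could be inspected (say, from a _test.go file): it assumes only the lex constructor and the nextItem method defined earlier in this file, which the parser below uses in exactly this way; dumpItems itself is a hypothetical helper, not vendored code.
// Hypothetical, in-package helper for illustration only.
func dumpItems(data string) {
	lx := lex(data)
	for {
		it := lx.nextItem()
		fmt.Printf("line %d: %s %q\n", it.line, it.typ, it.val)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
}
// dumpItems("title = \"TOML\"\n") would print a KeyStart item, the key text
// "title", the String value "TOML", and finally EOF.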


@ -1,592 +0,0 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
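parse keeps the internal error handling panic-based (panicf below) and converts any parseError panic back into an ordinary error at this single boundary, while letting genuine bugs propagate. A minimal, self-contained sketch of that recover-to-error pattern, using toy names rather than the vendored types:
package main
import "fmt"
type parseFailure string
func (p parseFailure) Error() string { return string(p) }
// run turns a deliberately typed panic into a returned error and re-panics
// on anything else, the same shape as parse above.
func run(input string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(parseFailure); ok {
				err = e
				return
			}
			panic(r)
		}
	}()
	if input == "" {
		panic(parseFailure("empty input"))
	}
	return nil
}
func main() {
	fmt.Println(run(""))      // empty input
	fmt.Println(run("a = 1")) // <nil>
}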
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
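For reference, a few inputs and the results the two validators above would give; this is an illustrative in-package snippet (the exampleNumChecks name is made up), not vendored code.
func exampleNumChecks() {
	fmt.Println(numUnderscoresOK("1_000"))  // true: each '_' sits between other characters
	fmt.Println(numUnderscoresOK("1__000")) // false: consecutive underscores
	fmt.Println(numUnderscoresOK("_1000"))  // false: leading underscore
	fmt.Println(numUnderscoresOK("1000_"))  // false: trailing underscore
	fmt.Println(numPeriodsOK("3.14"))       // true: the '.' is followed by a digit
	fmt.Println(numPeriodsOK("123."))       // false: nothing after the '.'
	fmt.Println(numPeriodsOK("1.e2"))       // false: the '.' is followed by 'e'
}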
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting
// for implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}
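The lexer and parser above ultimately back the package's public decoding API. A rough usage sketch, assuming this vendored package is the usual github.com/BurntSushi/toml and that its exported Decode keeps the conventional (data string, v interface{}) (MetaData, error) signature; treat both as assumptions rather than something this diff shows.
package main
import (
	"fmt"
	"github.com/BurntSushi/toml" // assumed import path of the vendored package
)
type config struct {
	Title string
	Owner struct {
		Name string
	}
}
func main() {
	const doc = "title = \"example\"\n\n[owner]\nname = \"alice\"\n"
	var c config
	// Decode drives lex/parse above and maps the resulting key/value
	// tree onto the struct fields.
	if _, err := toml.Decode(doc, &c); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(c.Title, c.Owner.Name) // example alice
}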


@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1


@ -1,91 +0,0 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
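A small in-package illustration of the helpers above (exampleTypes is a made-up name; tomlInteger and friends are the base types declared in this file):
func exampleTypes() {
	fmt.Println(typeEqual(tomlInteger, tomlInteger)) // true
	fmt.Println(typeEqual(tomlInteger, tomlFloat))   // false
	fmt.Println(typeEqual(tomlHash, nil))            // false: nil never compares equal
	fmt.Println(typeIsHash(tomlArrayHash))           // true
	// typeOfArray returns tomlArray for an empty or homogeneous list of
	// element types and calls p.panicf for a mixed one (e.g. [1, "x"]),
	// which parse() surfaces to the caller as an error.
}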


@ -1,242 +0,0 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
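To make the dominance rules above concrete, a small example in plain Go (no assumptions beyond the code shown; the tag options themselves are parsed by getOptions, defined elsewhere in this package):
type Base struct {
	Name string // reachable at depth 1 through embedding
	ID   int
}
type Item struct {
	Base
	Name string // depth 0: the shorter index sequence dominates Base.Name
}
// typeFields(reflect.TypeOf(Item{})) reports "Name" (the Item-level field,
// picked by dominantField because its index is shorter) and "ID" (promoted
// from Base, since nothing hides it). A `toml:"..."` tag acts as a further
// tie-breaker only among same-name fields at that shallowest depth.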


@ -98,6 +98,17 @@ func IsRefreshRequired(err error) bool {
return false return false
} }
// Return true if a collection is not known. Required by cbq-engine
func IsUnknownCollection(err error) bool {
res, ok := err.(*gomemcached.MCResponse)
if ok && (res.Status == gomemcached.UNKNOWN_COLLECTION) {
return true
}
return false
}
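The new IsUnknownCollection follows the same shape as the other Is* helpers here: it only matches when the error is the raw *gomemcached.MCResponse carrying the UNKNOWN_COLLECTION status. A small sketch of the matching and the non-matching case, assuming the usual github.com/couchbase/go-couchbase and github.com/couchbase/gomemcached import paths (assumptions, not shown in this diff):
package main
import (
	"fmt"
	couchbase "github.com/couchbase/go-couchbase"
	"github.com/couchbase/gomemcached"
)
func main() {
	// The error shape the helper looks for: a raw memcached response
	// whose status is UNKNOWN_COLLECTION.
	var err error = &gomemcached.MCResponse{Status: gomemcached.UNKNOWN_COLLECTION}
	fmt.Println(couchbase.IsUnknownCollection(err))                // true
	fmt.Println(couchbase.IsUnknownCollection(fmt.Errorf("boom"))) // false
}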
// ClientOpCallback is called for each invocation of Do. // ClientOpCallback is called for each invocation of Do.
var ClientOpCallback func(opname, k string, start time.Time, err error) var ClientOpCallback func(opname, k string, start time.Time, err error)
@ -129,11 +140,10 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
if deadline && DefaultTimeout > 0 { if deadline && DefaultTimeout > 0 {
conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout)) conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
err = f(conn, uint16(vb))
conn.SetDeadline(noDeadline)
} else { } else {
err = f(conn, uint16(vb)) conn.SetDeadline(noDeadline)
} }
err = f(conn, uint16(vb))
var retry bool var retry bool
discard := isOutOfBoundsError(err) discard := isOutOfBoundsError(err)
@ -195,6 +205,7 @@ func getStatsParallel(sn string, b *Bucket, offset int, which string,
if err != nil { if err != nil {
gatheredStats = GatheredStats{Server: sn, Err: err} gatheredStats = GatheredStats{Server: sn, Err: err}
} else { } else {
conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
sm, err := conn.StatsMap(which) sm, err := conn.StatsMap(which)
gatheredStats = GatheredStats{Server: sn, Stats: sm, Err: err} gatheredStats = GatheredStats{Server: sn, Stats: sm, Err: err}
} }
@ -236,19 +247,48 @@ func (b *Bucket) GatherStats(which string) map[string]GatheredStats {
} }
// Get bucket count through the bucket stats // Get bucket count through the bucket stats
func (b *Bucket) GetCount(refresh bool) (count int64, err error) { func (b *Bucket) GetCount(refresh bool, context ...*memcached.ClientContext) (count int64, err error) {
if refresh { if refresh {
b.Refresh() b.Refresh()
} }
var cnt int64 var cnt int64
for _, gs := range b.GatherStats("") { if len(context) > 0 {
if len(gs.Stats) > 0 { key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64) resKey := ""
if err != nil { for _, gs := range b.GatherStats(key) {
return 0, err if len(gs.Stats) > 0 {
// the key encodes the scope and collection id
// we don't have the scope id, so we have to find it...
if resKey == "" {
for k, _ := range gs.Stats {
resKey = strings.TrimRightFunc(k, func(r rune) bool {
return r != ':'
}) + "items"
break
}
}
cnt, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
if err != nil {
return 0, err
}
count += cnt
} else if gs.Err != nil {
return 0, gs.Err
}
}
} else {
for _, gs := range b.GatherStats("") {
if len(gs.Stats) > 0 {
cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64)
if err != nil {
return 0, err
}
count += cnt
} else if gs.Err != nil {
return 0, gs.Err
} }
count += cnt
} }
} }
@ -256,19 +296,49 @@ func (b *Bucket) GetCount(refresh bool) (count int64, err error) {
} }
// Get bucket document size through the bucket stats // Get bucket document size through the bucket stats
func (b *Bucket) GetSize(refresh bool) (size int64, err error) { func (b *Bucket) GetSize(refresh bool, context ...*memcached.ClientContext) (size int64, err error) {
if refresh { if refresh {
b.Refresh() b.Refresh()
} }
var sz int64 var sz int64
for _, gs := range b.GatherStats("") { if len(context) > 0 {
if len(gs.Stats) > 0 { key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64) resKey := ""
if err != nil { for _, gs := range b.GatherStats(key) {
return 0, err if len(gs.Stats) > 0 {
// the key encodes the scope and collection id
// we don't have the scope id, so we have to find it...
if resKey == "" {
for k, _ := range gs.Stats {
resKey = strings.TrimRightFunc(k, func(r rune) bool {
return r != ':'
}) + "disk_size"
break
}
}
sz, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
if err != nil {
return 0, err
}
size += sz
} else if gs.Err != nil {
return 0, gs.Err
}
}
} else {
for _, gs := range b.GatherStats("") {
if len(gs.Stats) > 0 {
sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64)
if err != nil {
return 0, err
}
size += sz
} else if gs.Err != nil {
return 0, gs.Err
} }
size += sz
} }
} }
@ -311,8 +381,12 @@ func isOutOfBoundsError(err error) bool {
} }
func getDeadline(reqDeadline time.Time, duration time.Duration) time.Time { func getDeadline(reqDeadline time.Time, duration time.Duration) time.Time {
if reqDeadline.IsZero() && duration > 0 { if reqDeadline.IsZero() {
return time.Now().Add(duration) if duration > 0 {
return time.Unix(time.Now().Unix(), 0).Add(duration)
} else {
return noDeadline
}
} }
return reqDeadline return reqDeadline
} }
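One observable effect of the rewritten getDeadline (right-hand column above) is that, when a default timeout applies, the base time is time.Unix(time.Now().Unix(), 0), i.e. the current time truncated to whole seconds, and a zero duration now yields noDeadline. A self-contained, standard-library-only illustration of the truncation:
package main
import (
	"fmt"
	"time"
)
func main() {
	now := time.Now()
	base := time.Unix(now.Unix(), 0) // drop the sub-second part, as the new code does
	fmt.Println(base.Nanosecond())   // always 0
	fmt.Println(!base.After(now))    // true: truncation never moves the base forward
	deadline := base.Add(30 * time.Second)
	fmt.Println(deadline.Sub(base)) // 30s
}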
@ -334,7 +408,7 @@ func backOff(attempt, maxAttempts int, duration time.Duration, exponential bool)
func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time, func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string, ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
eStatus *errorStatus) { eStatus *errorStatus, context ...*memcached.ClientContext) {
if SlowServerCallWarningThreshold > 0 { if SlowServerCallWarningThreshold > 0 {
defer slowLog(time.Now(), "call to doBulkGet(%d, %d keys)", vb, len(keys)) defer slowLog(time.Now(), "call to doBulkGet(%d, %d keys)", vb, len(keys))
} }
@ -389,8 +463,7 @@ func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
} }
conn.SetDeadline(getDeadline(reqDeadline, DefaultTimeout)) conn.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
err = conn.GetBulk(vb, keys, rv, subPaths) err = conn.GetBulk(vb, keys, rv, subPaths, context...)
conn.SetDeadline(noDeadline)
discard := false discard := false
defer func() { defer func() {
@ -474,6 +547,7 @@ type vbBulkGet struct {
wg *sync.WaitGroup wg *sync.WaitGroup
subPaths []string subPaths []string
groupError *errorStatus groupError *errorStatus
context []*memcached.ClientContext
} }
const _NUM_CHANNELS = 5 const _NUM_CHANNELS = 5
@ -523,14 +597,14 @@ func vbDoBulkGet(vbg *vbBulkGet) {
// Workers cannot panic and die // Workers cannot panic and die
recover() recover()
}() }()
vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError) vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError, vbg.context...)
} }
var _ERR_CHAN_FULL = fmt.Errorf("Data request queue full, aborting query.") var _ERR_CHAN_FULL = fmt.Errorf("Data request queue full, aborting query.")
func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time, func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string, ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
eStatus *errorStatus) { eStatus *errorStatus, context ...*memcached.ClientContext) {
defer close(ch) defer close(ch)
defer close(ech) defer close(ech)
@ -554,6 +628,7 @@ func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
wg: wg, wg: wg,
subPaths: subPaths, subPaths: subPaths,
groupError: eStatus, groupError: eStatus,
context: context,
} }
wg.Add(1) wg.Add(1)
@ -612,9 +687,9 @@ func errorCollector(ech <-chan error, eout chan<- error, eStatus *errorStatus) {
// This is a wrapper around GetBulk which converts all values returned // This is a wrapper around GetBulk which converts all values returned
// by GetBulk from raw memcached responses into []byte slices. // by GetBulk from raw memcached responses into []byte slices.
// Returns one document for duplicate keys // Returns one document for duplicate keys
func (b *Bucket) GetBulkRaw(keys []string) (map[string][]byte, error) { func (b *Bucket) GetBulkRaw(keys []string, context ...*memcached.ClientContext) (map[string][]byte, error) {
resp, eout := b.getBulk(keys, noDeadline, nil) resp, eout := b.getBulk(keys, noDeadline, nil, context...)
rv := make(map[string][]byte, len(keys)) rv := make(map[string][]byte, len(keys))
for k, av := range resp { for k, av := range resp {
@ -632,15 +707,15 @@ func (b *Bucket) GetBulkRaw(keys []string) (map[string][]byte, error) {
// map array for each key. Keys that were not found will not be included in // map array for each key. Keys that were not found will not be included in
// the map. // the map.
func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) { func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
return b.getBulk(keys, reqDeadline, subPaths) return b.getBulk(keys, reqDeadline, subPaths, context...)
} }
func (b *Bucket) ReleaseGetBulkPools(rv map[string]*gomemcached.MCResponse) { func (b *Bucket) ReleaseGetBulkPools(rv map[string]*gomemcached.MCResponse) {
_STRING_MCRESPONSE_POOL.Put(rv) _STRING_MCRESPONSE_POOL.Put(rv)
} }
func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) { func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
kdm := _VB_STRING_POOL.Get() kdm := _VB_STRING_POOL.Get()
defer _VB_STRING_POOL.Put(kdm) defer _VB_STRING_POOL.Put(kdm)
for _, k := range keys { for _, k := range keys {
@ -663,7 +738,7 @@ func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string
ech := make(chan error) ech := make(chan error)
go errorCollector(ech, eout, groupErrorStatus) go errorCollector(ech, eout, groupErrorStatus)
go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus) go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus, context...)
var rv map[string]*gomemcached.MCResponse var rv map[string]*gomemcached.MCResponse
@ -739,7 +814,7 @@ var ErrKeyExists = errors.New("key exists")
// before being written. It must be JSON-marshalable and it must not // before being written. It must be JSON-marshalable and it must not
// be nil. // be nil.
func (b *Bucket) Write(k string, flags, exp int, v interface{}, func (b *Bucket) Write(k string, flags, exp int, v interface{},
opt WriteOptions) (err error) { opt WriteOptions, context ...*memcached.ClientContext) (err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { defer func(t time.Time) {
@ -761,7 +836,7 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
if opt&AddOnly != 0 { if opt&AddOnly != 0 {
res, err = memcached.UnwrapMemcachedError( res, err = memcached.UnwrapMemcachedError(
mc.Add(vb, k, flags, exp, data)) mc.Add(vb, k, flags, exp, data, context...))
if err == nil && res.Status != gomemcached.SUCCESS { if err == nil && res.Status != gomemcached.SUCCESS {
if res.Status == gomemcached.KEY_EEXISTS { if res.Status == gomemcached.KEY_EEXISTS {
err = ErrKeyExists err = ErrKeyExists
@ -770,11 +845,11 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
} }
} }
} else if opt&Append != 0 { } else if opt&Append != 0 {
res, err = mc.Append(vb, k, data) res, err = mc.Append(vb, k, data, context...)
} else if data == nil { } else if data == nil {
res, err = mc.Del(vb, k) res, err = mc.Del(vb, k, context...)
} else { } else {
res, err = mc.Set(vb, k, flags, exp, data) res, err = mc.Set(vb, k, flags, exp, data, context...)
} }
return err return err
@ -788,7 +863,7 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
} }
func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{}, func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
opt WriteOptions) (mt *MutationToken, err error) { opt WriteOptions, context ...*memcached.ClientContext) (mt *MutationToken, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { defer func(t time.Time) {
@ -810,7 +885,7 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
if opt&AddOnly != 0 { if opt&AddOnly != 0 {
res, err = memcached.UnwrapMemcachedError( res, err = memcached.UnwrapMemcachedError(
mc.Add(vb, k, flags, exp, data)) mc.Add(vb, k, flags, exp, data, context...))
if err == nil && res.Status != gomemcached.SUCCESS { if err == nil && res.Status != gomemcached.SUCCESS {
if res.Status == gomemcached.KEY_EEXISTS { if res.Status == gomemcached.KEY_EEXISTS {
err = ErrKeyExists err = ErrKeyExists
@ -819,11 +894,11 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
} }
} }
} else if opt&Append != 0 { } else if opt&Append != 0 {
res, err = mc.Append(vb, k, data) res, err = mc.Append(vb, k, data, context...)
} else if data == nil { } else if data == nil {
res, err = mc.Del(vb, k) res, err = mc.Del(vb, k, context...)
} else { } else {
res, err = mc.Set(vb, k, flags, exp, data) res, err = mc.Set(vb, k, flags, exp, data, context...)
} }
if len(res.Extras) >= 16 { if len(res.Extras) >= 16 {
@ -843,17 +918,17 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
} }
// Set a value in this bucket with Cas and return the new Cas value // Set a value in this bucket with Cas and return the new Cas value
func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}) (uint64, error) { func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
return b.WriteCas(k, 0, exp, cas, v, 0) return b.WriteCas(k, 0, exp, cas, v, 0, context...)
} }
// Set a value in this bucket with Cas without json encoding it // Set a value in this bucket with Cas without json encoding it
func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}) (uint64, error) { func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
return b.WriteCas(k, 0, exp, cas, v, Raw) return b.WriteCas(k, 0, exp, cas, v, Raw, context...)
} }
func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{}, func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
opt WriteOptions) (newCas uint64, err error) { opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { defer func(t time.Time) {
@ -873,7 +948,7 @@ func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
var res *gomemcached.MCResponse var res *gomemcached.MCResponse
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err = mc.SetCas(vb, k, flags, exp, cas, data) res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
return err return err
}) })
@ -885,16 +960,16 @@ func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
} }
// Extended CAS operation. These functions will return the mutation token, i.e vbuuid & guard // Extended CAS operation. These functions will return the mutation token, i.e vbuuid & guard
func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) { func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
return b.WriteCasWithMT(k, flags, exp, cas, v, 0) return b.WriteCasWithMT(k, flags, exp, cas, v, 0, context...)
} }
func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) { func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
return b.WriteCasWithMT(k, flags, exp, cas, v, Raw) return b.WriteCasWithMT(k, flags, exp, cas, v, Raw, context...)
} }
func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interface{}, func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interface{},
opt WriteOptions) (newCas uint64, mt *MutationToken, err error) { opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, mt *MutationToken, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { defer func(t time.Time) {
@ -914,7 +989,7 @@ func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interfac
var res *gomemcached.MCResponse var res *gomemcached.MCResponse
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err = mc.SetCas(vb, k, flags, exp, cas, data) res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
return err return err
}) })
@ -939,25 +1014,25 @@ func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interfac
// Set a value in this bucket. // Set a value in this bucket.
// The value will be serialized into a JSON document. // The value will be serialized into a JSON document.
func (b *Bucket) Set(k string, exp int, v interface{}) error { func (b *Bucket) Set(k string, exp int, v interface{}, context ...*memcached.ClientContext) error {
return b.Write(k, 0, exp, v, 0) return b.Write(k, 0, exp, v, 0, context...)
} }
// Set a value in this bucket with flags // Set a value in this bucket with flags
func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}) (*MutationToken, error) { func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}, context ...*memcached.ClientContext) (*MutationToken, error) {
return b.WriteWithMT(k, flags, exp, v, 0) return b.WriteWithMT(k, flags, exp, v, 0, context...)
} }
// SetRaw sets a value in this bucket without JSON encoding it. // SetRaw sets a value in this bucket without JSON encoding it.
func (b *Bucket) SetRaw(k string, exp int, v []byte) error { func (b *Bucket) SetRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) error {
return b.Write(k, 0, exp, v, Raw) return b.Write(k, 0, exp, v, Raw, context...)
} }
// Add adds a value to this bucket; like Set except that nothing // Add adds a value to this bucket; like Set except that nothing
// happens if the key exists. The value will be serialized into a // happens if the key exists. The value will be serialized into a
// JSON document. // JSON document.
func (b *Bucket) Add(k string, exp int, v interface{}) (added bool, err error) { func (b *Bucket) Add(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, err error) {
err = b.Write(k, 0, exp, v, AddOnly) err = b.Write(k, 0, exp, v, AddOnly, context...)
if err == ErrKeyExists { if err == ErrKeyExists {
return false, nil return false, nil
} }
@ -966,8 +1041,8 @@ func (b *Bucket) Add(k string, exp int, v interface{}) (added bool, err error) {
// AddRaw adds a value to this bucket; like SetRaw except that nothing // AddRaw adds a value to this bucket; like SetRaw except that nothing
// happens if the key exists. The value will be stored as raw bytes. // happens if the key exists. The value will be stored as raw bytes.
func (b *Bucket) AddRaw(k string, exp int, v []byte) (added bool, err error) { func (b *Bucket) AddRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, err error) {
err = b.Write(k, 0, exp, v, AddOnly|Raw) err = b.Write(k, 0, exp, v, AddOnly|Raw, context...)
if err == ErrKeyExists { if err == ErrKeyExists {
return false, nil return false, nil
} }
@ -977,8 +1052,8 @@ func (b *Bucket) AddRaw(k string, exp int, v []byte) (added bool, err error) {
// AddWithMT adds a value to this bucket; like Add, except that it also // AddWithMT adds a value to this bucket; like Add, except that it also
// returns the mutation token. Nothing happens if the key exists. The // returns the mutation token. Nothing happens if the key exists. The
// value will be serialized into a JSON document. // value will be serialized into a JSON document.
func (b *Bucket) AddWithMT(k string, exp int, v interface{}) (added bool, mt *MutationToken, err error) { func (b *Bucket) AddWithMT(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly) mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly, context...)
if err == ErrKeyExists { if err == ErrKeyExists {
return false, mt, nil return false, mt, nil
} }
@ -987,8 +1062,8 @@ func (b *Bucket) AddWithMT(k string, exp int, v interface{}) (added bool, mt *Mu
// AddRawWithMT adds a value to this bucket; like AddRaw, except that it // AddRawWithMT adds a value to this bucket; like AddRaw, except that it
// also returns the mutation token. Nothing happens if the key exists. The value will be stored as raw bytes. // also returns the mutation token. Nothing happens if the key exists. The value will be stored as raw bytes.
func (b *Bucket) AddRawWithMT(k string, exp int, v []byte) (added bool, mt *MutationToken, err error) { func (b *Bucket) AddRawWithMT(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw) mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw, context...)
if err == ErrKeyExists { if err == ErrKeyExists {
return false, mt, nil return false, mt, nil
} }
@ -996,43 +1071,8 @@ func (b *Bucket) AddRawWithMT(k string, exp int, v []byte) (added bool, mt *Muta
} }
// Append appends raw data to an existing item. // Append appends raw data to an existing item.
func (b *Bucket) Append(k string, data []byte) error { func (b *Bucket) Append(k string, data []byte, context ...*memcached.ClientContext) error {
return b.Write(k, 0, 0, data, Append|Raw) return b.Write(k, 0, 0, data, Append|Raw, context...)
}
func (b *Bucket) GetsMCFromCollection(collUid uint32, key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) {
var err error
var response *gomemcached.MCResponse
if key == "" {
return nil, nil
}
if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("GetsMCFromCollection", key, t, err) }(time.Now())
}
err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
var err1 error
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
_, err1 = mc.SelectBucket(b.Name)
if err1 != nil {
mc.SetDeadline(noDeadline)
return err1
}
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
response, err1 = mc.GetFromCollection(vb, collUid, key)
if err1 != nil {
mc.SetDeadline(noDeadline)
return err1
}
return nil
}, false)
return response, err
} }
// Returns collectionUid, manifestUid, error. // Returns collectionUid, manifestUid, error.
@ -1053,13 +1093,11 @@ func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline t
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout)) mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
_, err1 = mc.SelectBucket(b.Name) _, err1 = mc.SelectBucket(b.Name)
if err1 != nil { if err1 != nil {
mc.SetDeadline(noDeadline)
return err1 return err1
} }
response, err1 = mc.CollectionsGetCID(scope, collection) response, err1 = mc.CollectionsGetCID(scope, collection)
if err1 != nil { if err1 != nil {
mc.SetDeadline(noDeadline)
return err1 return err1
} }
@ -1073,7 +1111,7 @@ func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline t
} }
// Get a value straight from Memcached // Get a value straight from Memcached
func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) { func (b *Bucket) GetsMC(key string, reqDeadline time.Time, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
var err error var err error
var response *gomemcached.MCResponse var response *gomemcached.MCResponse
@ -1089,8 +1127,7 @@ func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCRespo
var err1 error var err1 error
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout)) mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
response, err1 = mc.Get(vb, key) response, err1 = mc.Get(vb, key, context...)
mc.SetDeadline(noDeadline)
if err1 != nil { if err1 != nil {
return err1 return err1
} }
@ -1100,7 +1137,7 @@ func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCRespo
} }
// Get a value through the subdoc API // Get a value through the subdoc API
func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string) (*gomemcached.MCResponse, error) { func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
var err error var err error
var response *gomemcached.MCResponse var response *gomemcached.MCResponse
@ -1116,8 +1153,7 @@ func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string
var err1 error var err1 error
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout)) mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
response, err1 = mc.GetSubdoc(vb, key, subPaths) response, err1 = mc.GetSubdoc(vb, key, subPaths, context...)
mc.SetDeadline(noDeadline)
if err1 != nil { if err1 != nil {
return err1 return err1
} }
@ -1128,7 +1164,7 @@ func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string
// GetsRaw gets a raw value from this bucket including its CAS // GetsRaw gets a raw value from this bucket including its CAS
// counter and flags. // counter and flags.
func (b *Bucket) GetsRaw(k string) (data []byte, flags int, func (b *Bucket) GetsRaw(k string, context ...*memcached.ClientContext) (data []byte, flags int,
cas uint64, err error) { cas uint64, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
@ -1136,7 +1172,7 @@ func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
} }
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err := mc.Get(vb, k) res, err := mc.Get(vb, k, context...)
if err != nil { if err != nil {
return err return err
} }
@ -1153,8 +1189,8 @@ func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
// Gets gets a value from this bucket, including its CAS counter. The // Gets gets a value from this bucket, including its CAS counter. The
// value is expected to be a JSON stream and will be deserialized into // value is expected to be a JSON stream and will be deserialized into
// rv. // rv.
func (b *Bucket) Gets(k string, rv interface{}, caso *uint64) error { func (b *Bucket) Gets(k string, rv interface{}, caso *uint64, context ...*memcached.ClientContext) error {
data, _, cas, err := b.GetsRaw(k) data, _, cas, err := b.GetsRaw(k, context...)
if err != nil { if err != nil {
return err return err
} }
@ -1167,19 +1203,19 @@ func (b *Bucket) Gets(k string, rv interface{}, caso *uint64) error {
// Get a value from this bucket. // Get a value from this bucket.
// The value is expected to be a JSON stream and will be deserialized // The value is expected to be a JSON stream and will be deserialized
// into rv. // into rv.
func (b *Bucket) Get(k string, rv interface{}) error { func (b *Bucket) Get(k string, rv interface{}, context ...*memcached.ClientContext) error {
return b.Gets(k, rv, nil) return b.Gets(k, rv, nil, context...)
} }
// GetRaw gets a raw value from this bucket. No marshaling is performed. // GetRaw gets a raw value from this bucket. No marshaling is performed.
func (b *Bucket) GetRaw(k string) ([]byte, error) { func (b *Bucket) GetRaw(k string, context ...*memcached.ClientContext) ([]byte, error) {
d, _, _, err := b.GetsRaw(k) d, _, _, err := b.GetsRaw(k, context...)
return d, err return d, err
} }
// GetAndTouchRaw gets a raw value from this bucket including its CAS // GetAndTouchRaw gets a raw value from this bucket including its CAS
// counter and flags, and updates the expiry on the doc. // counter and flags, and updates the expiry on the doc.
func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte, func (b *Bucket) GetAndTouchRaw(k string, exp int, context ...*memcached.ClientContext) (data []byte,
cas uint64, err error) { cas uint64, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
@ -1187,7 +1223,7 @@ func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
} }
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err := mc.GetAndTouch(vb, k, exp) res, err := mc.GetAndTouch(vb, k, exp, context...)
if err != nil { if err != nil {
return err return err
} }
@ -1199,14 +1235,14 @@ func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
} }
// GetMeta returns the meta values for a key // GetMeta returns the meta values for a key
func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64) (err error) { func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64, context ...*memcached.ClientContext) (err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("GetsMeta", k, t, err) }(time.Now()) defer func(t time.Time) { ClientOpCallback("GetsMeta", k, t, err) }(time.Now())
} }
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err := mc.GetMeta(vb, k) res, err := mc.GetMeta(vb, k, context...)
if err != nil { if err != nil {
return err return err
} }
@ -1231,19 +1267,19 @@ func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *
} }
// Delete a key from this bucket. // Delete a key from this bucket.
func (b *Bucket) Delete(k string) error { func (b *Bucket) Delete(k string, context ...*memcached.ClientContext) error {
return b.Write(k, 0, 0, nil, Raw) return b.Write(k, 0, 0, nil, Raw, context...)
} }
// Incr increments the value at a given key by amt and defaults to def if no value is present. // Incr increments the value at a given key by amt and defaults to def if no value is present.
func (b *Bucket) Incr(k string, amt, def uint64, exp int) (val uint64, err error) { func (b *Bucket) Incr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("Incr", k, t, err) }(time.Now()) defer func(t time.Time) { ClientOpCallback("Incr", k, t, err) }(time.Now())
} }
var rv uint64 var rv uint64
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err := mc.Incr(vb, k, amt, def, exp) res, err := mc.Incr(vb, k, amt, def, exp, context...)
if err != nil { if err != nil {
return err return err
} }
@ -1254,14 +1290,14 @@ func (b *Bucket) Incr(k string, amt, def uint64, exp int) (val uint64, err error
} }
// Decr decrements the value at a given key by amt and defaults to def if no value is present. // Decr decrements the value at a given key by amt and defaults to def if no value is present.
func (b *Bucket) Decr(k string, amt, def uint64, exp int) (val uint64, err error) { func (b *Bucket) Decr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
if ClientOpCallback != nil { if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("Decr", k, t, err) }(time.Now()) defer func(t time.Time) { ClientOpCallback("Decr", k, t, err) }(time.Now())
} }
var rv uint64 var rv uint64
err = b.Do(k, func(mc *memcached.Client, vb uint16) error { err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
res, err := mc.Decr(vb, k, amt, def, exp) res, err := mc.Decr(vb, k, amt, def, exp, context...)
if err != nil { if err != nil {
return err return err
} }
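As a small illustration of the Incr/Decr helpers above (the key name is hypothetical): when the key is absent it is created with def, otherwise the stored value is adjusted by amt and the new value is returned.

package example

import (
	"fmt"

	couchbase "github.com/couchbase/go-couchbase"
)

// counterExample sketches the Incr/Decr signatures above; not part of the library.
func counterExample(b *couchbase.Bucket) error {
	// First call on a missing key: it is created with def=1.
	v, err := b.Incr("hits", 1, 1, 0)
	if err != nil {
		return err
	}
	// Second call: the existing value is incremented by amt=1.
	v, err = b.Incr("hits", 1, 1, 0)
	if err != nil {
		return err
	}
	fmt.Println("hits is now", v) // 2, if the key did not exist before the first call
	_, err = b.Decr("hits", 1, 0, 0)
	return err
}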


@ -45,11 +45,12 @@ type connectionPool struct {
poolSize int poolSize int
connCount uint64 connCount uint64
inUse bool inUse bool
encrypted bool
tlsConfig *tls.Config tlsConfig *tls.Config
bucket string bucket string
} }
func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string) *connectionPool { func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string, encrypted bool) *connectionPool {
connSize := poolSize connSize := poolSize
if closer { if closer {
connSize += poolOverflow connSize += poolOverflow
@ -61,9 +62,14 @@ func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolO
mkConn: defaultMkConn, mkConn: defaultMkConn,
auth: ah, auth: ah,
poolSize: poolSize, poolSize: poolSize,
tlsConfig: tlsConfig,
bucket: bucket, bucket: bucket,
encrypted: encrypted,
} }
if encrypted {
rv.tlsConfig = tlsConfig
}
if closer { if closer {
rv.bailOut = make(chan bool, 1) rv.bailOut = make(chan bool, 1)
go rv.connCloser() go rv.connCloser()
@ -91,6 +97,10 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
return nil, err return nil, err
} }
if DefaultTimeout > 0 {
conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
}
if TCPKeepalive == true { if TCPKeepalive == true {
conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second) conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second)
} }
@ -111,16 +121,7 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
} }
if len(features) > 0 { if len(features) > 0 {
if DefaultTimeout > 0 {
conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
}
res, err := conn.EnableFeatures(features) res, err := conn.EnableFeatures(features)
if DefaultTimeout > 0 {
conn.SetDeadline(noDeadline)
}
if err != nil && isTimeoutError(err) { if err != nil && isTimeoutError(err) {
conn.Close() conn.Close()
return nil, err return nil, err
@ -137,10 +138,15 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
conn.Close() conn.Close()
return nil, err return nil, err
} }
if DefaultTimeout > 0 {
conn.SetDeadline(noDeadline)
}
return conn, nil return conn, nil
} }
name, pass, bucket := ah.GetCredentials() name, pass, bucket := ah.GetCredentials()
if bucket == "" { if bucket == "" {
// Authenticator does not know specific bucket. // Authenticator does not know specific bucket.
bucket = bucketName bucket = bucketName
} }
@ -161,6 +167,11 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
} }
} }
} }
if DefaultTimeout > 0 {
conn.SetDeadline(noDeadline)
}
return conn, nil return conn, nil
} }

vendor/github.com/couchbase/go-couchbase/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/couchbase/go-couchbase
go 1.13


@ -34,6 +34,9 @@ var ClientTimeOut = 10 * time.Second
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost} var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport, Timeout: ClientTimeOut} var HTTPClient = &http.Client{Transport: HTTPTransport, Timeout: ClientTimeOut}
// Use this client for reading from streams that should be open for an extended duration.
var HTTPClientForStreaming = &http.Client{Transport: HTTPTransport, Timeout: 0}
// PoolSize is the size of each connection pool (per host). // PoolSize is the size of each connection pool (per host).
var PoolSize = 64 var PoolSize = 64
@ -164,22 +167,23 @@ type Pools struct {
// A Node is a computer in a cluster running the couchbase software. // A Node is a computer in a cluster running the couchbase software.
type Node struct { type Node struct {
ClusterCompatibility int `json:"clusterCompatibility"` ClusterCompatibility int `json:"clusterCompatibility"`
ClusterMembership string `json:"clusterMembership"` ClusterMembership string `json:"clusterMembership"`
CouchAPIBase string `json:"couchApiBase"` CouchAPIBase string `json:"couchApiBase"`
Hostname string `json:"hostname"` Hostname string `json:"hostname"`
InterestingStats map[string]float64 `json:"interestingStats,omitempty"` AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"` InterestingStats map[string]float64 `json:"interestingStats,omitempty"`
MCDMemoryReserved float64 `json:"mcdMemoryReserved"` MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"`
MemoryFree float64 `json:"memoryFree"` MCDMemoryReserved float64 `json:"mcdMemoryReserved"`
MemoryTotal float64 `json:"memoryTotal"` MemoryFree float64 `json:"memoryFree"`
OS string `json:"os"` MemoryTotal float64 `json:"memoryTotal"`
Ports map[string]int `json:"ports"` OS string `json:"os"`
Services []string `json:"services"` Ports map[string]int `json:"ports"`
Status string `json:"status"` Services []string `json:"services"`
Uptime int `json:"uptime,string"` Status string `json:"status"`
Version string `json:"version"` Uptime int `json:"uptime,string"`
ThisNode bool `json:"thisNode,omitempty"` Version string `json:"version"`
ThisNode bool `json:"thisNode,omitempty"`
} }
// A Pool of nodes and buckets. // A Pool of nodes and buckets.
@ -189,6 +193,12 @@ type Pool struct {
BucketURL map[string]string `json:"buckets"` BucketURL map[string]string `json:"buckets"`
MemoryQuota float64 `json:"memoryQuota"`
CbasMemoryQuota float64 `json:"cbasMemoryQuota"`
EventingMemoryQuota float64 `json:"eventingMemoryQuota"`
FtsMemoryQuota float64 `json:"ftsMemoryQuota"`
IndexMemoryQuota float64 `json:"indexMemoryQuota"`
client *Client client *Client
} }
@ -217,6 +227,7 @@ type Bucket struct {
AuthType string `json:"authType"` AuthType string `json:"authType"`
Capabilities []string `json:"bucketCapabilities"` Capabilities []string `json:"bucketCapabilities"`
CapabilitiesVersion string `json:"bucketCapabilitiesVer"` CapabilitiesVersion string `json:"bucketCapabilitiesVer"`
CollectionsManifestUid string `json:"collectionsManifestUid"`
Type string `json:"bucketType"` Type string `json:"bucketType"`
Name string `json:"name"` Name string `json:"name"`
NodeLocator string `json:"nodeLocator"` NodeLocator string `json:"nodeLocator"`
@ -259,9 +270,15 @@ type PoolServices struct {
// NodeServices is all the bucket-independent services running on // NodeServices is all the bucket-independent services running on
// a node (given by Hostname) // a node (given by Hostname)
type NodeServices struct { type NodeServices struct {
Services map[string]int `json:"services,omitempty"` Services map[string]int `json:"services,omitempty"`
Hostname string `json:"hostname"`
ThisNode bool `json:"thisNode"`
AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
}
type NodeAlternateNames struct {
Hostname string `json:"hostname"` Hostname string `json:"hostname"`
ThisNode bool `json:"thisNode"` Ports map[string]int `json:"ports"`
} }
type BucketNotFoundError struct { type BucketNotFoundError struct {
@ -344,6 +361,13 @@ func (b *Bucket) GetName() string {
return ret return ret
} }
func (b *Bucket) GetUUID() string {
b.RLock()
defer b.RUnlock()
ret := b.UUID
return ret
}
// Nodes returns the current list of nodes servicing this bucket. // Nodes returns the current list of nodes servicing this bucket.
func (b *Bucket) Nodes() []Node { func (b *Bucket) Nodes() []Node {
b.RLock() b.RLock()
@ -474,13 +498,14 @@ func (b *Bucket) getRandomConnection() (*memcached.Client, *connectionPool, erro
// Client.GetRandomDoc() call to get a random document from that node. // Client.GetRandomDoc() call to get a random document from that node.
// //
func (b *Bucket) GetRandomDoc() (*gomemcached.MCResponse, error) { func (b *Bucket) GetRandomDoc(context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
// get a connection from the pool // get a connection from the pool
conn, pool, err := b.getRandomConnection() conn, pool, err := b.getRandomConnection()
if err != nil { if err != nil {
return nil, err return nil, err
} }
conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
// We may need to select the bucket before GetRandomDoc() // We may need to select the bucket before GetRandomDoc()
// will work. This is sometimes done at startup (see defaultMkConn()) // will work. This is sometimes done at startup (see defaultMkConn())
@ -491,12 +516,60 @@ func (b *Bucket) GetRandomDoc() (*gomemcached.MCResponse, error) {
} }
// get a random document from the connection // get a random document from the connection
doc, err := conn.GetRandomDoc() doc, err := conn.GetRandomDoc(context...)
// need to return the connection to the pool // need to return the connection to the pool
pool.Return(conn) pool.Return(conn)
return doc, err return doc, err
} }
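A hedged sketch of how GetRandomDoc might be called; the fields read from the response are the usual gomemcached.MCResponse Key/Body fields, and the printout is illustrative only.

package example

import (
	"fmt"

	couchbase "github.com/couchbase/go-couchbase"
)

// randomDocExample sketches a GetRandomDoc call with plain error handling.
func randomDocExample(b *couchbase.Bucket) error {
	res, err := b.GetRandomDoc()
	if err != nil {
		return err
	}
	// res is a *gomemcached.MCResponse; Key and Body carry the sampled document.
	fmt.Printf("random key=%q, %d body bytes\n", res.Key, len(res.Body))
	return nil
}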
// Bucket DDL
func uriAdj(s string) string {
return strings.Replace(s, "%", "%25", -1)
}
func (b *Bucket) CreateScope(scope string) error {
b.RLock()
pool := b.pool
client := pool.client
b.RUnlock()
args := map[string]interface{}{"name": scope}
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections", args, nil)
}
func (b *Bucket) DropScope(scope string) error {
b.RLock()
pool := b.pool
client := pool.client
b.RUnlock()
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), nil, nil)
}
func (b *Bucket) CreateCollection(scope string, collection string) error {
b.RLock()
pool := b.pool
client := pool.client
b.RUnlock()
args := map[string]interface{}{"name": collection}
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), args, nil)
}
func (b *Bucket) DropCollection(scope string, collection string) error {
b.RLock()
pool := b.pool
client := pool.client
b.RUnlock()
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope)+"/"+uriAdj(collection), nil, nil)
}
func (b *Bucket) FlushCollection(scope string, collection string) error {
b.RLock()
pool := b.pool
client := pool.client
b.RUnlock()
args := map[string]interface{}{"name": collection, "scope": scope}
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections-flush", args, nil)
}
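These DDL helpers drive the collections management REST endpoints added above. A minimal sketch of creating a scope and a collection follows; the scope and collection names are hypothetical, and AlreadyExistsError (defined later in this file) is used to tolerate re-runs.

package example

import couchbase "github.com/couchbase/go-couchbase"

// ensureCollection sketches the new scope/collection DDL helpers; names are illustrative.
func ensureCollection(b *couchbase.Bucket) error {
	// Create the scope first; the server may answer asynchronously (202 Accepted).
	if err := b.CreateScope("app"); err != nil && !couchbase.AlreadyExistsError(err) {
		return err
	}
	// Then the collection inside it.
	if err := b.CreateCollection("app", "events"); err != nil && !couchbase.AlreadyExistsError(err) {
		return err
	}
	return nil
}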
func (b *Bucket) getMasterNode(i int) string { func (b *Bucket) getMasterNode(i int) string {
p := b.getConnPools(false /* not already locked */) p := b.getConnPools(false /* not already locked */)
if len(p) > i { if len(p) > i {
@ -580,6 +653,7 @@ func isHttpConnError(err error) bool {
} }
var client *http.Client var client *http.Client
var clientForStreaming *http.Client
func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error) { func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error) {
cfg := &tls.Config{} cfg := &tls.Config{}
@ -612,6 +686,59 @@ func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error
return cfg, nil return cfg, nil
} }
// This version of doHTTPRequest is for requests where the response connection is held open
// for an extended duration, since each line of the response is a new and significant piece of output.
//
// The ordinary version of this method expects the results to arrive promptly, and
// therefore uses an HTTP client with a timeout. That client is not suitable
// for streaming use.
func doHTTPRequestForStreaming(req *http.Request) (*http.Response, error) {
var err error
var res *http.Response
// we need a client that ignores certificate errors, since we self-sign
// our certs
if clientForStreaming == nil && req.URL.Scheme == "https" {
var tr *http.Transport
if skipVerify {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
} else {
// Handle cases with cert
cfg, err := ClientConfigForX509(certFile, keyFile, rootFile)
if err != nil {
return nil, err
}
tr = &http.Transport{
TLSClientConfig: cfg,
}
}
clientForStreaming = &http.Client{Transport: tr, Timeout: 0}
} else if clientForStreaming == nil {
clientForStreaming = HTTPClientForStreaming
}
for i := 0; i < HTTP_MAX_RETRY; i++ {
res, err = clientForStreaming.Do(req)
if err != nil && isHttpConnError(err) {
continue
}
break
}
if err != nil {
return nil, err
}
return res, err
}
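For context, this no-timeout client exists so that a streaming response body can be read for as long as the server keeps it open (this is what processStream relies on further down). The loop below is an illustrative sketch of consuming such a line-delimited stream, not library code; the 8MB buffer cap is an assumption.

package example

import (
	"bufio"
	"net/http"
)

// consumeStream reads a line-delimited streaming response that may stay open
// indefinitely; with a non-zero client Timeout this read would be cut off.
func consumeStream(res *http.Response, handle func(line []byte)) error {
	defer res.Body.Close()
	scanner := bufio.NewScanner(res.Body)
	// Allow configuration documents larger than the default 64KB scanner limit (assumed cap).
	scanner.Buffer(make([]byte, 0, 64*1024), 8*1024*1024)
	for scanner.Scan() {
		if line := scanner.Bytes(); len(line) > 0 {
			handle(line) // the slice is only valid until the next Scan
		}
	}
	return scanner.Err()
}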
func doHTTPRequest(req *http.Request) (*http.Response, error) { func doHTTPRequest(req *http.Request) (*http.Response, error) {
var err error var err error
@ -660,12 +787,16 @@ func doHTTPRequest(req *http.Request) (*http.Response, error) {
return res, err return res, err
} }
func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error { func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
return doOutputAPI("PUT", baseURL, path, params, authHandler, out) return doOutputAPI("PUT", baseURL, path, params, authHandler, out, terse)
} }
func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error { func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
return doOutputAPI("POST", baseURL, path, params, authHandler, out) return doOutputAPI("POST", baseURL, path, params, authHandler, out, terse)
}
func doDeleteAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
return doOutputAPI("DELETE", baseURL, path, params, authHandler, out, terse)
} }
func doOutputAPI( func doOutputAPI(
@ -674,7 +805,8 @@ func doOutputAPI(
path string, path string,
params map[string]interface{}, params map[string]interface{},
authHandler AuthHandler, authHandler AuthHandler,
out interface{}) error { out interface{},
terse bool) error {
var requestUrl string var requestUrl string
@ -707,16 +839,40 @@ func doOutputAPI(
} }
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { // 200 - ok, 202 - accepted (asynchronously)
if res.StatusCode != 200 && res.StatusCode != 202 {
bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512)) bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
if terse {
var outBuf interface{}
err := json.Unmarshal(bod, &outBuf)
if err == nil && outBuf != nil {
switch errText := outBuf.(type) {
case string:
return fmt.Errorf("%s", errText)
case map[string]interface{}:
errField := errText["errors"]
if errField != nil {
// remove annoying 'map' prefix
return fmt.Errorf("%s", strings.TrimPrefix(fmt.Sprintf("%v", errField), "map"))
}
}
}
return fmt.Errorf("%s", string(bod))
}
return fmt.Errorf("HTTP error %v getting %q: %s", return fmt.Errorf("HTTP error %v getting %q: %s",
res.Status, requestUrl, bod) res.Status, requestUrl, bod)
} }
d := json.NewDecoder(res.Body) d := json.NewDecoder(res.Body)
if err = d.Decode(&out); err != nil { // PUT/POST/DELETE request may not have a response body
return err if d.More() {
if err = d.Decode(&out); err != nil {
return err
}
} }
return nil return nil
} }
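The d.More() guard above skips decoding when the response body is empty. A small standard-library illustration of the behaviour it relies on:

package example

import (
	"encoding/json"
	"fmt"
	"strings"
)

// decoderMore shows why checking More() avoids an EOF error on empty bodies.
func decoderMore() {
	empty := json.NewDecoder(strings.NewReader(""))
	fmt.Println(empty.More()) // false: nothing to decode, so Decode can be skipped

	nonEmpty := json.NewDecoder(strings.NewReader(`{"ok":true}`))
	fmt.Println(nonEmpty.More()) // true: there is a value to decode
	var out map[string]interface{}
	_ = nonEmpty.Decode(&out)
}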
@ -724,7 +880,8 @@ func queryRestAPI(
baseURL *url.URL, baseURL *url.URL,
path string, path string,
authHandler AuthHandler, authHandler AuthHandler,
out interface{}) error { out interface{},
terse bool) error {
var requestUrl string var requestUrl string
@ -752,13 +909,27 @@ func queryRestAPI(
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { if res.StatusCode != 200 {
bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512)) bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
if terse {
var outBuf interface{}
err := json.Unmarshal(bod, &outBuf)
if err == nil && outBuf != nil {
errText, ok := outBuf.(string)
if ok {
return fmt.Errorf(errText)
}
}
return fmt.Errorf(string(bod))
}
return fmt.Errorf("HTTP error %v getting %q: %s", return fmt.Errorf("HTTP error %v getting %q: %s",
res.Status, requestUrl, bod) res.Status, requestUrl, bod)
} }
d := json.NewDecoder(res.Body) d := json.NewDecoder(res.Body)
// GET request should have a response body
if err = d.Decode(&out); err != nil { if err = d.Decode(&out); err != nil {
return err return fmt.Errorf("json decode err: %#v, for requestUrl: %s",
err, requestUrl)
} }
return nil return nil
} }
@ -787,7 +958,7 @@ func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHa
return err return err
} }
res, err := doHTTPRequest(req) res, err := doHTTPRequestForStreaming(req)
if err != nil { if err != nil {
return err return err
} }
@ -823,15 +994,31 @@ func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHa
} }
func (c *Client) parseURLResponse(path string, out interface{}) error { func (c *Client) parseURLResponse(path string, out interface{}) error {
return queryRestAPI(c.BaseURL, path, c.ah, out) return queryRestAPI(c.BaseURL, path, c.ah, out, false)
} }
func (c *Client) parsePostURLResponse(path string, params map[string]interface{}, out interface{}) error { func (c *Client) parsePostURLResponse(path string, params map[string]interface{}, out interface{}) error {
return doPostAPI(c.BaseURL, path, params, c.ah, out) return doPostAPI(c.BaseURL, path, params, c.ah, out, false)
}
func (c *Client) parsePostURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
return doPostAPI(c.BaseURL, path, params, c.ah, out, true)
}
func (c *Client) parseDeleteURLResponse(path string, params map[string]interface{}, out interface{}) error {
return doDeleteAPI(c.BaseURL, path, params, c.ah, out, false)
}
func (c *Client) parseDeleteURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
return doDeleteAPI(c.BaseURL, path, params, c.ah, out, true)
} }
func (c *Client) parsePutURLResponse(path string, params map[string]interface{}, out interface{}) error { func (c *Client) parsePutURLResponse(path string, params map[string]interface{}, out interface{}) error {
return doPutAPI(c.BaseURL, path, params, c.ah, out) return doPutAPI(c.BaseURL, path, params, c.ah, out, false)
}
func (c *Client) parsePutURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
return doPutAPI(c.BaseURL, path, params, c.ah, out, true)
} }
func (b *Bucket) parseURLResponse(path string, out interface{}) error { func (b *Bucket) parseURLResponse(path string, out interface{}) error {
@ -856,7 +1043,7 @@ func (b *Bucket) parseURLResponse(path string, out interface{}) error {
// Lock here to avoid having pool closed under us. // Lock here to avoid having pool closed under us.
b.RLock() b.RLock()
err := queryRestAPI(url, path, b.pool.client.ah, out) err := queryRestAPI(url, path, b.pool.client.ah, out, false)
b.RUnlock() b.RUnlock()
if err == nil { if err == nil {
return err return err
@ -900,7 +1087,7 @@ func (b *Bucket) parseAPIResponse(path string, out interface{}) error {
// MB-13770 // MB-13770
requestPath := strings.Split(u.String(), u.Host)[1] requestPath := strings.Split(u.String(), u.Host)[1]
err = queryRestAPI(u, requestPath, b.pool.client.ah, out) err = queryRestAPI(u, requestPath, b.pool.client.ah, out, false)
b.RUnlock() b.RUnlock()
if err == nil { if err == nil {
return err return err
@ -1165,6 +1352,7 @@ func (b *Bucket) GetCollectionsManifest() (*Manifest, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name) return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
} }
client.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
// We need to select the bucket before GetCollectionsManifest() // We need to select the bucket before GetCollectionsManifest()
// will work. This is sometimes done at startup (see defaultMkConn()) // will work. This is sometimes done at startup (see defaultMkConn())
@ -1206,11 +1394,10 @@ func (b *Bucket) refresh(preserveConnections bool) error {
uri := b.URI uri := b.URI
client := pool.client client := pool.client
b.RUnlock() b.RUnlock()
tlsConfig := client.tlsConfig
var poolServices PoolServices var poolServices PoolServices
var err error var err error
if tlsConfig != nil { if client.tlsConfig != nil {
poolServices, err = client.GetPoolServices("default") poolServices, err = client.GetPoolServices("default")
if err != nil { if err != nil {
return err return err
@ -1238,10 +1425,10 @@ func (b *Bucket) refresh(preserveConnections bool) error {
newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList)) newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
for i := range newcps { for i := range newcps {
hostport := tmpb.VBSMJson.ServerList[i]
if preserveConnections { if preserveConnections {
pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */) pool := b.getConnPoolByHost(hostport, true /* bucket already locked */)
if pool != nil && pool.inUse == false { if pool != nil && pool.inUse == false && (!pool.encrypted || pool.tlsConfig == client.tlsConfig) {
// if the hostname and index is unchanged then reuse this pool // if the hostname and index is unchanged then reuse this pool
newcps[i] = pool newcps[i] = pool
pool.inUse = true pool.inUse = true
@ -1249,9 +1436,9 @@ func (b *Bucket) refresh(preserveConnections bool) error {
} }
} }
hostport := tmpb.VBSMJson.ServerList[i] var encrypted bool
if tlsConfig != nil { if client.tlsConfig != nil {
hostport, err = MapKVtoSSL(hostport, &poolServices) hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
if err != nil { if err != nil {
b.Unlock() b.Unlock()
return err return err
@ -1260,12 +1447,12 @@ func (b *Bucket) refresh(preserveConnections bool) error {
if b.ah != nil { if b.ah != nil {
newcps[i] = newConnectionPool(hostport, newcps[i] = newConnectionPool(hostport,
b.ah, AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name) b.ah, AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)
} else { } else {
newcps[i] = newConnectionPool(hostport, newcps[i] = newConnectionPool(hostport,
b.authHandler(true /* bucket already locked */), b.authHandler(true /* bucket already locked */),
AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name) AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)
} }
} }
b.replaceConnPools2(newcps, true /* bucket already locked */) b.replaceConnPools2(newcps, true /* bucket already locked */)
@ -1301,6 +1488,7 @@ func (p *Pool) refresh() (err error) {
p.BucketMap[b.Name] = b p.BucketMap[b.Name] = b
runtime.SetFinalizer(b, bucketFinalizer) runtime.SetFinalizer(b, bucketFinalizer)
} }
buckets = nil
return nil return nil
} }
@ -1320,6 +1508,9 @@ func (c *Client) GetPool(name string) (p Pool, err error) {
} }
err = c.parseURLResponse(poolURI, &p) err = c.parseURLResponse(poolURI, &p)
if err != nil {
return p, err
}
p.client = c p.client = c
@ -1427,15 +1618,32 @@ func (p *Pool) GetClient() *Client {
// Release bucket connections when the pool is no longer in use // Release bucket connections when the pool is no longer in use
func (p *Pool) Close() { func (p *Pool) Close() {
// MB-36186 make the bucket map inaccessible
bucketMap := p.BucketMap
p.BucketMap = nil
// fine to loop through the buckets unlocked // fine to loop through the buckets unlocked
// locking happens at the bucket level // locking happens at the bucket level
for b, _ := range p.BucketMap { for b, _ := range bucketMap {
// MB-36186 make the bucket unreachable and avoid concurrent read/write map panics
bucket := bucketMap[b]
bucketMap[b] = nil
bucket.Lock()
// MB-33208 defer closing connection pools until the bucket is no longer used // MB-33208 defer closing connection pools until the bucket is no longer used
bucket := p.BucketMap[b] // MB-36186 if the bucket is unused make it unreachable straight away
bucket.Lock() needClose := bucket.connPools == nil && !bucket.closed
if needClose {
runtime.SetFinalizer(&bucket, nil)
}
bucket.closed = true bucket.closed = true
bucket.Unlock() bucket.Unlock()
if needClose {
bucket.Close()
}
} }
} }
@ -1472,3 +1680,67 @@ func ConnectWithAuthAndGetBucket(endpoint, poolname, bucketname string,
return pool.GetBucket(bucketname) return pool.GetBucket(bucketname)
} }
func GetSystemBucket(c *Client, p *Pool, name string) (*Bucket, error) {
bucket, err := p.GetBucket(name)
if err != nil {
if _, ok := err.(*BucketNotFoundError); !ok {
return nil, err
}
// create the bucket if not found
args := map[string]interface{}{
"authType": "sasl",
"bucketType": "couchbase",
"name": name,
"ramQuotaMB": 100,
"saslPassword": "donotuse",
}
var ret interface{}
// allow "bucket already exists" error in case duplicate create
// (e.g. two query nodes starting at same time)
err = c.parsePostURLResponseTerse("/pools/default/buckets", args, &ret)
if err != nil && !AlreadyExistsError(err) {
return nil, err
}
// bucket created asynchronously, try to get the bucket
maxRetry := 8
interval := 100 * time.Millisecond
for i := 0; i < maxRetry; i++ {
time.Sleep(interval)
interval *= 2
err = p.refresh()
if err != nil {
return nil, err
}
bucket, err = p.GetBucket(name)
if bucket != nil {
bucket.RLock()
ok := !bucket.closed && len(bucket.getConnPools(true /* already locked */)) > 0
bucket.RUnlock()
if ok {
break
}
} else if err != nil {
if _, ok := err.(*BucketNotFoundError); !ok {
break
}
}
}
}
return bucket, err
}
func DropSystemBucket(c *Client, name string) error {
err := c.parseDeleteURLResponseTerse("/pools/default/buckets/"+name, nil, nil)
return err
}
func AlreadyExistsError(err error) bool {
// Bucket error: Bucket with given name already exists
// Scope error: Scope with this name already exists
// Collection error: Collection with this name already exists
return strings.Contains(err.Error(), " name already exists")
}
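GetSystemBucket creates the bucket if it is missing and then polls with exponential backoff until it is usable. A hedged usage sketch follows; the bucket name is made up and the client/pool are assumed to be already connected.

package example

import (
	"log"

	couchbase "github.com/couchbase/go-couchbase"
)

// systemBucketExample sketches GetSystemBucket/DropSystemBucket usage.
func systemBucketExample(c *couchbase.Client, p *couchbase.Pool) {
	b, err := couchbase.GetSystemBucket(c, p, "system_metadata")
	if err != nil {
		log.Printf("could not get or create system bucket: %v", err)
		return
	}

	// ... use the bucket ...

	b.Close()
	if err := couchbase.DropSystemBucket(c, "system_metadata"); err != nil {
		log.Printf("drop failed: %v", err)
	}
}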

vendor/github.com/couchbase/go-couchbase/port_map.go generated vendored Normal file

@ -0,0 +1,106 @@
package couchbase
/*
The goal here is to map a hostname:port combination to another hostname:port
combination. The original hostname:port gives the name and regular KV port
of a couchbase server. We want to determine the corresponding SSL KV port.
To do this, we have a pool services structure, as obtained from
the /pools/default/nodeServices API.
For a fully configured two-node system, the structure may look like this:
{"rev":32,"nodesExt":[
{"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"},
{"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]}
In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields.
For a single-node system, perhaps brought up for testing, the structure may look like this:
{"rev":66,"nodesExt":[
{"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}
Here, note that there is only a single entry in the "nodesExt" array and that it does not have a "hostname" field.
We will assume that either hostname fields are present, or there is only a single node.
*/
import (
"encoding/json"
"fmt"
"net"
"strconv"
)
func ParsePoolServices(jsonInput string) (*PoolServices, error) {
ps := &PoolServices{}
err := json.Unmarshal([]byte(jsonInput), ps)
return ps, err
}
// Accepts a "host:port" string representing the KV TCP port and the pools
// nodeServices payload and returns a host:port string representing the KV
// TLS port on the same node as the KV TCP port.
// Returns the original host:port in the case of local communication (services
// on the same node as the source).
func MapKVtoSSL(hostport string, ps *PoolServices) (string, bool, error) {
return MapKVtoSSLExt(hostport, ps, false)
}
func MapKVtoSSLExt(hostport string, ps *PoolServices, force bool) (string, bool, error) {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return "", false, fmt.Errorf("Unable to split hostport %s: %v", hostport, err)
}
portInt, err := strconv.Atoi(port)
if err != nil {
return "", false, fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err)
}
var ns *NodeServices
for i := range ps.NodesExt {
hostname := ps.NodesExt[i].Hostname
if len(hostname) != 0 && hostname != host {
/* If the hostname is the empty string, it means the node (and by extension
the cluster) is configured on the loopback. Further, it means that the client
should use whatever hostname it used to get the nodeServices information in
the first place to access the cluster. Thus, when the hostname is empty in
the nodeService entry we can assume that client will use the hostname it used
to access the KV TCP endpoint - and thus that it automatically "matches".
If hostname is not empty and doesn't match then we move to the next entry.
*/
continue
}
kvPort, found := ps.NodesExt[i].Services["kv"]
if !found {
/* not a node with a KV service */
continue
}
if kvPort == portInt {
ns = &(ps.NodesExt[i])
break
}
}
if ns == nil {
return "", false, fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt))
}
kvSSL, found := ns.Services["kvSSL"]
if !found {
return "", false, fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport)
}
// Don't encrypt communication with the local node
if !force && (len(ns.Hostname) == 0 || ns.ThisNode) {
return hostport, false, nil
}
ip := net.ParseIP(host)
if ip != nil && ip.To4() == nil && ip.To16() != nil { // IPv6 and not a FQDN
// Prefix and suffix square brackets as SplitHostPort removes them,
// see: https://golang.org/pkg/net/#SplitHostPort
host = "[" + host + "]"
}
return fmt.Sprintf("%s:%d", host, kvSSL), true, nil
}
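A worked example of the mapping described in the package comment above, using a deliberately simplified nodeServices payload (the address 10.0.0.1 and the ports are made up). Because the node's hostname matches and it is not the local node, the KV port is swapped for the kvSSL port and the encrypted flag comes back true.

package example

import (
	"fmt"

	couchbase "github.com/couchbase/go-couchbase"
)

// mapKVExample feeds a simplified nodeServices document through ParsePoolServices
// and MapKVtoSSL.
func mapKVExample() error {
	payload := `{"rev":32,"nodesExt":[
		{"services":{"mgmt":8091,"kv":11210,"kvSSL":11207},"hostname":"10.0.0.1"}]}`

	ps, err := couchbase.ParsePoolServices(payload)
	if err != nil {
		return err
	}
	ssl, encrypted, err := couchbase.MapKVtoSSL("10.0.0.1:11210", ps)
	if err != nil {
		return err
	}
	fmt.Println(ssl, encrypted) // "10.0.0.1:11207 true" for this payload
	return nil
}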


@ -22,6 +22,7 @@ const MAX_RETRY_COUNT = 5
const DISCONNECT_PERIOD = 120 * time.Second const DISCONNECT_PERIOD = 120 * time.Second
type NotifyFn func(bucket string, err error) type NotifyFn func(bucket string, err error)
type StreamingFn func(bucket *Bucket)
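RunBucketUpdater2 (below) accepts this callback and invokes it with the refreshed bucket each time a new streaming configuration has been applied. A hedged sketch of registering both callbacks; the log messages are illustrative only.

package example

import (
	"log"

	couchbase "github.com/couchbase/go-couchbase"
)

// watchBucket sketches RunBucketUpdater2 with a streaming callback and an error callback.
func watchBucket(b *couchbase.Bucket) {
	onConfig := func(updated *couchbase.Bucket) {
		log.Printf("new configuration applied for bucket %s", updated.GetName())
	}
	onError := func(name string, err error) {
		log.Printf("bucket updater for %s stopped: %v", name, err)
	}
	b.RunBucketUpdater2(onConfig, onError)
}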
// Use TCP keepalive to detect half close sockets // Use TCP keepalive to detect half close sockets
var updaterTransport http.RoundTripper = &http.Transport{ var updaterTransport http.RoundTripper = &http.Transport{
@ -55,8 +56,12 @@ func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) {
} }
func (b *Bucket) RunBucketUpdater(notify NotifyFn) { func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
b.RunBucketUpdater2(nil, notify)
}
func (b *Bucket) RunBucketUpdater2(streamingFn StreamingFn, notify NotifyFn) {
go func() { go func() {
err := b.UpdateBucket() err := b.UpdateBucket2(streamingFn)
if err != nil { if err != nil {
if notify != nil { if notify != nil {
notify(b.GetName(), err) notify(b.GetName(), err)
@ -84,19 +89,13 @@ func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) {
} }
func (b *Bucket) UpdateBucket() error { func (b *Bucket) UpdateBucket() error {
return b.UpdateBucket2(nil)
}
func (b *Bucket) UpdateBucket2(streamingFn StreamingFn) error {
var failures int var failures int
var returnErr error var returnErr error
var poolServices PoolServices var poolServices PoolServices
var err error
tlsConfig := b.pool.client.tlsConfig
if tlsConfig != nil {
poolServices, err = b.pool.client.GetPoolServices("default")
if err != nil {
return err
}
}
for { for {
@ -113,7 +112,7 @@ func (b *Bucket) UpdateBucket() error {
startNode := rand.Intn(len(nodes)) startNode := rand.Intn(len(nodes))
node := nodes[(startNode)%len(nodes)] node := nodes[(startNode)%len(nodes)]
streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, b.GetName()) streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, uriAdj(b.GetName()))
logging.Infof(" Trying with %s", streamUrl) logging.Infof(" Trying with %s", streamUrl)
req, err := http.NewRequest("GET", streamUrl, nil) req, err := http.NewRequest("GET", streamUrl, nil)
if err != nil { if err != nil {
@ -156,6 +155,16 @@ func (b *Bucket) UpdateBucket() error {
// if we got here, reset failure count // if we got here, reset failure count
failures = 0 failures = 0
if b.pool.client.tlsConfig != nil {
poolServices, err = b.pool.client.GetPoolServices("default")
if err != nil {
returnErr = err
res.Body.Close()
break
}
}
b.Lock() b.Lock()
// mark all the old connection pools for deletion // mark all the old connection pools for deletion
@ -170,16 +179,17 @@ func (b *Bucket) UpdateBucket() error {
for i := range newcps { for i := range newcps {
// get the old connection pool and check if it is still valid // get the old connection pool and check if it is still valid
pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */) pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
if pool != nil && pool.inUse == false { if pool != nil && pool.inUse == false && pool.tlsConfig == b.pool.client.tlsConfig {
// if the hostname and index is unchanged then reuse this pool // if the hostname and index is unchanged then reuse this pool
newcps[i] = pool newcps[i] = pool
pool.inUse = true pool.inUse = true
continue continue
} }
// else create a new pool // else create a new pool
var encrypted bool
hostport := tmpb.VBSMJson.ServerList[i] hostport := tmpb.VBSMJson.ServerList[i]
if tlsConfig != nil { if b.pool.client.tlsConfig != nil {
hostport, err = MapKVtoSSL(hostport, &poolServices) hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
if err != nil { if err != nil {
b.Unlock() b.Unlock()
return err return err
@ -187,12 +197,12 @@ func (b *Bucket) UpdateBucket() error {
} }
if b.ah != nil { if b.ah != nil {
newcps[i] = newConnectionPool(hostport, newcps[i] = newConnectionPool(hostport,
b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name) b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)
} else { } else {
newcps[i] = newConnectionPool(hostport, newcps[i] = newConnectionPool(hostport,
b.authHandler(true /* bucket already locked */), b.authHandler(true /* bucket already locked */),
false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name) false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)
} }
} }
@ -203,7 +213,10 @@ func (b *Bucket) UpdateBucket() error {
b.nodeList = unsafe.Pointer(&tmpb.NodesJSON) b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
b.Unlock() b.Unlock()
logging.Infof("Got new configuration for bucket %s", b.GetName()) if streamingFn != nil {
streamingFn(tmpb)
}
logging.Debugf("Got new configuration for bucket %s", b.GetName())
} }
// we are here because of an error // we are here because of an error


@ -88,6 +88,7 @@ func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
// close the connection so that it doesn't get reused for upr data // close the connection so that it doesn't get reused for upr data
// connection // connection
defer mc.Close() defer mc.Close()
mc.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
failoverlogs, err := mc.UprGetFailoverLog(vbList) failoverlogs, err := mc.UprGetFailoverLog(vbList)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error getting failover log %s host %s", return nil, fmt.Errorf("Error getting failover log %s host %s",


@ -13,8 +13,10 @@ type User struct {
} }
type Role struct { type Role struct {
Role string Role string
BucketName string `json:"bucket_name"` BucketName string `json:"bucket_name"`
ScopeName string `json:"scope_name"`
CollectionName string `json:"collection_name"`
} }
// Sample: // Sample:


@ -45,7 +45,7 @@ type streamIdNonResumeScopeMeta struct {
} }
func (c *CollectionsFilter) IsValid() error { func (c *CollectionsFilter) IsValid() error {
if c.UseManifestUid { if c.UseManifestUid && c.UseStreamId {
return fmt.Errorf("Not implemented yet") return fmt.Errorf("Not implemented yet")
} }
@ -99,8 +99,10 @@ func (c *CollectionsFilter) ToStreamReqBody() ([]byte, error) {
case false: case false:
switch c.UseManifestUid { switch c.UseManifestUid {
case true: case true:
// TODO filter := &nonStreamIdResumeScopeMeta{
return nil, fmt.Errorf("NotImplemented1") ManifestId: fmt.Sprintf("%x", c.ManifestUid),
}
output = *filter
case false: case false:
switch len(c.CollectionsList) > 0 { switch len(c.CollectionsList) > 0 {
case true: case true:
