Issue search support elasticsearch (#9428)
* Issue search support elasticsearch
* Fix lint
* Add indexer name on app.ini
* Add a warning on SearchIssuesByKeyword
* Improve code
parent 17656021f1
commit 5dbf36f356
286 changed files with 57032 additions and 25 deletions
@@ -86,6 +86,12 @@ services:
     pull: default
     image: gitea/test-openldap:latest
 
+  - name: elasticsearch
+    pull: default
+    environment:
+      discovery.type: single-node
+    image: elasticsearch:7.5.0
+
   steps:
   - name: fetch-tags
     pull: default
@@ -368,8 +368,12 @@ CONN_MAX_LIFETIME = 3s
 MAX_OPEN_CONNS = 0
 
 [indexer]
-; Issue indexer type, currently support: bleve or db, default is bleve
+; Issue indexer type, currently support: bleve, db or elasticsearch, default is bleve
 ISSUE_INDEXER_TYPE = bleve
+; Issue indexer connection string, available when ISSUE_INDEXER_TYPE is elasticsearch
+ISSUE_INDEXER_CONN_STR = http://elastic:changeme@localhost:9200
+; Issue indexer name, available when ISSUE_INDEXER_TYPE is elasticsearch
+ISSUE_INDEXER_NAME = gitea_issues
 ; Issue indexer storage path, available when ISSUE_INDEXER_TYPE is bleve
 ISSUE_INDEXER_PATH = indexers/issues.bleve
 ; Issue indexer queue, currently support: channel, levelqueue or redis, default is levelqueue
@@ -228,8 +228,10 @@ relation to port exhaustion.
 
 ## Indexer (`indexer`)
 
-- `ISSUE_INDEXER_TYPE`: **bleve**: Issue indexer type, currently support: bleve or db, if it's db, below issue indexer item will be invalid.
+- `ISSUE_INDEXER_TYPE`: **bleve**: Issue indexer type, currently supported: `bleve`, `db` or `elasticsearch`.
-- `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: Index file used for issue search.
+- `ISSUE_INDEXER_CONN_STR`: ****: Issue indexer connection string, available when ISSUE_INDEXER_TYPE is elasticsearch. i.e. http://elastic:changeme@localhost:9200
+- `ISSUE_INDEXER_NAME`: **gitea_issues**: Issue indexer name, available when ISSUE_INDEXER_TYPE is elasticsearch
+- `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: Index file used for issue search; available when ISSUE_INDEXER_TYPE is bleve and elasticsearch.
 - The next 4 configuration values are deprecated and should be set in `queue.issue_indexer` however are kept for backwards compatibility:
 - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue, currently supports:`channel`, `levelqueue`, `redis`.
 - `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: When `ISSUE_INDEXER_QUEUE_TYPE` is `levelqueue`, this will be the queue will be saved path.
@@ -89,7 +89,9 @@ menu:
 
 ## Indexer (`indexer`)
 
-- `ISSUE_INDEXER_TYPE`: **bleve**: 工单索引类型,当前支持 `bleve` 或 `db`,当为 `db` 时其它工单索引项可不用设置。
+- `ISSUE_INDEXER_TYPE`: **bleve**: 工单索引类型,当前支持 `bleve`, `db` 和 `elasticsearch`,当为 `db` 时其它工单索引项可不用设置。
+- `ISSUE_INDEXER_CONN_STR`: ****: 工单索引连接字符串,仅当 ISSUE_INDEXER_TYPE 为 `elasticsearch` 时有效。例如: http://elastic:changeme@localhost:9200
+- `ISSUE_INDEXER_NAME`: **gitea_issues**: 工单索引名称,仅当 ISSUE_INDEXER_TYPE 为 `elasticsearch` 时有效。
 - `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: 工单索引文件存放路径,当索引类型为 `bleve` 时有效。
 - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: 工单索引队列类型,当前支持 `channel`, `levelqueue` 或 `redis`。
 - `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: 当 `ISSUE_INDEXER_QUEUE_TYPE` 为 `levelqueue` 时,保存索引队列的磁盘路径。
go.mod (1 change)
@@ -74,6 +74,7 @@ require (
 	github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
 	github.com/niklasfasching/go-org v0.1.9
 	github.com/oliamb/cutter v0.2.2
+	github.com/olivere/elastic/v7 v7.0.9
 	github.com/pkg/errors v0.8.1
 	github.com/pquerna/otp v0.0.0-20160912161815-54653902c20e
 	github.com/prometheus/client_golang v1.1.0
go.sum (10 changes)
@@ -68,6 +68,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
 github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.25.25/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -154,6 +155,8 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD
 github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -248,6 +251,7 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZ
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -322,6 +326,7 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
 github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
 github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
@@ -411,6 +416,8 @@ github.com/niklasfasching/go-org v0.1.9/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUv
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/oliamb/cutter v0.2.2 h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k=
 github.com/oliamb/cutter v0.2.2/go.mod h1:4BenG2/4GuRBDbVm/OPahDVqbrOemzpPiG5mi1iryBU=
+github.com/olivere/elastic/v7 v7.0.9 h1:+bTR1xJbfLYD8WnTBt9672mFlKxjfWRJpEQ1y8BMS3g=
+github.com/olivere/elastic/v7 v7.0.9/go.mod h1:2TeRd0vhLRTK9zqm5xP0uLiVeZ5yUoL7kZ+8SZA9r9Y=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
@@ -418,6 +425,7 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
@@ -486,6 +494,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
 github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
@@ -573,6 +582,7 @@ go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
 go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -10,6 +10,8 @@ PASSWD = {{TEST_MYSQL_PASSWORD}}
 SSL_MODE = disable
 
 [indexer]
+ISSUE_INDEXER_TYPE = elasticsearch
+ISSUE_INDEXER_CONN_STR = http://elastic:changeme@elasticsearch:9200
 ISSUE_INDEXER_PATH = integrations/indexers-mysql/issues.bleve
 REPO_INDEXER_ENABLED = true
 REPO_INDEXER_PATH = integrations/indexers-mysql/repos.bleve
@@ -170,7 +170,7 @@ func NewBleveIndexer(indexDir string) *BleveIndexer {
     }
 }
 
-// Init will initial the indexer
+// Init will initialize the indexer
 func (b *BleveIndexer) Init() (bool, error) {
     var err error
     b.indexer, err = openIndexer(b.indexDir, issueIndexerLatestVersion)
modules/indexer/issues/elastic_search.go (new file, 230 lines)
@@ -0,0 +1,230 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package issues

import (
    "context"
    "errors"
    "fmt"
    "strconv"
    "time"

    "code.gitea.io/gitea/modules/log"

    "github.com/olivere/elastic/v7"
)

var (
    _ Indexer = &ElasticSearchIndexer{}
)

// ElasticSearchIndexer implements Indexer interface
type ElasticSearchIndexer struct {
    client      *elastic.Client
    indexerName string
}

type elasticLogger struct {
    *log.Logger
}

func (l elasticLogger) Printf(format string, args ...interface{}) {
    _ = l.Logger.Log(2, l.Logger.GetLevel(), format, args...)
}

// NewElasticSearchIndexer creates a new elasticsearch indexer
func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, error) {
    opts := []elastic.ClientOptionFunc{
        elastic.SetURL(url),
        elastic.SetSniff(false),
        elastic.SetHealthcheckInterval(10 * time.Second),
        elastic.SetGzip(false),
    }

    logger := elasticLogger{log.GetLogger(log.DEFAULT)}

    if logger.GetLevel() == log.TRACE || logger.GetLevel() == log.DEBUG {
        opts = append(opts, elastic.SetTraceLog(logger))
    } else if logger.GetLevel() == log.ERROR || logger.GetLevel() == log.CRITICAL || logger.GetLevel() == log.FATAL {
        opts = append(opts, elastic.SetErrorLog(logger))
    } else if logger.GetLevel() == log.INFO || logger.GetLevel() == log.WARN {
        opts = append(opts, elastic.SetInfoLog(logger))
    }

    client, err := elastic.NewClient(opts...)
    if err != nil {
        return nil, err
    }

    return &ElasticSearchIndexer{
        client:      client,
        indexerName: indexerName,
    }, nil
}

const (
    defaultMapping = `{
        "mappings": {
            "properties": {
                "id": {
                    "type": "integer",
                    "index": true
                },
                "repo_id": {
                    "type": "integer",
                    "index": true
                },
                "title": {
                    "type": "text",
                    "index": true
                },
                "content": {
                    "type": "text",
                    "index": true
                },
                "comments": {
                    "type" : "text",
                    "index": true
                }
            }
        }
    }`
)

// Init will initialize the indexer
func (b *ElasticSearchIndexer) Init() (bool, error) {
    ctx := context.Background()
    exists, err := b.client.IndexExists(b.indexerName).Do(ctx)
    if err != nil {
        return false, err
    }

    if !exists {
        var mapping = defaultMapping

        createIndex, err := b.client.CreateIndex(b.indexerName).BodyString(mapping).Do(ctx)
        if err != nil {
            return false, err
        }
        if !createIndex.Acknowledged {
            return false, errors.New("init failed")
        }

        return false, nil
    }
    return true, nil
}

// Index will save the index data
func (b *ElasticSearchIndexer) Index(issues []*IndexerData) error {
    if len(issues) == 0 {
        return nil
    } else if len(issues) == 1 {
        issue := issues[0]
        _, err := b.client.Index().
            Index(b.indexerName).
            Id(fmt.Sprintf("%d", issue.ID)).
            BodyJson(map[string]interface{}{
                "id":       issue.ID,
                "repo_id":  issue.RepoID,
                "title":    issue.Title,
                "content":  issue.Content,
                "comments": issue.Comments,
            }).
            Do(context.Background())
        return err
    }

    reqs := make([]elastic.BulkableRequest, 0)
    for _, issue := range issues {
        reqs = append(reqs,
            elastic.NewBulkIndexRequest().
                Index(b.indexerName).
                Id(fmt.Sprintf("%d", issue.ID)).
                Doc(map[string]interface{}{
                    "id":       issue.ID,
                    "repo_id":  issue.RepoID,
                    "title":    issue.Title,
                    "content":  issue.Content,
                    "comments": issue.Comments,
                }),
        )
    }

    _, err := b.client.Bulk().
        Index(b.indexerName).
        Add(reqs...).
        Do(context.Background())
    return err
}

// Delete deletes indexes by ids
func (b *ElasticSearchIndexer) Delete(ids ...int64) error {
    if len(ids) == 0 {
        return nil
    } else if len(ids) == 1 {
        _, err := b.client.Delete().
            Index(b.indexerName).
            Id(fmt.Sprintf("%d", ids[0])).
            Do(context.Background())
        return err
    }

    reqs := make([]elastic.BulkableRequest, 0)
    for _, id := range ids {
        reqs = append(reqs,
            elastic.NewBulkDeleteRequest().
                Index(b.indexerName).
                Id(fmt.Sprintf("%d", id)),
        )
    }

    _, err := b.client.Bulk().
        Index(b.indexerName).
        Add(reqs...).
        Do(context.Background())
    return err
}

// Search searches for issues by given conditions.
// Returns the matching issue IDs
func (b *ElasticSearchIndexer) Search(keyword string, repoIDs []int64, limit, start int) (*SearchResult, error) {
    kwQuery := elastic.NewMultiMatchQuery(keyword, "title", "content", "comments")
    query := elastic.NewBoolQuery()
    query = query.Must(kwQuery)
    if len(repoIDs) > 0 {
        var repoStrs = make([]interface{}, 0, len(repoIDs))
        for _, repoID := range repoIDs {
            repoStrs = append(repoStrs, repoID)
        }
        repoQuery := elastic.NewTermsQuery("repo_id", repoStrs...)
        query = query.Must(repoQuery)
    }
    searchResult, err := b.client.Search().
        Index(b.indexerName).
        Query(query).
        Sort("id", true).
        From(start).Size(limit).
        Do(context.Background())
    if err != nil {
        return nil, err
    }

    hits := make([]Match, 0, limit)
    for _, hit := range searchResult.Hits.Hits {
        id, _ := strconv.ParseInt(hit.Id, 10, 64)
        hits = append(hits, Match{
            ID: id,
        })
    }

    return &SearchResult{
        Total: searchResult.TotalHits(),
        Hits:  hits,
    }, nil
}

// Close implements indexer
func (b *ElasticSearchIndexer) Close() {}
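For orientation, a hedged sketch of driving this indexer directly from the same package (for example in a test); the connection URL, index name and issue values are illustrative assumptions, and in Gitea itself the indexer is wired up by InitIssueIndexer further down:

```go
package issues

import "fmt"

// exampleElasticSearchUsage is a minimal sketch, not part of this patch.
func exampleElasticSearchUsage() error {
	// Assumed local elasticsearch instance and index name.
	idx, err := NewElasticSearchIndexer("http://elastic:changeme@localhost:9200", "gitea_issues")
	if err != nil {
		return err
	}
	// Init creates the index with defaultMapping when it does not exist yet.
	if _, err := idx.Init(); err != nil {
		return err
	}
	// Index one document, then search it back by keyword within repo 2.
	if err := idx.Index([]*IndexerData{{
		ID:      1,
		RepoID:  2,
		Title:   "panic when indexing",
		Content: "steps to reproduce ...",
	}}); err != nil {
		return err
	}
	res, err := idx.Search("panic", []int64{2}, 50, 0)
	if err != nil {
		return err
	}
	for _, hit := range res.Hits {
		fmt.Println("matched issue id:", hit.ID)
	}
	return nil
}
```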
@@ -21,13 +21,13 @@ import (
 
 // IndexerData data stored in the issue indexer
 type IndexerData struct {
-	ID       int64
-	RepoID   int64
-	Title    string
-	Content  string
-	Comments []string
-	IsDelete bool
-	IDs      []int64
+	ID       int64    `json:"id"`
+	RepoID   int64    `json:"repo_id"`
+	Title    string   `json:"title"`
+	Content  string   `json:"content"`
+	Comments []string `json:"comments"`
+	IsDelete bool     `json:"is_delete"`
+	IDs      []int64  `json:"ids"`
 }
 
 // Match represents on search result
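With these tags an IndexerData value marshals to the same field names the elasticsearch mapping above declares; a small sketch with invented values:

```go
package issues

import (
	"encoding/json"
	"fmt"
)

// exampleIndexerDataJSON shows the wire form of one indexed issue (sketch only).
func exampleIndexerDataJSON() {
	d := IndexerData{ID: 7, RepoID: 2, Title: "crash", Content: "details", Comments: []string{"me too"}}
	b, _ := json.Marshal(d)
	fmt.Println(string(b))
	// {"id":7,"repo_id":2,"title":"crash","content":"details","comments":["me too"],"is_delete":false,"ids":null}
}
```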
@@ -100,7 +100,7 @@ func InitIssueIndexer(syncReindex bool) {
 
     // Create the Queue
     switch setting.Indexer.IssueType {
-    case "bleve":
+    case "bleve", "elasticsearch":
         handler := func(data ...queue.Data) {
             indexer := holder.get()
             if indexer == nil {
@@ -160,6 +160,19 @@ func InitIssueIndexer(syncReindex bool) {
             log.Info("PID: %d Issue Indexer closed", os.Getpid())
         })
         log.Debug("Created Bleve Indexer")
+    case "elasticsearch":
+        graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(context.Context, func())) {
+            issueIndexer, err := NewElasticSearchIndexer(setting.Indexer.IssueConnStr, "gitea_issues")
+            if err != nil {
+                log.Fatal("Unable to initialize Elastic Search Issue Indexer: %v", err)
+            }
+            exist, err := issueIndexer.Init()
+            if err != nil {
+                log.Fatal("Unable to issueIndexer.Init: %v", err)
+            }
+            populate = !exist
+            holder.set(issueIndexer)
+        })
     case "db":
         issueIndexer := &DBIndexer{}
         holder.set(issueIndexer)
@@ -308,6 +321,7 @@ func DeleteRepoIssueIndexer(repo *models.Repository) {
 }
 
 // SearchIssuesByKeyword search issue ids by keywords and repo id
+// WARNING: You have to ensure user have permission to visit repoIDs' issues
 func SearchIssuesByKeyword(repoIDs []int64, keyword string) ([]int64, error) {
     var issueIDs []int64
     indexer := holder.get()
@@ -316,7 +330,7 @@ func SearchIssuesByKeyword(repoIDs []int64, keyword string) ([]int64, error) {
         log.Error("SearchIssuesByKeyword(): unable to get indexer!")
         return nil, fmt.Errorf("unable to get issue indexer")
     }
-    res, err := indexer.Search(keyword, repoIDs, 1000, 0)
+    res, err := indexer.Search(keyword, repoIDs, 50, 0)
     if err != nil {
         return nil, err
     }
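A hedged sketch of a caller honoring the warning added here (function and variable names are invented; the repo IDs are assumed to have been permission-filtered already):

```go
package issues

// searchAccessibleIssues is a sketch only; repoIDs must already be limited to
// repositories the requesting user is allowed to see, as the WARNING requires.
func searchAccessibleIssues(repoIDs []int64, keyword string) ([]int64, error) {
	if len(repoIDs) == 0 {
		return nil, nil // nothing the user may see, so nothing to search
	}
	return SearchIssuesByKeyword(repoIDs, keyword)
}
```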
@@ -27,20 +27,25 @@ var (
 	Indexer = struct {
 		IssueType             string
 		IssuePath             string
-		RepoIndexerEnabled    bool
-		RepoPath              string
-		UpdateQueueLength     int
-		MaxIndexerFileSize    int64
+		IssueConnStr          string
+		IssueIndexerName      string
 		IssueQueueType        string
 		IssueQueueDir         string
 		IssueQueueConnStr     string
 		IssueQueueBatchNumber int
 		StartupTimeout        time.Duration
-		IncludePatterns       []glob.Glob
-		ExcludePatterns       []glob.Glob
+		RepoIndexerEnabled    bool
+		RepoPath              string
+		UpdateQueueLength     int
+		MaxIndexerFileSize    int64
+		IncludePatterns       []glob.Glob
+		ExcludePatterns       []glob.Glob
 	}{
 		IssueType:         "bleve",
 		IssuePath:         "indexers/issues.bleve",
+		IssueConnStr:      "",
+		IssueIndexerName:  "gitea_issues",
 		IssueQueueType:    LevelQueueType,
 		IssueQueueDir:     "indexers/issues.queue",
 		IssueQueueConnStr: "",
@@ -57,6 +62,14 @@ func newIndexerService() {
 	if !filepath.IsAbs(Indexer.IssuePath) {
 		Indexer.IssuePath = path.Join(AppWorkPath, Indexer.IssuePath)
 	}
+	Indexer.IssueConnStr = sec.Key("ISSUE_INDEXER_CONN_STR").MustString(Indexer.IssueConnStr)
+	Indexer.IssueIndexerName = sec.Key("ISSUE_INDEXER_NAME").MustString(Indexer.IssueIndexerName)
+
+	Indexer.IssueQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType)
+	Indexer.IssueQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue"))
+	Indexer.IssueQueueConnStr = sec.Key("ISSUE_INDEXER_QUEUE_CONN_STR").MustString(path.Join(AppDataPath, ""))
+	Indexer.IssueQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20)
+
 	Indexer.RepoIndexerEnabled = sec.Key("REPO_INDEXER_ENABLED").MustBool(false)
 	Indexer.RepoPath = sec.Key("REPO_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/repos.bleve"))
 	if !filepath.IsAbs(Indexer.RepoPath) {
@@ -64,13 +77,8 @@ func newIndexerService() {
 	}
 	Indexer.IncludePatterns = IndexerGlobFromString(sec.Key("REPO_INDEXER_INCLUDE").MustString(""))
 	Indexer.ExcludePatterns = IndexerGlobFromString(sec.Key("REPO_INDEXER_EXCLUDE").MustString(""))
-
 	Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20)
 	Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024)
-	Indexer.IssueQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType)
-	Indexer.IssueQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue"))
-	Indexer.IssueQueueConnStr = sec.Key("ISSUE_INDEXER_QUEUE_CONN_STR").MustString(path.Join(AppDataPath, ""))
-	Indexer.IssueQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20)
 	Indexer.StartupTimeout = sec.Key("STARTUP_TIMEOUT").MustDuration(30 * time.Second)
 }
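To see the new keys end to end, a hedged stand-alone sketch of reading them with go-ini the way newIndexerService does; the config path and the values are assumptions mirroring the sample config, not part of this patch:

```go
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	// Hypothetical stand-alone reader for the new [indexer] keys.
	cfg, err := ini.Load("custom/conf/app.ini")
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("indexer")
	issueType := sec.Key("ISSUE_INDEXER_TYPE").MustString("bleve")
	connStr := sec.Key("ISSUE_INDEXER_CONN_STR").MustString("")
	name := sec.Key("ISSUE_INDEXER_NAME").MustString("gitea_issues")
	fmt.Println(issueType, connStr, name)
}
```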
vendor/github.com/mailru/easyjson/.gitignore (generated, vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
.root
*_easyjson.go
*.iml
.idea
*.swp
vendor/github.com/mailru/easyjson/.travis.yml (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
language: go

go:
  - tip
  - stable

matrix:
  allow_failures:
    - go: tip

install:
  - go get golang.org/x/lint/golint
vendor/github.com/mailru/easyjson/Makefile (generated, vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
all: test

clean:
	rm -rf bin
	rm -rf tests/*_easyjson.go
	rm -rf benchmark/*_easyjson.go

build:
	go build -i -o ./bin/easyjson ./easyjson

generate: build
	bin/easyjson -stubs \
		./tests/snake.go \
		./tests/data.go \
		./tests/omitempty.go \
		./tests/nothing.go \
		./tests/named_type.go \
		./tests/custom_map_key_type.go \
		./tests/embedded_type.go \
		./tests/reference_to_pointer.go \

	bin/easyjson -all ./tests/data.go
	bin/easyjson -all ./tests/nothing.go
	bin/easyjson -all ./tests/errors.go
	bin/easyjson -snake_case ./tests/snake.go
	bin/easyjson -omit_empty ./tests/omitempty.go
	bin/easyjson -build_tags=use_easyjson ./benchmark/data.go
	bin/easyjson ./tests/nested_easy.go
	bin/easyjson ./tests/named_type.go
	bin/easyjson ./tests/custom_map_key_type.go
	bin/easyjson ./tests/embedded_type.go
	bin/easyjson ./tests/reference_to_pointer.go
	bin/easyjson ./tests/key_marshaler_map.go
	bin/easyjson -disallow_unknown_fields ./tests/disallow_unknown.go

test: generate
	go test \
		./tests \
		./jlexer \
		./gen \
		./buffer
	cd benchmark && go test -benchmem -tags use_easyjson -bench .
	golint -set_exit_status ./tests/*_easyjson.go

bench-other: generate
	cd benchmark && make

bench-python:
	benchmark/ujson.sh


.PHONY: clean generate test build
vendor/github.com/mailru/easyjson/README.md (generated, vendored, new file, 336 lines)
@@ -0,0 +1,336 @@
# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson)

Package easyjson provides a fast and easy way to marshal/unmarshal Go structs
to/from JSON without the use of reflection. In performance tests, easyjson
outperforms the standard `encoding/json` package by a factor of 4-5x, and other
JSON encoding packages by a factor of 2-3x.

easyjson aims to keep generated Go code simple enough so that it can be easily
optimized or fixed. Another goal is to provide users with the ability to
customize the generated code by providing options not available with the
standard `encoding/json` package, such as generating "snake_case" names or
enabling `omitempty` behavior by default.

## Usage
```sh
# install
go get -u github.com/mailru/easyjson/...

# run
easyjson -all <file>.go
```

The above will generate `<file>_easyjson.go` containing the appropriate marshaler and
unmarshaler funcs for all structs contained in `<file>.go`.

Please note that easyjson requires a full Go build environment and the `GOPATH`
environment variable to be set. This is because easyjson code generation
invokes `go run` on a temporary file (an approach to code generation borrowed
from [ffjson](https://github.com/pquerna/ffjson)).

## Options
```txt
Usage of easyjson:
  -all
        generate marshaler/unmarshalers for all structs in a file
  -build_tags string
        build tags to add to generated file
  -leave_temps
        do not delete temporary files
  -no_std_marshalers
        don't generate MarshalJSON/UnmarshalJSON funcs
  -noformat
        do not run 'gofmt -w' on output file
  -omit_empty
        omit empty fields by default
  -output_filename string
        specify the filename of the output
  -pkg
        process the whole package instead of just the given file
  -snake_case
        use snake_case names instead of CamelCase by default
  -lower_camel_case
        use lowerCamelCase instead of CamelCase by default
  -stubs
        only generate stubs for marshaler/unmarshaler funcs
  -disallow_unknown_fields
        return error if some unknown field in json appeared
```

Using `-all` will generate marshalers/unmarshalers for all Go structs in the
file. If `-all` is not provided, then only those structs whose preceding
comment starts with `easyjson:json` will have marshalers/unmarshalers
generated. For example:

```go
//easyjson:json
type A struct {}
```

Additional option notes:

* `-snake_case` tells easyjson to generate snake\_case field names by default
  (unless overridden by a field tag). The CamelCase to snake\_case conversion
  algorithm should work in most cases (ie, HTTPVersion will be converted to
  "http_version").

* `-build_tags` will add the specified build tags to generated Go sources.

## Generated Marshaler/Unmarshaler Funcs

For Go struct types, easyjson generates the funcs `MarshalEasyJSON` /
`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisify
the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in
conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary
reflection / type assertions during marshaling/unmarshaling to/from JSON for Go
structs.

easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct
types compatible with the standard `json.Marshaler` and `json.Unmarshaler`
interfaces. Please be aware that using the standard `json.Marshal` /
`json.Unmarshal` for marshaling/unmarshaling will incur a significant
performance penalty when compared to using `easyjson.Marshal` /
`easyjson.Unmarshal`.

Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and
`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers
and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter`
which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc
listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of
utility funcs that are available.

## Controlling easyjson Marshaling and Unmarshaling Behavior

Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs
that satisify the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces.
These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined
for a Go type.

Go types can also satisify the `easyjson.Optional` interface, which allows the
type to define its own `omitempty` logic.

## Type Wrappers

easyjson provides additional type wrappers defined in the `easyjson/opt`
package. These wrap the standard Go primitives and in turn satisify the
easyjson interfaces.

The `easyjson/opt` type wrappers are useful when needing to distinguish between
a missing value and/or when needing to specifying a default value. Type
wrappers allow easyjson to avoid additional pointers and heap allocations and
can significantly increase performance when used properly.

## Memory Pooling

easyjson uses a buffer pool that allocates data in increasing chunks from 128
to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of
`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory
allocation and to allow larger reusable buffers.

easyjson's custom allocation buffer pool is defined in the `easyjson/buffer`
package, and the default behavior pool behavior can be modified (if necessary)
through a call to `buffer.Init()` prior to any marshaling or unmarshaling.
Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer)
for more information.

## Issues, Notes, and Limitations

* easyjson is still early in its development. As such, there are likely to be
  bugs and missing features when compared to `encoding/json`. In the case of a
  missing feature or bug, please create a GitHub issue. Pull requests are
  welcome!

* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive
  matching is not currently provided due to the significant performance hit
  when doing case-insensitive key matching. In the future, case-insensitive
  object key matching may be provided via an option to the generator.

* easyjson makes use of `unsafe`, which simplifies the code and
  provides significant performance benefits by allowing no-copy
  conversion from `[]byte` to `string`. That said, `unsafe` is used
  only when unmarshaling and parsing JSON, and any `unsafe` operations
  / memory allocations done will be safely deallocated by
  easyjson. Set the build tag `easyjson_nounsafe` to compile it
  without `unsafe`.

* easyjson is compatible with Google App Engine. The `appengine` build
  tag (set by App Engine's environment) will automatically disable the
  use of `unsafe`, which is not allowed in App Engine's Standard
  Environment. Note that the use with App Engine is still experimental.

* Floats are formatted using the default precision from Go's `strconv` package.
  As such, easyjson will not correctly handle high precision floats when
  marshaling/unmarshaling JSON. Note, however, that there are very few/limited
  uses where this behavior is not sufficient for general use. That said, a
  different package may be needed if precise marshaling/unmarshaling of high
  precision floats to/from JSON is required.

* While unmarshaling, the JSON parser does the minimal amount of work needed to
  skip over unmatching parens, and as such full validation is not done for the
  entire JSON value being unmarshaled/parsed.

* Currently there is no true streaming support for encoding/decoding as
  typically for many uses/protocols the final, marshaled length of the JSON
  needs to be known prior to sending the data. Currently this is not possible
  with easyjson's architecture.

* easyjson parser and codegen based on reflection, so it wont works on `package main`
  files, because they cant be imported by parser.

## Benchmarks

Most benchmarks were done using the example
[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets)
(9k after eliminating whitespace). This example is similar to real-world data,
is well-structured, and contains a healthy variety of different types, making
it ideal for JSON serialization benchmarks.

Note:

* For small request benchmarks, an 80 byte portion of the above example was
  used.

* For large request marshaling benchmarks, a struct containing 50 regular
  samples was used, making a ~500kB output JSON.

* Benchmarks are showing the results of easyjson's default behaviour,
  which makes use of `unsafe`.

Benchmarks are available in the repository and can be run by invoking `make`.

### easyjson vs. encoding/json

easyjson is roughly 5-6 times faster than the standard `encoding/json` for
unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent
marshaling is 6-7x faster if marshaling to a writer.

### easyjson vs. ffjson

easyjson uses the same approach for JSON marshaling as
[ffjson](https://github.com/pquerna/ffjson), but takes a significantly
different approach to lexing and parsing JSON during unmarshaling. This means
easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for
non-concurrent unmarshaling.

As of this writing, `ffjson` seems to have issues when used concurrently:
specifically, large request pooling hurts `ffjson`'s performance and causes
scalability issues. These issues with `ffjson` can likely be fixed, but as of
writing remain outstanding/known issues with `ffjson`.

easyjson and `ffjson` have similar performance for small requests, however
easyjson outperforms `ffjson` by roughly 2-5x times for large requests when
used with a writer.

### easyjson vs. go/codec

[go/codec](https://github.com/ugorji/go) provides
compile-time helpers for JSON generation. In this case, helpers do not work
like marshalers as they are encoding-independent.

easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks
and about 3x faster for concurrent encoding (without marshaling to a writer).

In an attempt to measure marshaling performance of `go/codec` (as opposed to
allocations/memcpy/writer interface invocations), a benchmark was done with
resetting length of a byte slice rather than resetting the whole slice to nil.
However, the optimization in this exact form may not be applicable in practice,
since the memory is not freed between marshaling operations.

### easyjson vs 'ujson' python module

[ujson](https://github.com/esnme/ultrajson) is using C code for parsing, so it
is interesting to see how plain golang compares to that. It is imporant to note
that the resulting object for python is slower to access, since the library
parses JSON object into dictionaries.

easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for
marshaling.

### Benchmark Results

`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6.
`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6.

#### Unmarshaling

| lib | json size | MB/s | allocs/op | B/op |
|:---------|:----------|-----:|----------:|------:|
| standard | regular | 22 | 218 | 10229 |
| standard | small | 9.7 | 14 | 720 |
| | | | | |
| easyjson | regular | 125 | 128 | 9794 |
| easyjson | small | 67 | 3 | 128 |
| | | | | |
| ffjson | regular | 66 | 141 | 9985 |
| ffjson | small | 17.6 | 10 | 488 |
| | | | | |
| codec | regular | 55 | 434 | 19299 |
| codec | small | 29 | 7 | 336 |
| | | | | |
| ujson | regular | 103 | N/A | N/A |

#### Marshaling, one goroutine.

| lib | json size | MB/s | allocs/op | B/op |
|:----------|:----------|-----:|----------:|------:|
| standard | regular | 75 | 9 | 23256 |
| standard | small | 32 | 3 | 328 |
| standard | large | 80 | 17 | 1.2M |
| | | | | |
| easyjson | regular | 213 | 9 | 10260 |
| easyjson* | regular | 263 | 8 | 742 |
| easyjson | small | 125 | 1 | 128 |
| easyjson | large | 212 | 33 | 490k |
| easyjson* | large | 262 | 25 | 2879 |
| | | | | |
| ffjson | regular | 122 | 153 | 21340 |
| ffjson** | regular | 146 | 152 | 4897 |
| ffjson | small | 36 | 5 | 384 |
| ffjson** | small | 64 | 4 | 128 |
| ffjson | large | 134 | 7317 | 818k |
| ffjson** | large | 125 | 7320 | 827k |
| | | | | |
| codec | regular | 80 | 17 | 33601 |
| codec*** | regular | 108 | 9 | 1153 |
| codec | small | 42 | 3 | 304 |
| codec*** | small | 56 | 1 | 48 |
| codec | large | 73 | 483 | 2.5M |
| codec*** | large | 103 | 451 | 66007 |
| | | | | |
| ujson | regular | 92 | N/A | N/A |

\* marshaling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil

#### Marshaling, concurrent.

| lib | json size | MB/s | allocs/op | B/op |
|:----------|:----------|-----:|----------:|------:|
| standard | regular | 252 | 9 | 23257 |
| standard | small | 124 | 3 | 328 |
| standard | large | 289 | 17 | 1.2M |
| | | | | |
| easyjson | regular | 792 | 9 | 10597 |
| easyjson* | regular | 1748 | 8 | 779 |
| easyjson | small | 333 | 1 | 128 |
| easyjson | large | 718 | 36 | 548k |
| easyjson* | large | 2134 | 25 | 4957 |
| | | | | |
| ffjson | regular | 301 | 153 | 21629 |
| ffjson** | regular | 707 | 152 | 5148 |
| ffjson | small | 62 | 5 | 384 |
| ffjson** | small | 282 | 4 | 128 |
| ffjson | large | 438 | 7330 | 1.0M |
| ffjson** | large | 131 | 7319 | 820k |
| | | | | |
| codec | regular | 183 | 17 | 33603 |
| codec*** | regular | 671 | 9 | 1157 |
| codec | small | 147 | 3 | 304 |
| codec*** | small | 299 | 1 | 48 |
| codec | large | 190 | 483 | 2.5M |
| codec*** | large | 752 | 451 | 77574 |

\* marshaling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil
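To make the easyjson interfaces described in the README above concrete, a hedged sketch of a hand-written implementation; the type name and payload are invented, and the method bodies mirror the vendored RawMessage code further down:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson"
	"github.com/mailru/easyjson/jlexer"
	"github.com/mailru/easyjson/jwriter"
)

// Raw stores a pre-encoded JSON value and supplies its own easyjson hooks,
// so easyjson.Marshal/Unmarshal can use it without any generated code.
type Raw []byte

// MarshalEasyJSON writes the stored JSON verbatim (or null when empty).
func (r *Raw) MarshalEasyJSON(w *jwriter.Writer) {
	if len(*r) == 0 {
		w.RawString("null")
	} else {
		w.Raw(*r, nil)
	}
}

// UnmarshalEasyJSON captures whatever JSON value appears at this position.
func (r *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {
	*r = Raw(l.Raw())
}

// IsDefined integrates with easyjson's omitempty handling (easyjson.Optional).
func (r *Raw) IsDefined() bool { return len(*r) > 0 }

func main() {
	var v Raw
	_ = easyjson.Unmarshal([]byte(`{"hello":"world"}`), &v)
	out, _ := easyjson.Marshal(&v)
	fmt.Println(string(out)) // {"hello":"world"}
}
```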
vendor/github.com/mailru/easyjson/go.mod (generated, vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
module github.com/mailru/easyjson

go 1.12
vendor/github.com/mailru/easyjson/helpers.go (generated, vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
package easyjson

import (
    "io"
    "io/ioutil"
    "net/http"
    "strconv"

    "github.com/mailru/easyjson/jlexer"
    "github.com/mailru/easyjson/jwriter"
)

// Marshaler is an easyjson-compatible marshaler interface.
type Marshaler interface {
    MarshalEasyJSON(w *jwriter.Writer)
}

// Marshaler is an easyjson-compatible unmarshaler interface.
type Unmarshaler interface {
    UnmarshalEasyJSON(w *jlexer.Lexer)
}

// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
type Optional interface {
    IsDefined() bool
}

// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied
// from a chain of smaller chunks.
func Marshal(v Marshaler) ([]byte, error) {
    w := jwriter.Writer{}
    v.MarshalEasyJSON(&w)
    return w.BuildBytes()
}

// MarshalToWriter marshals the data to an io.Writer.
func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {
    jw := jwriter.Writer{}
    v.MarshalEasyJSON(&jw)
    return jw.DumpTo(w)
}

// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the
// http.ResponseWriter, and send the data to the writer. started will be equal to
// false if an error occurred before any http.ResponseWriter methods were actually
// invoked (in this case a 500 reply is possible).
func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) {
    jw := jwriter.Writer{}
    v.MarshalEasyJSON(&jw)
    if jw.Error != nil {
        return false, 0, jw.Error
    }
    w.Header().Set("Content-Type", "application/json")
    w.Header().Set("Content-Length", strconv.Itoa(jw.Size()))

    started = true
    written, err = jw.DumpTo(w)
    return
}

// Unmarshal decodes the JSON in data into the object.
func Unmarshal(data []byte, v Unmarshaler) error {
    l := jlexer.Lexer{Data: data}
    v.UnmarshalEasyJSON(&l)
    return l.Error()
}

// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object.
func UnmarshalFromReader(r io.Reader, v Unmarshaler) error {
    data, err := ioutil.ReadAll(r)
    if err != nil {
        return err
    }
    l := jlexer.Lexer{Data: data}
    v.UnmarshalEasyJSON(&l)
    return l.Error()
}
45
vendor/github.com/mailru/easyjson/raw.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
package easyjson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/mailru/easyjson/jlexer"
|
||||||
|
"github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RawMessage is a raw piece of JSON (number, string, bool, object, array or
|
||||||
|
// null) that is extracted without parsing and output as is during marshaling.
|
||||||
|
type RawMessage []byte
|
||||||
|
|
||||||
|
// MarshalEasyJSON does JSON marshaling using easyjson interface.
|
||||||
|
func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
if len(*v) == 0 {
|
||||||
|
w.RawString("null")
|
||||||
|
} else {
|
||||||
|
w.Raw(*v, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
|
||||||
|
func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {
|
||||||
|
*v = RawMessage(l.Raw())
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements encoding/json.Unmarshaler interface.
|
||||||
|
func (v *RawMessage) UnmarshalJSON(data []byte) error {
|
||||||
|
*v = data
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var nullBytes = []byte("null")
|
||||||
|
|
||||||
|
// MarshalJSON implements encoding/json.Marshaler interface.
|
||||||
|
func (v RawMessage) MarshalJSON() ([]byte, error) {
|
||||||
|
if len(v) == 0 {
|
||||||
|
return nullBytes, nil
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDefined is required for integration with omitempty easyjson logic.
|
||||||
|
func (v *RawMessage) IsDefined() bool {
|
||||||
|
return len(*v) > 0
|
||||||
|
}
|
38
vendor/github.com/olivere/elastic/v7/.fossa.yml
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli)
|
||||||
|
# Visit https://fossa.io to learn more
|
||||||
|
|
||||||
|
version: 1
|
||||||
|
cli:
|
||||||
|
server: https://app.fossa.io
|
||||||
|
fetcher: git
|
||||||
|
project: git@github.com:olivere/elastic.git
|
||||||
|
analyze:
|
||||||
|
modules:
|
||||||
|
- name: github.com/olivere/elastic
|
||||||
|
path: .
|
||||||
|
target: github.com/olivere/elastic
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/config
|
||||||
|
path: ./config
|
||||||
|
target: github.com/olivere/elastic/config
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/uritemplates
|
||||||
|
path: ./uritemplates
|
||||||
|
target: github.com/olivere/elastic/uritemplates
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/trace/opencensus
|
||||||
|
path: ./trace/opencensus
|
||||||
|
target: github.com/olivere/elastic/trace/opencensus
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/trace/opentracing
|
||||||
|
path: ./trace/opentracing
|
||||||
|
target: github.com/olivere/elastic/trace/opentracing
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/aws
|
||||||
|
path: ./aws
|
||||||
|
target: github.com/olivere/elastic/aws
|
||||||
|
type: go
|
||||||
|
- name: github.com/olivere/elastic/aws/v4
|
||||||
|
path: ./aws/v4
|
||||||
|
target: github.com/olivere/elastic/aws/v4
|
||||||
|
type: go
|
35
vendor/github.com/olivere/elastic/v7/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
|
||||||
|
/.vscode/
|
||||||
|
/.idea/
|
||||||
|
/debug.test
|
||||||
|
/generator
|
||||||
|
/cluster-test/cluster-test
|
||||||
|
/cluster-test/*.log
|
||||||
|
/cluster-test/es-chaos-monkey
|
||||||
|
/go.sum
|
||||||
|
/spec
|
||||||
|
/tmp
|
||||||
|
/CHANGELOG-3.0.html
|
||||||
|
|
32
vendor/github.com/olivere/elastic/v7/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
sudo: required
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- "1.12.x"
|
||||||
|
- "1.13.x"
|
||||||
|
- tip
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
env:
|
||||||
|
- GO111MODULE=on
|
||||||
|
- GO111MODULE=off
|
||||||
|
addons:
|
||||||
|
apt:
|
||||||
|
update: true
|
||||||
|
packages:
|
||||||
|
- docker-ce
|
||||||
|
services:
|
||||||
|
- docker
|
||||||
|
before_install:
|
||||||
|
- if [[ "$TRAVIS_OS_NAME" == "linux" && ! $(which nc) ]] ; then sudo apt-get install -y netcat ; fi
|
||||||
|
- sudo sysctl -w vm.max_map_count=262144
|
||||||
|
- docker-compose pull
|
||||||
|
- docker-compose up -d
|
||||||
|
- go get -u github.com/google/go-cmp/cmp
|
||||||
|
- go get -u github.com/fortytw2/leaktest
|
||||||
|
- go get . ./aws/... ./config/... ./trace/... ./uritemplates/...
|
||||||
|
- while ! nc -z localhost 9200; do sleep 1; done
|
||||||
|
- while ! nc -z localhost 9210; do sleep 1; done
|
||||||
|
install: true # ignore the go get -t -v ./...
|
||||||
|
script:
|
||||||
|
- go test -race -deprecations -strict-decoder -v . ./aws/... ./config/... ./trace/... ./uritemplates/...
|
363
vendor/github.com/olivere/elastic/v7/CHANGELOG-3.0.md
generated
vendored
Normal file
|
@ -0,0 +1,363 @@
|
||||||
|
# Elastic 3.0
|
||||||
|
|
||||||
|
Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
|
||||||
|
|
||||||
|
We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft.
|
||||||
|
|
||||||
|
So, to summarize:
|
||||||
|
|
||||||
|
1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
|
||||||
|
2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
|
||||||
|
|
||||||
|
The rest of the document is a list of all changes in Elastic 3.0.
|
||||||
|
|
||||||
|
## Pointer types
|
||||||
|
|
||||||
|
All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example:
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := elastic.NewMatchAllQuery()
|
||||||
|
res, err := elastic.Search("one").Query(&q).Do() // notice the & here
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := elastic.NewMatchAllQuery()
|
||||||
|
res, err := elastic.Search("one").Query(q).Do() // no more &
|
||||||
|
// ... which can be simplified as:
|
||||||
|
res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
|
||||||
|
```
|
||||||
|
|
||||||
|
It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
|
||||||
|
|
||||||
|
## Query/filter merge
|
||||||
|
|
||||||
|
One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
|
||||||
|
|
||||||
|
The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
|
||||||
|
|
||||||
|
Notice that some methods still go by "filter", e.g. `PostFilter`. However, they now accept a `Query` where they used to accept a `Filter`.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := elastic.NewMatchAllQuery()
|
||||||
|
f := elastic.NewTermFilter("tag", "important")
|
||||||
|
res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := elastic.NewMatchAllQuery()
|
||||||
|
f := elastic.NewTermQuery("tag", "important") // it's a query now!
|
||||||
|
res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Facets are removed
|
||||||
|
|
||||||
|
[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
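
A rough sketch of the replacement (not part of the original changelog; the `tweets` index and `user` field are made-up placeholders):

```go
// Hypothetical example: a terms facet becomes a terms aggregation.
agg := elastic.NewTermsAggregation().Field("user").Size(10)
res, err := client.Search().
	Index("tweets").           // made-up index name
	Aggregation("users", agg). // register the aggregation under a name
	Do()
if err == nil {
	if users, found := res.Aggregations.Terms("users"); found {
		for _, bucket := range users.Buckets {
			_ = bucket.Key // one bucket per distinct "user" value
		}
	}
}
```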
|
||||||
|
|
||||||
|
## Errors
|
||||||
|
|
||||||
|
Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer.
|
||||||
|
|
||||||
|
Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
|
||||||
|
|
||||||
|
### HTTP Status 404 (Not Found)
|
||||||
|
|
||||||
|
When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
|
||||||
|
|
||||||
|
Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error.
|
||||||
|
|
||||||
|
To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
|
||||||
|
|
||||||
|
The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
|
||||||
|
if err != nil {
|
||||||
|
// Something else went wrong (but 404 is NOT an error in Elastic 2.0)
|
||||||
|
}
|
||||||
|
if !res.Found {
|
||||||
|
// Document has not been found
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
|
||||||
|
if err != nil {
|
||||||
|
if elastic.IsNotFound(err) {
|
||||||
|
// Document has not been found
|
||||||
|
} else {
|
||||||
|
// Something else went wrong
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### HTTP Status 408 (Timeouts)
|
||||||
|
|
||||||
|
Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
|
||||||
|
|
||||||
|
To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
|
||||||
|
if err != nil {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
if health.TimedOut {
|
||||||
|
// We have a timeout
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
|
||||||
|
if elastic.IsTimeout(err) {
|
||||||
|
// We have a timeout
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bulk Errors
|
||||||
|
|
||||||
|
The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
|
||||||
|
In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
|
||||||
|
These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
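
To illustrate (a sketch, assuming a bulk request whose response is held in a made-up variable `bulkResponse`):

```go
// Hypothetical example: inspect the structured error of each failed bulk item.
for _, item := range bulkResponse.Failed() {
	if item.Error != nil {
		log.Printf("id=%s type=%s reason=%s", item.Id, item.Error.Type, item.Error.Reason)
	}
}
```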
|
||||||
|
|
||||||
|
### Removed specific Elastic errors
|
||||||
|
|
||||||
|
The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
|
||||||
|
|
||||||
|
## Numeric types
|
||||||
|
|
||||||
|
Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
|
||||||
|
|
||||||
|
## Pluralization
|
||||||
|
|
||||||
|
Some services accept zero, one or more indices or types to operate on.
|
||||||
|
E.g. the `SearchService` accepts a list of zero, one, or more indices to
|
||||||
|
search and therefore had a func called `Index(index string)` and a func
|
||||||
|
called `Indices(indices ...string)`.
|
||||||
|
|
||||||
|
Elastic 3.0 now only uses the singular form that, when applicable, accepts a
|
||||||
|
variadic type. E.g. in the case of the `SearchService`, you now only have
|
||||||
|
one func with the following signature: `Index(indices ...string)`.
|
||||||
|
|
||||||
|
Notice this is only limited to `Index(...)` and `Type(...)`. There are other
|
||||||
|
services with variadic functions. These have not been changed.
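
For illustration (a sketch, not from the original text; the index names are placeholders):

```go
// Hypothetical example of the new variadic Index(...) signature.
res, err := client.Search().
	Index("one", "two", "three"). // several indices in a single call
	Query(elastic.NewMatchAllQuery()).
	Do()
```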
|
||||||
|
|
||||||
|
## Multiple calls to variadic functions
|
||||||
|
|
||||||
|
Some services with variadic functions have cleared the underlying slice when
|
||||||
|
called while other services just add to the existing slice. This has now been
|
||||||
|
normalized to always add to the underlying slice.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Would only clear scroll id "two"
|
||||||
|
// because ScrollId cleared the values when called multiple times
|
||||||
|
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Now (correctly) clears both scroll id "one" and "two"
|
||||||
|
// because ScrollId no longer clears the values when called multiple times
|
||||||
|
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Ping service requires URL
|
||||||
|
|
||||||
|
The `Ping` service raised some issues because it is different from all
|
||||||
|
other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
|
||||||
|
|
||||||
|
Users expected to ping the cluster, but that is not possible as the cluster
|
||||||
|
can be a set of many nodes: So which node do we ping then?
|
||||||
|
|
||||||
|
To make it more clear, the `Ping` function on the client now requires users
|
||||||
|
to explicitly set the URL of the node to ping.
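
A minimal sketch (the node URL is just an example, and the return values shown are an assumption about the v3-era `Do()` signature):

```go
// Hypothetical example: ping a specific node, not "the cluster".
res, code, err := client.Ping("http://127.0.0.1:9200").Do()
if err == nil && code == 200 {
	_ = res.ClusterName // name reported by that node
}
```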
|
||||||
|
|
||||||
|
## Meta fields
|
||||||
|
|
||||||
|
Many of the meta fields e.g. `_parent` or `_routing` are now
|
||||||
|
[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
|
||||||
|
and are no longer returned as parts of the `fields` object. We had to change
|
||||||
|
larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
|
||||||
|
|
||||||
|
Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
|
||||||
|
|
||||||
|
## HasParentQuery / HasChildQuery
|
||||||
|
|
||||||
|
`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
allQ := elastic.NewMatchAllQuery()
|
||||||
|
q := elastic.NewHasChildFilter("tweet").Query(&allQ)
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
|
||||||
|
```
|
||||||
|
|
||||||
|
## SetBasicAuth client option
|
||||||
|
|
||||||
|
You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Delete-by-Query API
|
||||||
|
|
||||||
|
The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
|
||||||
|
|
||||||
|
Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
|
||||||
|
|
||||||
|
An older version of this document stated the following:
|
||||||
|
|
||||||
|
> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
|
||||||
|
>
|
||||||
|
> Example for Elastic 3.0 (new):
|
||||||
|
>
|
||||||
|
> ```go
|
||||||
|
> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
|
||||||
|
> if err == elastic.ErrPluginNotFound {
|
||||||
|
> // Delete By Query API is not available
|
||||||
|
> }
|
||||||
|
> ```
|
||||||
|
|
||||||
|
I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch.
|
||||||
|
|
||||||
|
If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
|
||||||
|
|
||||||
|
## HasPlugin and SetRequiredPlugins
|
||||||
|
|
||||||
|
Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
|
||||||
|
|
||||||
|
You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
err, found := client.HasPlugin("delete-by-query")
|
||||||
|
if err == nil && found {
|
||||||
|
// ... Delete By Query API is available
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Will raise an error if the "delete-by-query" plugin is NOT installed
|
||||||
|
client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
|
||||||
|
|
||||||
|
## Common Query has been renamed to Common Terms Query
|
||||||
|
|
||||||
|
The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
|
||||||
|
|
||||||
|
## Remove `MoreLikeThis` and `MoreLikeThisField`
|
||||||
|
|
||||||
|
The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
|
||||||
|
|
||||||
|
## Remove Filtered Query
|
||||||
|
|
||||||
|
With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
|
||||||
|
|
||||||
|
## Remove FuzzyLikeThis and FuzzyLikeThisField
|
||||||
|
|
||||||
|
Both have been removed from Elasticsearch 2.0 as well.
|
||||||
|
|
||||||
|
## Remove LimitFilter
|
||||||
|
|
||||||
|
The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
|
||||||
|
|
||||||
|
## Remove `_cache` and `_cache_key` from filters
|
||||||
|
|
||||||
|
Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
|
||||||
|
|
||||||
|
## Partial fields are gone
|
||||||
|
|
||||||
|
Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
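
A sketch of source filtering through Elastic (the index and field names are made up):

```go
// Hypothetical example: only return selected parts of _source.
fsc := elastic.NewFetchSourceContext(true).
	Include("user", "message") // keep these fields only
res, err := client.Search().
	Index("tweets").
	FetchSourceContext(fsc).
	Do()
```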
|
||||||
|
|
||||||
|
## Scripting
|
||||||
|
|
||||||
|
A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
|
||||||
|
|
||||||
|
Example for Elastic 2.0 (old):
|
||||||
|
|
||||||
|
```go
|
||||||
|
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
|
||||||
|
Script("ctx._source.retweets += num").
|
||||||
|
ScriptParams(map[string]interface{}{"num": 1}).
|
||||||
|
Upsert(map[string]interface{}{"retweets": 0}).
|
||||||
|
Do()
|
||||||
|
```
|
||||||
|
|
||||||
|
Example for Elastic 3.0 (new):
|
||||||
|
|
||||||
|
```go
|
||||||
|
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
|
||||||
|
Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
|
||||||
|
Upsert(map[string]interface{}{"retweets": 0}).
|
||||||
|
Do()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cluster State
|
||||||
|
|
||||||
|
The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
|
||||||
|
|
||||||
|
## Unexported structs in response
|
||||||
|
|
||||||
|
Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, (most) sub-structs are now unexported, meaning you can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
|
||||||
|
|
||||||
|
## Add offset to Histogram aggregation
|
||||||
|
|
||||||
|
Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
|
||||||
|
|
||||||
|
## Services
|
||||||
|
|
||||||
|
### REST API specification
|
||||||
|
|
||||||
|
As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
|
||||||
|
|
||||||
|
Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
|
||||||
|
|
||||||
|
This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
|
||||||
|
|
||||||
|
At the same time, the file names of the services are renamed to match the REST API specification naming.
|
||||||
|
|
||||||
|
### REST API Test Suite
|
||||||
|
|
||||||
|
The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
|
||||||
|
|
||||||
|
This process is not completed yet, though.
|
||||||
|
|
||||||
|
|
195
vendor/github.com/olivere/elastic/v7/CHANGELOG-5.0.md
generated
vendored
Normal file
|
@ -0,0 +1,195 @@
|
||||||
|
# Changes in Elastic 5.0
|
||||||
|
|
||||||
|
## Enforce context.Context in PerformRequest and Do
|
||||||
|
|
||||||
|
We enforce the usage of `context.Context` everywhere you execute a request.
|
||||||
|
You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
|
||||||
|
This enables automatic request cancelation and many other patterns.
|
||||||
|
|
||||||
|
If you don't need this, simply pass `context.TODO()` or `context.Background()`.
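
A minimal sketch of the new calling convention (the index and query are made-up placeholders):

```go
// Hypothetical example: every Do call now takes a context.Context.
ctx := context.Background()
res, err := client.Search().
	Index("tweets").
	Query(elastic.NewMatchAllQuery()).
	Do(ctx) // was Do() before 5.0
```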
|
||||||
|
|
||||||
|
## Warmers removed
|
||||||
|
|
||||||
|
Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
|
||||||
|
|
||||||
|
## Optimize removed
|
||||||
|
|
||||||
|
Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
|
||||||
|
Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
|
||||||
|
|
||||||
|
## Missing Query removed
|
||||||
|
|
||||||
|
The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
|
||||||
|
Use `exists` query with `must_not` in `bool` query instead.
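
A sketch of the replacement (the field name is a made-up placeholder):

```go
// Hypothetical example: "field user is missing" without the missing query.
q := elastic.NewBoolQuery().
	MustNot(elastic.NewExistsQuery("user"))
```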
|
||||||
|
|
||||||
|
## And Query removed
|
||||||
|
|
||||||
|
The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||||
|
Use `must` clauses in a `bool` query instead.
|
||||||
|
|
||||||
|
## Not Query removed
|
||||||
|
|
||||||
|
TODO Is it removed?
|
||||||
|
|
||||||
|
## Or Query removed
|
||||||
|
|
||||||
|
The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||||
|
Use `should` clauses in a `bool` query instead.
|
||||||
|
|
||||||
|
## Filtered Query removed
|
||||||
|
|
||||||
|
The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||||
|
Use `bool` query instead, which supports `filter` clauses too.
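
A sketch of the equivalent `bool` query (the query and filter clauses are made-up placeholders):

```go
// Hypothetical example: a former filtered query expressed as a bool query.
q := elastic.NewBoolQuery().
	Must(elastic.NewMatchQuery("message", "search")).
	Filter(elastic.NewTermQuery("status", "published"))
```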
|
||||||
|
|
||||||
|
## Limit Query removed
|
||||||
|
|
||||||
|
The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||||
|
Use the `terminate_after` parameter instead.
|
||||||
|
|
||||||
|
## Template Query removed
|
||||||
|
|
||||||
|
The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
|
||||||
|
Search Templates instead.
|
||||||
|
|
||||||
|
We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity
|
||||||
|
to get rid of old stuff.
|
||||||
|
|
||||||
|
## `_timestamp` and `_ttl` removed
|
||||||
|
|
||||||
|
Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
|
||||||
|
|
||||||
|
## Search template Put/Delete API returns `acknowledged` only
|
||||||
|
|
||||||
|
The response type for Put/Delete search templates has changed.
|
||||||
|
It only returns a single `acknowledged` flag now.
|
||||||
|
|
||||||
|
## Fields has been renamed to Stored Fields
|
||||||
|
|
||||||
|
The `fields` parameter has been renamed to `stored_fields`.
|
||||||
|
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
|
||||||
|
|
||||||
|
## Fielddatafields has been renamed to Docvaluefields
|
||||||
|
|
||||||
|
The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
|
||||||
|
to `docvalue_fields`.
|
||||||
|
|
||||||
|
## Type exists endpoint changed
|
||||||
|
|
||||||
|
The endpoint for checking whether a type exists has been changed from
|
||||||
|
`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
|
||||||
|
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
|
||||||
|
|
||||||
|
## Refresh parameter changed
|
||||||
|
|
||||||
|
The `?refresh` parameter previously could be a boolean value. It indicated
|
||||||
|
whether changes made by a request (e.g. by the Bulk API) should be immediately
|
||||||
|
visible in search, or not. Using `refresh=true` had the positive effect of
|
||||||
|
immediately seeing the changes when searching; the negative effect is that
|
||||||
|
it is a rather big performance hit.
|
||||||
|
|
||||||
|
With 5.0, you now have the choice between these 3 values.
|
||||||
|
|
||||||
|
* `"true"` - Refresh immediately
|
||||||
|
* `"false"` - Do not refresh (the default value)
|
||||||
|
* `"wait_for"` - Wait until ES made the document visible in search
|
||||||
|
|
||||||
|
See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
|
||||||
|
|
||||||
|
Notice that `true` and `false` (the boolean values) are no longer available
|
||||||
|
now in Elastic. You must use a string instead, with one of the above values.
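
For example (a sketch; the document and index are made up):

```go
// Hypothetical example: ask for "wait_for" refresh semantics on an index call.
ctx := context.Background()
res, err := client.Index().
	Index("tweets").
	Id("1").
	BodyJson(map[string]interface{}{"message": "hello"}).
	Refresh("wait_for"). // "true", "false" or "wait_for"
	Do(ctx)
```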
|
||||||
|
|
||||||
|
## ReindexerService removed
|
||||||
|
|
||||||
|
The `ReindexerService` was a custom solution that was started in the ES 1.x era
|
||||||
|
to automate reindexing data, from one index to another or even between clusters.
|
||||||
|
|
||||||
|
ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html)
|
||||||
|
so we're going to remove our custom solution and ask you to use the native reindexer.
|
||||||
|
|
||||||
|
The `ReindexService` is available via `client.Reindex()` (which used to point
|
||||||
|
to the custom reindexer).
|
||||||
|
|
||||||
|
## Delete By Query back in core
|
||||||
|
|
||||||
|
The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
|
||||||
|
was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
|
||||||
|
|
||||||
|
It has its own endpoint at `/_delete_by_query`.
|
||||||
|
|
||||||
|
Delete By Query, Reindex, and Update By Query are very similar under the hood.
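
A sketch of calling the core API through Elastic (the index and query are made-up placeholders):

```go
// Hypothetical example: delete all documents matching a term query.
ctx := context.Background()
res, err := client.DeleteByQuery("tweets").
	Query(elastic.NewTermQuery("user", "olivere")).
	Do(ctx)
if err == nil {
	_ = res.Deleted // number of documents removed
}
```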
|
||||||
|
|
||||||
|
## Reindex, Delete By Query, and Update By Query response changed
|
||||||
|
|
||||||
|
The response from the above APIs changed a bit. E.g. the `retries` value
|
||||||
|
used to be an `int64` and now returns separate values for `bulk` and `search`:
|
||||||
|
|
||||||
|
```
|
||||||
|
// Old
|
||||||
|
{
|
||||||
|
...
|
||||||
|
"retries": 123,
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
// New
|
||||||
|
{
|
||||||
|
...
|
||||||
|
"retries": {
|
||||||
|
"bulk": 123,
|
||||||
|
"search": 0
|
||||||
|
},
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## ScanService removed
|
||||||
|
|
||||||
|
The `ScanService` is removed. Use the (new) `ScrollService` instead.
|
||||||
|
|
||||||
|
## New ScrollService
|
||||||
|
|
||||||
|
There was confusion around `ScanService` and `ScrollService` doing basically
|
||||||
|
the same. One was returning slices and didn't support all query details, the
|
||||||
|
other returned one document after another and wasn't safe for concurrent use.
|
||||||
|
So we merged the two into a new `ScrollService` that
|
||||||
|
removes all the problems with the older services.
|
||||||
|
|
||||||
|
In other words:
|
||||||
|
If you used `ScanService`, switch to `ScrollService`.
|
||||||
|
If you used the old `ScrollService`, you might need to fix some things but
|
||||||
|
overall it should just work.
|
||||||
|
|
||||||
|
Changes:
|
||||||
|
- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
|
||||||
|
|
||||||
|
TODO Not implemented yet
|
||||||
|
|
||||||
|
## Suggesters
|
||||||
|
|
||||||
|
They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
|
||||||
|
|
||||||
|
Some changes:
|
||||||
|
- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
|
||||||
|
|
||||||
|
TODO Fix all structural changes in suggesters
|
||||||
|
|
||||||
|
## Percolator
|
||||||
|
|
||||||
|
Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
|
||||||
|
|
||||||
|
Elastic 5.0 adds the new
|
||||||
|
[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
|
||||||
|
which can be used in combination with the new
|
||||||
|
[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
|
||||||
|
|
||||||
|
The Percolate service is removed from Elastic 5.0.
|
||||||
|
|
||||||
|
## Remove Consistency, add WaitForActiveShards
|
||||||
|
|
||||||
|
The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
|
||||||
|
Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query APIs.
|
||||||
|
|
||||||
|
It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
|
||||||
|
See https://github.com/elastic/elasticsearch/pull/19454.
|
18
vendor/github.com/olivere/elastic/v7/CHANGELOG-6.0.md
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
# Changes from 5.0 to 6.0
|
||||||
|
|
||||||
|
See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-6.0.html).
|
||||||
|
|
||||||
|
## _all removed
|
||||||
|
|
||||||
|
6.0 has removed support for the `_all` field.
|
||||||
|
|
||||||
|
## Boolean values coerced
|
||||||
|
|
||||||
|
Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`.
|
||||||
|
|
||||||
|
## Single Type Indices
|
||||||
|
|
||||||
|
Notice that 6.0 and future versions will default to single type indices, i.e. you may not use multiple types when e.g. adding an index with a mapping.
|
||||||
|
|
||||||
|
See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/removal-of-types.html#_what_are_mapping_types).
|
||||||
|
|
55
vendor/github.com/olivere/elastic/v7/CHANGELOG-7.0.md
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
# Changes from 6.0 to 7.0
|
||||||
|
|
||||||
|
See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/breaking-changes-7.0.html).
|
||||||
|
|
||||||
|
## SearchHit.Source changed from `*json.RawMessage` to `json.RawMessage`
|
||||||
|
|
||||||
|
The `SearchHit` structure changed from
|
||||||
|
|
||||||
|
```
|
||||||
|
// SearchHit is a single hit.
|
||||||
|
type SearchHit struct {
|
||||||
|
...
|
||||||
|
Source *json.RawMessage `json:"_source,omitempty"` // stored document source
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
to
|
||||||
|
|
||||||
|
```
|
||||||
|
// SearchHit is a single hit.
|
||||||
|
type SearchHit struct {
|
||||||
|
...
|
||||||
|
Source json.RawMessage `json:"_source,omitempty"` // stored document source
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
As `json.RawMessage` is a `[]byte`, there is no need to use `*json.RawMessage`:
a plain `json.RawMessage` is perfectly able to represent a `nil` value.
|
||||||
|
|
||||||
|
So when deserializing the search hits, you need to change your code from:
|
||||||
|
|
||||||
|
```
|
||||||
|
for _, hit := range searchResult.Hits.Hits {
|
||||||
|
var doc Doc
|
||||||
|
err := json.Unmarshal(*hit.Source, &doc) // notice the * here
|
||||||
|
if err != nil {
|
||||||
|
// Deserialization failed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
to
|
||||||
|
|
||||||
|
```
|
||||||
|
for _, hit := range searchResult.Hits.Hits {
|
||||||
|
var doc Doc
|
||||||
|
err := json.Unmarshal(hit.Source, &doc) // no more * here
|
||||||
|
if err != nil {
|
||||||
|
// Deserialization failed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
46
vendor/github.com/olivere/elastic/v7/CODE_OF_CONDUCT.md
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at oliver@eilhard.net. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
40
vendor/github.com/olivere/elastic/v7/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
# How to contribute
|
||||||
|
|
||||||
|
Elastic is an open-source project and we are looking forward to each
|
||||||
|
contribution.
|
||||||
|
|
||||||
|
Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
|
||||||
|
overview of the features of Elasticsearch. However, Elastic tries to resemble
|
||||||
|
the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch).
|
||||||
|
|
||||||
|
This explains why you might think that some options are strange or missing
|
||||||
|
in Elastic, while often they're just different. Please check the Java API first.
|
||||||
|
|
||||||
|
Having said that: Elasticsearch is moving fast and it might be very likely
|
||||||
|
that we missed some features or changes. Feel free to change that.
|
||||||
|
|
||||||
|
## Your Pull Request
|
||||||
|
|
||||||
|
To make it easy to review and understand your changes, please keep the
|
||||||
|
following things in mind before submitting your pull request:
|
||||||
|
|
||||||
|
* You compared the existing implementation with the Java API, did you?
|
||||||
|
* Please work on the latest possible state of `olivere/elastic`.
|
||||||
|
Use `release-branch.v2` for targeting Elasticsearch 1.x and
|
||||||
|
`release-branch.v3` for targeting 2.x.
|
||||||
|
* Create a branch dedicated to your change.
|
||||||
|
* If possible, write a test case which confirms your change.
|
||||||
|
* Make sure your changes and your tests work with all recent versions of
|
||||||
|
Elasticsearch. We currently support Elasticsearch 1.7.x in the
|
||||||
|
release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
|
||||||
|
* Test your changes before creating a pull request (`go test ./...`).
|
||||||
|
* Don't mix several features or bug fixes in one pull request.
|
||||||
|
* Create a meaningful commit message.
|
||||||
|
* Explain your change, e.g. provide a link to the issue you are fixing and
|
||||||
|
probably a link to the Elasticsearch documentation and/or source code.
|
||||||
|
* Format your source with `go fmt`.
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
* [GitHub documentation](https://help.github.com/)
|
||||||
|
* [GitHub pull request documentation](https://help.github.com/en/articles/creating-a-pull-request)
|
193
vendor/github.com/olivere/elastic/v7/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,193 @@
|
||||||
|
# This is a list of people who have contributed code
|
||||||
|
# to the Elastic repository.
|
||||||
|
#
|
||||||
|
# It is just my small "thank you" to all those that helped
|
||||||
|
# making Elastic what it is.
|
||||||
|
#
|
||||||
|
# Please keep this list sorted.
|
||||||
|
|
||||||
|
0x6875790d0a [@huydx](https://github.com/huydx)
|
||||||
|
Aaron Tami [@aarontami](https://github.com/aarontami)
|
||||||
|
Adam Alix [@adamalix](https://github.com/adamalix)
|
||||||
|
Adam Weiner [@adamweiner](https://github.com/adamweiner)
|
||||||
|
Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
|
||||||
|
alehano [@alehano](https://github.com/alehano)
|
||||||
|
Alejandro Carstens [@alejandro-carstens](https://github.com/alejandro-carstens)
|
||||||
|
Alex [@akotlar](https://github.com/akotlar)
|
||||||
|
Alexander Sack [@asac](https://github.com/asac)
|
||||||
|
Alexandre Olivier [@aliphen](https://github.com/aliphen)
|
||||||
|
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
|
||||||
|
Anders [@ANerd](https://github.com/ANerd)
|
||||||
|
AndreKR [@AndreKR](https://github.com/AndreKR)
|
||||||
|
André Bierlein [@ligustah](https://github.com/ligustah)
|
||||||
|
Andrew Dunham [@andrew-d](https://github.com/andrew-d)
|
||||||
|
Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
|
||||||
|
Andy Walker [@alaska](https://github.com/alaska)
|
||||||
|
Arpit Agarwal [@arpiagar](https://github.com/arpiagar)
|
||||||
|
Arquivei [@arquivei](https://github.com/arquivei)
|
||||||
|
Artemiy Elozhenko [@artezh](https://github.com/artezh)
|
||||||
|
arthurgustin [@arthurgustin](https://github.com/arthurgustin)
|
||||||
|
Bas van Dijk [@basvandijk](https://github.com/basvandijk)
|
||||||
|
Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
|
||||||
|
Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
|
||||||
|
Björn Gerdau [@kernle32dll](https://github.com/kernle32dll)
|
||||||
|
Boris Popovschi [@Zyqsempai](https://github.com/Zyqsempai)
|
||||||
|
Bowei Xu [@vancexu](https://github.com/vancexu)
|
||||||
|
Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
|
||||||
|
Brady Love [@bradylove](https://github.com/bradylove)
|
||||||
|
Bryan Conklin [@bmconklin](https://github.com/bmconklin)
|
||||||
|
Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
|
||||||
|
Carl Dunham [@carldunham](https://github.com/carldunham)
|
||||||
|
Carl Johan Gustavsson [@cjgu](https://github.com/cjgu)
|
||||||
|
Cat [@cat-turner](https://github.com/cat-turner)
|
||||||
|
César Jiménez [@cesarjimenez](https://github.com/cesarjimenez)
|
||||||
|
cforbes [@cforbes](https://github.com/cforbes)
|
||||||
|
張泰瑋(Chang Tai Wei) [@david30907d](https://github.com/david30907d)
|
||||||
|
cheshire [@NikitaSerenko](https://github.com/NikitaSerenko)
|
||||||
|
Chris M [@tebriel](https://github.com/tebriel)
|
||||||
|
Chris Rice [@donutmonger](https://github.com/donutmonger)
|
||||||
|
Claudiu Olteanu [@claudiuolteanu](https://github.com/claudiuolteanu)
|
||||||
|
Chris Duncan [@veqryn](https://github.com/veqryn)
|
||||||
|
Chris Ludden [@cludden](https://github.com/cludden)
|
||||||
|
Christophe Courtaut [@kri5](https://github.com/kri5)
|
||||||
|
cmitchell [@cmitchell](https://github.com/cmitchell)
|
||||||
|
Connor Peet [@connor4312](https://github.com/connor4312)
|
||||||
|
Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
|
||||||
|
Corey Scott [@corsc](https://github.com/corsc)
|
||||||
|
Chris Petersen [@ex-nerd](https://github.com/ex-nerd)
|
||||||
|
Daniel Barrett [@shendaras](https://github.com/shendaras)
|
||||||
|
Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
|
||||||
|
Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
|
||||||
|
Daniel Santos [@danlsgiga](https://github.com/danlsgiga)
|
||||||
|
David Emanuel Buchmann [@wuurrd](https://github.com/wuurrd)
|
||||||
|
diacone [@diacone](https://github.com/diacone)
|
||||||
|
Diego Becciolini [@itizir](https://github.com/itizir)
|
||||||
|
Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
|
||||||
|
Elliot Williams [@elliotwms](https://github.com/elliotwms)
|
||||||
|
Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
|
||||||
|
Emil Gedda [@EmilGedda](https://github.com/EmilGedda)
|
||||||
|
Erik Grinaker [@erikgrinaker](https://github.com/erikgrinaker)
|
||||||
|
Erwin [@eticzon](https://github.com/eticzon)
|
||||||
|
Etienne Lafarge [@elafarge](https://github.com/elafarge)
|
||||||
|
Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov)
|
||||||
|
Evan Shaw [@edsrzf](https://github.com/edsrzf)
|
||||||
|
Fanfan [@wenpos](https://github.com/wenpos)
|
||||||
|
Faolan C-P [@fcheslack](https://github.com/fcheslack)
|
||||||
|
Filip Tepper [@filiptepper](https://github.com/filiptepper)
|
||||||
|
Garrett Kelley [@GarrettKelley](https://github.com/GarrettKelley)
|
||||||
|
Gaspard Douady [@plopik](https://github.com/plopik)
|
||||||
|
Gaylord Aulke [@blafasel42](https://github.com/blafasel42)
|
||||||
|
Gerhard Häring [@ghaering](https://github.com/ghaering)
|
||||||
|
gregoryfranklin [@gregoryfranklin](https://github.com/gregoryfranklin)
|
||||||
|
Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
|
||||||
|
Guillaume J. Charmes [@creack](https://github.com/creack)
|
||||||
|
Guiseppe [@gm42](https://github.com/gm42)
|
||||||
|
Han Yu [@MoonighT](https://github.com/MoonighT)
|
||||||
|
Harmen [@alicebob](https://github.com/alicebob)
|
||||||
|
Harrison Wright [@wright8191](https://github.com/wright8191)
|
||||||
|
Henry Clifford [@hcliff](https://github.com/hcliff)
|
||||||
|
Henry Stern [@hstern](https://github.com/hstern)
|
||||||
|
Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
|
||||||
|
initialcontext [@initialcontext](https://github.com/initialcontext)
|
||||||
|
Isaac Saldana [@isaldana](https://github.com/isaldana)
|
||||||
|
J Barkey Wolf [@jjhbw](https://github.com/jjhbw)
|
||||||
|
Jack Lindamood [@cep21](https://github.com/cep21)
|
||||||
|
Jacob [@jdelgad](https://github.com/jdelgad)
|
||||||
|
Jan Düpmeier [@jduepmeier](https://github.com/jduepmeier)
|
Jayme Rotsaert [@jrots](https://github.com/jrots)
Jean-Alexandre Beaumont [@Enteris](https://github.com/Enteris)
Jean-François Roche [@jfroche](https://github.com/jfroche)
Jeff Rand [@jeffrand](https://github.com/jeffrand)
Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
Jérémie Vexiau [@texvex](https://github.com/texvex)
Jesper Bränn [@Yopi](https://github.com/Yopi)
Jim Berlage [@jimberlage](https://github.com/jimberlage)
Joe Buck [@four2five](https://github.com/four2five)
John Barker [@j16r](https://github.com/j16r)
John Goodall [@jgoodall](https://github.com/jgoodall)
John Stanford [@jxstanford](https://github.com/jxstanford)
Jonas Groenaas Drange [@semafor](https://github.com/semafor)
Josef Fröhle [@Dexus](https://github.com/Dexus)
José Martínez [@xose](https://github.com/xose)
Josh Chorlton [@jchorl](https://github.com/jchorl)
Jpnock [@Jpnock](https://github.com/Jpnock)
jun [@coseyo](https://github.com/coseyo)
Junpei Tsuji [@jun06t](https://github.com/jun06t)
kartlee [@kartlee](https://github.com/kartlee)
Keith Hatton [@khatton-ft](https://github.com/khatton-ft)
kel [@liketic](https://github.com/liketic)
Kenta SUZUKI [@suzuken](https://github.com/suzuken)
Kevin Mulvey [@kmulvey](https://github.com/kmulvey)
Kyle Brandt [@kylebrandt](https://github.com/kylebrandt)
Larry Cinnabar [@larrycinnabar](https://github.com/larrycinnabar)
Leandro Piccilli [@lpic10](https://github.com/lpic10)
Lee [@leezhm](https://github.com/leezhm)
lechnertech [@lechnertech](https://github.com/lechnertech)
M. Zulfa Achsani [@misterciput](https://github.com/misterciput)
Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
Mara Kim [@autochthe](https://github.com/autochthe)
Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
Mark Costello [@mcos](https://github.com/mcos)
Martin Häger [@protomouse](https://github.com/protomouse)
Matt Braymer-Hayes [@mattayes](https://github.com/mattayes)
Medhi Bechina [@mdzor](https://github.com/mdzor)
Mike Beshai [@mbesh](https://github.com/mbesh)
mmfrb [@mmfrb](https://github.com/mmfrb)
mnpritula [@mnpritula](https://github.com/mnpritula)
mosa [@mosasiru](https://github.com/mosasiru)
Muhammet Çakır [@cakirmuha](https://github.com/cakirmuha)
naimulhaider [@naimulhaider](https://github.com/naimulhaider)
Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
navins [@ishare](https://github.com/ishare)
Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
Nathan Lacey [@nlacey](https://github.com/nlacey)
NeoCN [@NeoCN](https://github.com/NeoCN)
Nicholas Wolff [@nwolff](https://github.com/nwolff)
Nick K [@utrack](https://github.com/utrack)
Nick Whyte [@nickw444](https://github.com/nickw444)
Nicolae Vartolomei [@nvartolomei](https://github.com/nvartolomei)
okhowang [@okhowang](https://github.com/okhowang)
Orne Brocaar [@brocaar](https://github.com/brocaar)
Paul [@eyeamera](https://github.com/eyeamera)
Paul Oldenburg [@lr-paul](https://github.com/lr-paul)
Pete C [@peteclark-ft](https://github.com/peteclark-ft)
Peter Nagy [@nagypeterjob](https://github.com/nagypeterjob)
Paolo [@ppiccolo](https://github.com/ppiccolo)
Igor Panychek [@panychek](https://github.com/panychek)
Radoslaw Wesolowski [@r--w](https://github.com/r--w)
Rafał Gałus [@rgalus](https://github.com/rgalus)
rchicoli [@rchicoli](https://github.com/rchicoli)
Roman Colohanin [@zuzmic](https://github.com/zuzmic)
Ryan Schmukler [@rschmukler](https://github.com/rschmukler)
Ryan Wynn [@rwynn](https://github.com/rwynn)
Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
Sagan Yaroslav [@sgnrslv](https://github.com/sgnrslv)
Shalin LK [@shalinlk](https://github.com/shalinlk)
Simon Schneider [@raynigon](https://github.com/raynigon)
singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
Slawomir CALUCH [@slawo](https://github.com/slawo)
soarpenguin [@soarpenguin](https://github.com/soarpenguin)
Stephan Krynauw [@skrynauw](https://github.com/skrynauw)
Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
Stuart Warren [@Woz](https://github.com/stuart-warren)
Sulaiman [@salajlan](https://github.com/salajlan)
Sundar [@sundarv85](https://github.com/sundarv85)
Swarlston [@Swarlston](https://github.com/Swarlston)
Take [ww24](https://github.com/ww24)
Tetsuya Morimoto [@t2y](https://github.com/t2y)
TheZeroSlave [@TheZeroSlave](https://github.com/TheZeroSlave)
Tomasz Elendt [@telendt](https://github.com/telendt)
TimeEmit [@TimeEmit](https://github.com/timeemit)
TusharM [@tusharm](https://github.com/tusharm)
wangtuo [@wangtuo](https://github.com/wangtuo)
Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri)
Wesley Kim [@wesleyk](https://github.com/wesleyk)
wolfkdy [@wolfkdy](https://github.com/wolfkdy)
Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
Yuya Kusakabe [@higebu](https://github.com/higebu)
zakthomas [@zakthomas](https://github.com/zakthomas)
Zach [@snowzach](https://github.com/snowzach)
zhangxin [@visaxin](https://github.com/visaxin)
@林 [@zplzpl](https://github.com/zplzpl)
19 vendor/github.com/olivere/elastic/v7/ISSUE_TEMPLATE.md generated vendored Normal file
@@ -0,0 +1,19 @@
Please use the following questions as a guideline to help me answer
your issue/question without further inquiry. Thank you.

### Which version of Elastic are you using?

[ ] elastic.v7 (for Elasticsearch 7.x)
[ ] elastic.v6 (for Elasticsearch 6.x)
[ ] elastic.v5 (for Elasticsearch 5.x)
[ ] elastic.v3 (for Elasticsearch 2.x)
[ ] elastic.v2 (for Elasticsearch 1.x)

### Please describe the expected behavior

### Please describe the actual behavior

### Any steps to reproduce the behavior?
20 vendor/github.com/olivere/elastic/v7/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright © 2012-2015 Oliver Eilhard

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
432 vendor/github.com/olivere/elastic/v7/README.md generated vendored Normal file
@@ -0,0 +1,432 @@
# Elastic

**This is a development branch that is actively being worked on. DO NOT USE IN PRODUCTION! If you want to use stable versions of Elastic, please use Go modules for the 7.x release (or later) or a dependency manager like [dep](https://github.com/golang/dep) for earlier releases.**

Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
[Go](http://www.golang.org/) programming language.

[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v6)](https://travis-ci.org/olivere/elastic)
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/github.com/olivere/elastic)
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Folivere%2Felastic.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Folivere%2Felastic?ref=badge_shield)

See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.

<a href="https://www.buymeacoffee.com/Bjd96U8fm" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>

## Releases

**The release branches (e.g. [`release-branch.v7`](https://github.com/olivere/elastic/tree/release-branch.v7))
are actively being worked on and can break at any time.
If you want to use stable versions of Elastic, please use Go modules.**

Here's the version matrix:

Elasticsearch version | Elastic version | Package URL | Remarks |
----------------------|-----------------|-------------|---------|
7.x | 7.0 | [`github.com/olivere/elastic/v7`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v7) [doc](http://godoc.org/github.com/olivere/elastic)) | Use Go modules.
6.x | 6.0 | [`github.com/olivere/elastic`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v6) [doc](http://godoc.org/github.com/olivere/elastic)) | Use a dependency manager (see below).
5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) | Actively maintained.
2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) | Deprecated. Please update.
1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) | Deprecated. Please update.
0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) | Deprecated. Please update.

**Example:**

You have installed Elasticsearch 7.0.0 and want to use Elastic.
As listed above, you should use Elastic 7.0 (code is in `release-branch.v7`).

To use the required version of Elastic in your application, you
should use [Go modules](https://github.com/golang/go/wiki/Modules)
to manage dependencies. Make sure to use a version such as `7.0.0` or later.

To use Elastic, import:

```go
import "github.com/olivere/elastic/v7"
```
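For readers new to Go modules, a minimal `go.mod` along these lines is enough to pull in the v7 line. This is a sketch only: the module name and the pinned 7.x version are placeholders, not values taken from this README.

```
// go.mod (illustrative only; pick whatever current 7.x release fits your project)
module example.com/myapp

go 1.13

require github.com/olivere/elastic/v7 v7.0.0
```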
### Elastic 7.0

Elastic 7.0 targets Elasticsearch 7.x which [was released on April 10th 2019](https://www.elastic.co/guide/en/elasticsearch/reference/7.0/release-notes-7.0.0.html).

As always with a major version, there are a lot of [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/7.0/release-notes-7.0.0.html#breaking-7.0.0).
We will use this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v7/CHANGELOG-7.0.md),
as we already did in earlier (major) releases.

### Elastic 6.0

Elastic 6.0 targets Elasticsearch 6.x which was [released on 14th November 2017](https://www.elastic.co/blog/elasticsearch-6-0-0-released).

Notice that there are a lot of [breaking changes in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/breaking-changes-6.0.html)
and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v6/CHANGELOG-6.0.md)
as we did in the transition from earlier versions of Elastic.

### Elastic 5.0

Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).

Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).

Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.

### Elastic 3.0

Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).

Elastic 3.0 will only get critical bug fixes. You should update to a recent version.

### Elastic 2.0

Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).

Elastic 2.0 will only get critical bug fixes. You should update to a recent version.

### Elastic 1.0

Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
to a recent version.

However, if you cannot update for some reason, don't worry. Version 1.0 is
still available. All you need to do is go-get it and change your import path
as described above.

## Status

We have used Elastic in production since 2012. Elastic is stable, but the API changes
now and then. We strive for API compatibility.
However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
and we sometimes have to adapt.

Having said that, there have been no big API changes that required you
to rewrite your application big time. More often than not it's renaming APIs
and adding/removing features so that Elastic is in sync with Elasticsearch.

Elastic has been used in production starting with Elasticsearch 0.90 up to recent 7.x
versions. Furthermore, we use [Travis CI](https://travis-ci.org/)
to test Elastic with the most recent versions of Elasticsearch and Go.
See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
for the results.

Elasticsearch has quite a few features. Most of them are implemented
by Elastic. I add features and APIs as required. It's straightforward
to implement missing pieces. I'm accepting pull requests :-)

Having said that, I hope you find the project useful.

## Getting Started

The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.

You typically create one client for your app. Here's a complete example of
creating a client, creating an index, adding a document, executing a search etc.;
a short sketch of that flow also follows the tips below.

An example is available [here](https://olivere.github.io/elastic/).

Here's a [link to a complete working example for v6](https://gist.github.com/olivere/e4a376b4783c0914e44ea4f745ce2ebf).

Here are a few tips on how to get used to Elastic:

1. Head over to the [Wiki](https://github.com/olivere/elastic/wiki) for detailed information and
   topics like e.g. [how to add a middleware](https://github.com/olivere/elastic/wiki/HttpTransport)
   or how to [connect to AWS](https://github.com/olivere/elastic/wiki/Using-with-AWS-Elasticsearch-Service).
2. If you are unsure how to implement something, read the tests (all `_test.go` files).
   They not only serve as a guard against changes, but also as a reference.
3. The [recipes](https://github.com/olivere/elastic/tree/release-branch.v6/recipes)
   contain small examples on how to implement something, e.g. bulk indexing, scrolling etc.
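A minimal sketch of the Getting Started flow described above, assuming a local Elasticsearch node; the index name, document fields and query are made up for illustration, and error handling is collapsed into panics to keep it short.

```go
package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	ctx := context.Background()

	// NewClient connects to http://127.0.0.1:9200 by default.
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		panic(err)
	}

	// Create the (hypothetical) "tweets" index if it does not exist yet.
	exists, err := client.IndexExists("tweets").Do(ctx)
	if err != nil {
		panic(err)
	}
	if !exists {
		if _, err := client.CreateIndex("tweets").Do(ctx); err != nil {
			panic(err)
		}
	}

	// Index a document and refresh so it is searchable immediately.
	doc := map[string]interface{}{"user": "olivere", "message": "Hello Elastic"}
	if _, err := client.Index().Index("tweets").Id("1").BodyJson(doc).Refresh("true").Do(ctx); err != nil {
		panic(err)
	}

	// Run a simple match query against the new index.
	res, err := client.Search().
		Index("tweets").
		Query(elastic.NewMatchQuery("message", "hello")).
		Do(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d hits\n", res.TotalHits())
}
```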
## API Status

### Document APIs

- [x] Index API
- [x] Get API
- [x] Delete API
- [x] Delete By Query API
- [x] Update API
- [x] Update By Query API
- [x] Multi Get API
- [x] Bulk API
- [x] Reindex API
- [x] Term Vectors
- [x] Multi termvectors API

### Search APIs

- [x] Search
- [x] Search Template
- [ ] Multi Search Template
- [x] Search Shards API
- [x] Suggesters
  - [x] Term Suggester
  - [x] Phrase Suggester
  - [x] Completion Suggester
  - [x] Context Suggester
- [x] Multi Search API
- [x] Count API
- [x] Validate API
- [x] Explain API
- [x] Profile API
- [x] Field Capabilities API

### Aggregations

- Metrics Aggregations
  - [x] Avg
  - [x] Cardinality
  - [x] Extended Stats
  - [x] Geo Bounds
  - [x] Geo Centroid
  - [x] Max
  - [x] Min
  - [x] Percentiles
  - [x] Percentile Ranks
  - [ ] Scripted Metric
  - [x] Stats
  - [x] Sum
  - [x] Top Hits
  - [x] Value Count
- Bucket Aggregations
  - [x] Adjacency Matrix
  - [x] Children
  - [x] Auto-interval Date Histogram
  - [x] Date Histogram
  - [x] Date Range
  - [x] Diversified Sampler
  - [x] Filter
  - [x] Filters
  - [x] Geo Distance
  - [ ] GeoHash Grid
  - [x] Global
  - [x] Histogram
  - [x] IP Range
  - [x] Missing
  - [x] Nested
  - [x] Range
  - [x] Reverse Nested
  - [x] Sampler
  - [x] Significant Terms
  - [x] Significant Text
  - [x] Terms
  - [x] Composite
- Pipeline Aggregations
  - [x] Avg Bucket
  - [x] Derivative
  - [x] Max Bucket
  - [x] Min Bucket
  - [x] Sum Bucket
  - [x] Stats Bucket
  - [ ] Extended Stats Bucket
  - [x] Percentiles Bucket
  - [x] Moving Average
  - [x] Cumulative Sum
  - [x] Bucket Script
  - [x] Bucket Selector
  - [x] Bucket Sort
  - [x] Serial Differencing
- [x] Matrix Aggregations
  - [x] Matrix Stats
- [x] Aggregation Metadata

### Indices APIs

- [x] Create Index
- [x] Delete Index
- [x] Get Index
- [x] Indices Exists
- [x] Open / Close Index
- [x] Shrink Index
- [x] Rollover Index
- [x] Put Mapping
- [x] Get Mapping
- [x] Get Field Mapping
- [x] Types Exists
- [x] Index Aliases
- [x] Update Indices Settings
- [x] Get Settings
- [x] Analyze
  - [x] Explain Analyze
- [x] Index Templates
- [x] Indices Stats
- [x] Indices Segments
- [ ] Indices Recovery
- [ ] Indices Shard Stores
- [ ] Clear Cache
- [x] Flush
  - [x] Synced Flush
- [x] Refresh
- [x] Force Merge

### Index Lifecycle Management APIs

- [x] Create Policy
- [x] Get Policy
- [x] Delete Policy
- [ ] Move to Step
- [ ] Remove Policy
- [ ] Retry Policy
- [ ] Get Ilm Status
- [ ] Explain Lifecycle
- [ ] Start Ilm
- [ ] Stop Ilm

### cat APIs

- [X] cat aliases
- [X] cat allocation
- [X] cat count
- [ ] cat fielddata
- [X] cat health
- [X] cat indices
- [ ] cat master
- [ ] cat nodeattrs
- [ ] cat nodes
- [ ] cat pending tasks
- [ ] cat plugins
- [ ] cat recovery
- [ ] cat repositories
- [ ] cat thread pool
- [ ] cat shards
- [ ] cat segments
- [ ] cat snapshots
- [ ] cat templates

### Cluster APIs

- [x] Cluster Health
- [x] Cluster State
- [x] Cluster Stats
- [ ] Pending Cluster Tasks
- [x] Cluster Reroute
- [ ] Cluster Update Settings
- [x] Nodes Stats
- [x] Nodes Info
- [ ] Nodes Feature Usage
- [ ] Remote Cluster Info
- [x] Task Management API
- [ ] Nodes hot_threads
- [ ] Cluster Allocation Explain API

### Query DSL

- [x] Match All Query
- [x] Inner hits
- Full text queries
  - [x] Match Query
  - [x] Match Phrase Query
  - [x] Match Phrase Prefix Query
  - [x] Multi Match Query
  - [x] Common Terms Query
  - [x] Query String Query
  - [x] Simple Query String Query
- Term level queries
  - [x] Term Query
  - [x] Terms Query
  - [x] Terms Set Query
  - [x] Range Query
  - [x] Exists Query
  - [x] Prefix Query
  - [x] Wildcard Query
  - [x] Regexp Query
  - [x] Fuzzy Query
  - [x] Type Query
  - [x] Ids Query
- Compound queries
  - [x] Constant Score Query
  - [x] Bool Query
  - [x] Dis Max Query
  - [x] Function Score Query
  - [x] Boosting Query
- Joining queries
  - [x] Nested Query
  - [x] Has Child Query
  - [x] Has Parent Query
  - [x] Parent Id Query
- Geo queries
  - [ ] GeoShape Query
  - [x] Geo Bounding Box Query
  - [x] Geo Distance Query
  - [x] Geo Polygon Query
- Specialized queries
  - [x] Distance Feature Query
  - [x] More Like This Query
  - [x] Script Query
  - [x] Script Score Query
  - [x] Percolate Query
- Span queries
  - [ ] Span Term Query
  - [ ] Span Multi Term Query
  - [ ] Span First Query
  - [ ] Span Near Query
  - [ ] Span Or Query
  - [ ] Span Not Query
  - [ ] Span Containing Query
  - [ ] Span Within Query
  - [ ] Span Field Masking Query
- [ ] Minimum Should Match
- [ ] Multi Term Query Rewrite

### Modules

- Snapshot and Restore
  - [x] Repositories
  - [x] Snapshot get
  - [x] Snapshot create
  - [x] Snapshot delete
  - [ ] Restore
  - [ ] Snapshot status
  - [ ] Monitoring snapshot/restore status
  - [ ] Stopping currently running snapshot and restore
- Scripting
  - [x] GetScript
  - [x] PutScript
  - [x] DeleteScript

### Sorting

- [x] Sort by score
- [x] Sort by field
- [x] Sort by geo distance
- [x] Sort by script
- [x] Sort by doc

### Scrolling

Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
The `ClearScroll` API is implemented as well.

A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
is described in the [Wiki](https://github.com/olivere/elastic/wiki).
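A sketch of the iterator-style scrolling described above. The index name and page size are illustrative; the loop ends when the ScrollService reports io.EOF.

```go
package esdemo

import (
	"context"
	"io"

	"github.com/olivere/elastic/v7"
)

// scrollAll walks every document in a (hypothetical) "tweets" index,
// 100 hits per page, using the iterator-like ScrollService.
func scrollAll(ctx context.Context, client *elastic.Client) error {
	svc := client.Scroll("tweets").Size(100)
	defer svc.Clear(ctx) // release the server-side scroll context when done

	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			return nil // no more pages
		}
		if err != nil {
			return err
		}
		for _, hit := range res.Hits.Hits {
			_ = hit.Source // raw JSON of each document; process as needed
		}
	}
}
```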
## How to contribute

Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).

## Credits

Thanks a lot for the great folks working hard on
[Elasticsearch](https://www.elastic.co/products/elasticsearch)
and
[Go](https://golang.org/).

Elastic uses portions of the
[uritemplates](https://github.com/jtacoma/uritemplates) library
by Joshua Tacoma,
[backoff](https://github.com/cenkalti/backoff) by Cenk Altı and
[leaktest](https://github.com/fortytw2/leaktest) by Ian Chiles.

## LICENSE

MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
or the LICENSE file provided in the repository for details.

[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Folivere%2Felastic.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Folivere%2Felastic?ref=badge_large)
13 vendor/github.com/olivere/elastic/v7/acknowledged_response.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// AcknowledgedResponse is returned from various APIs. It simply indicates
// whether the operation is ack'd or not.
type AcknowledgedResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
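A tiny sketch of what that struct decodes into; the JSON payload is a made-up example of the kind of acknowledgement body Elasticsearch returns for index-level operations.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Example payload (values made up for illustration).
	body := []byte(`{"acknowledged":true,"shards_acknowledged":true,"index":"gitea_issues"}`)

	var ack elastic.AcknowledgedResponse
	if err := json.Unmarshal(body, &ack); err != nil {
		panic(err)
	}
	fmt.Println(ack.Acknowledged, ack.Index) // true gitea_issues
}
```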
148 vendor/github.com/olivere/elastic/v7/backoff.go generated vendored Normal file
@@ -0,0 +1,148 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"math"
	"math/rand"
	"sync"
	"time"
)

// BackoffFunc specifies the signature of a function that returns the
// time to wait before the next call to a resource. To stop retrying
// return false in the 2nd return value.
type BackoffFunc func(retry int) (time.Duration, bool)

// Backoff allows callers to implement their own Backoff strategy.
type Backoff interface {
	// Next implements a BackoffFunc.
	Next(retry int) (time.Duration, bool)
}

// -- ZeroBackoff --

// ZeroBackoff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting,
// indefinitely.
type ZeroBackoff struct{}

// Next implements BackoffFunc for ZeroBackoff.
func (b ZeroBackoff) Next(retry int) (time.Duration, bool) {
	return 0, true
}

// -- StopBackoff --

// StopBackoff is a fixed backoff policy that always returns false for
// Next(), meaning that the operation should never be retried.
type StopBackoff struct{}

// Next implements BackoffFunc for StopBackoff.
func (b StopBackoff) Next(retry int) (time.Duration, bool) {
	return 0, false
}

// -- ConstantBackoff --

// ConstantBackoff is a backoff policy that always returns the same delay.
type ConstantBackoff struct {
	interval time.Duration
}

// NewConstantBackoff returns a new ConstantBackoff.
func NewConstantBackoff(interval time.Duration) *ConstantBackoff {
	return &ConstantBackoff{interval: interval}
}

// Next implements BackoffFunc for ConstantBackoff.
func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) {
	return b.interval, true
}

// -- Exponential --

// ExponentialBackoff implements the simple exponential backoff described by
// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
type ExponentialBackoff struct {
	t float64 // initial timeout (in msec)
	f float64 // exponential factor (e.g. 2)
	m float64 // maximum timeout (in msec)
}

// NewExponentialBackoff returns a ExponentialBackoff backoff policy.
// Use initialTimeout to set the first/minimal interval
// and maxTimeout to set the maximum wait interval.
func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
	return &ExponentialBackoff{
		t: float64(int64(initialTimeout / time.Millisecond)),
		f: 2.0,
		m: float64(int64(maxTimeout / time.Millisecond)),
	}
}

// Next implements BackoffFunc for ExponentialBackoff.
func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) {
	r := 1.0 + rand.Float64() // random number in [1..2]
	m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m)
	if m >= b.m {
		return 0, false
	}
	d := time.Duration(int64(m)) * time.Millisecond
	return d, true
}

// -- Simple Backoff --

// SimpleBackoff takes a list of fixed values for backoff intervals.
// Each call to Next returns the next value from that fixed list.
// After each value is returned, subsequent calls to Next will only return
// the last element. The values are optionally "jittered" (off by default).
type SimpleBackoff struct {
	sync.Mutex
	ticks  []int
	jitter bool
}

// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
// list of fixed intervals in milliseconds.
func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
	return &SimpleBackoff{
		ticks:  ticks,
		jitter: false,
	}
}

// Jitter enables or disables jittering values.
func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff {
	b.Lock()
	b.jitter = flag
	b.Unlock()
	return b
}

// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
func jitter(millis int) int {
	if millis <= 0 {
		return 0
	}
	return millis/2 + rand.Intn(millis)
}

// Next implements BackoffFunc for SimpleBackoff.
func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) {
	b.Lock()
	defer b.Unlock()

	if retry >= len(b.ticks) {
		return 0, false
	}

	ms := b.ticks[retry]
	if b.jitter {
		ms = jitter(ms)
	}
	return time.Duration(ms) * time.Millisecond, true
}
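A short sketch of how these policies are meant to be driven: keep calling Next with the current retry count until it reports false. The flaky operation being retried here is a placeholder, not part of the library.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Start around 100ms, roughly double each retry, and give up once the
	// computed wait would reach 8s (Next then returns false).
	policy := elastic.NewExponentialBackoff(100*time.Millisecond, 8*time.Second)

	for retry := 0; ; retry++ {
		err := doSomethingFlaky() // placeholder for the real call
		if err == nil {
			break
		}
		wait, ok := policy.Next(retry)
		if !ok {
			break // the backoff policy says stop retrying
		}
		fmt.Printf("retry %d after %v\n", retry, wait)
		time.Sleep(wait)
	}
}

func doSomethingFlaky() error { return errors.New("not implemented") }
```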
470 vendor/github.com/olivere/elastic/v7/bulk.go generated vendored Normal file
@@ -0,0 +1,470 @@
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BulkService allows for batching bulk requests and sending them to
|
||||||
|
// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest,
|
||||||
|
// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch,
|
||||||
|
// then use Do to send them to Elasticsearch.
|
||||||
|
//
|
||||||
|
// BulkService will be reset after each Do call. In other words, you can
|
||||||
|
// reuse BulkService to send many batches. You do not have to create a new
|
||||||
|
// BulkService for each batch.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
|
||||||
|
// for more details.
|
||||||
|
type BulkService struct {
|
||||||
|
client *Client
|
||||||
|
retrier Retrier
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index string
|
||||||
|
typ string
|
||||||
|
requests []BulkableRequest
|
||||||
|
pipeline string
|
||||||
|
timeout string
|
||||||
|
refresh string
|
||||||
|
routing string
|
||||||
|
waitForActiveShards string
|
||||||
|
|
||||||
|
// estimated bulk size in bytes, up to the request index sizeInBytesCursor
|
||||||
|
sizeInBytes int64
|
||||||
|
sizeInBytesCursor int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBulkService initializes a new BulkService.
|
||||||
|
func NewBulkService(client *Client) *BulkService {
|
||||||
|
builder := &BulkService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
return builder
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *BulkService) Pretty(pretty bool) *BulkService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *BulkService) Human(human bool) *BulkService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *BulkService) ErrorTrace(errorTrace bool) *BulkService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *BulkService) FilterPath(filterPath ...string) *BulkService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *BulkService) Header(name string, value string) *BulkService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *BulkService) Headers(headers http.Header) *BulkService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset cleans up the request queue
|
||||||
|
func (s *BulkService) Reset() {
|
||||||
|
s.requests = make([]BulkableRequest, 0)
|
||||||
|
s.sizeInBytes = 0
|
||||||
|
s.sizeInBytesCursor = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrier allows to set specific retry logic for this BulkService.
|
||||||
|
// If not specified, it will use the client's default retrier.
|
||||||
|
func (s *BulkService) Retrier(retrier Retrier) *BulkService {
|
||||||
|
s.retrier = retrier
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index specifies the index to use for all batches. You may also leave
|
||||||
|
// this blank and specify the index in the individual bulk requests.
|
||||||
|
func (s *BulkService) Index(index string) *BulkService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type specifies the type to use for all batches. You may also leave
|
||||||
|
// this blank and specify the type in the individual bulk requests.
|
||||||
|
func (s *BulkService) Type(typ string) *BulkService {
|
||||||
|
s.typ = typ
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout is a global timeout for processing bulk requests. This is a
|
||||||
|
// server-side timeout, i.e. it tells Elasticsearch the time after which
|
||||||
|
// it should stop processing.
|
||||||
|
func (s *BulkService) Timeout(timeout string) *BulkService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh controls when changes made by this request are made visible
|
||||||
|
// to search. The allowed values are: "true" (refresh the relevant
|
||||||
|
// primary and replica shards immediately), "wait_for" (wait for the
|
||||||
|
// changes to be made visible by a refresh before reying), or "false"
|
||||||
|
// (no refresh related actions). The default value is "false".
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
|
||||||
|
// for details.
|
||||||
|
func (s *BulkService) Refresh(refresh string) *BulkService {
|
||||||
|
s.refresh = refresh
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing specifies the routing value.
|
||||||
|
func (s *BulkService) Routing(routing string) *BulkService {
|
||||||
|
s.routing = routing
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pipeline specifies the pipeline id to preprocess incoming documents with.
|
||||||
|
func (s *BulkService) Pipeline(pipeline string) *BulkService {
|
||||||
|
s.pipeline = pipeline
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForActiveShards sets the number of shard copies that must be active
|
||||||
|
// before proceeding with the bulk operation. Defaults to 1, meaning the
|
||||||
|
// primary shard only. Set to `all` for all shard copies, otherwise set to
|
||||||
|
// any non-negative value less than or equal to the total number of copies
|
||||||
|
// for the shard (number of replicas + 1).
|
||||||
|
func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
|
||||||
|
s.waitForActiveShards = waitForActiveShards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
|
||||||
|
// and/or BulkDeleteRequest.
|
||||||
|
func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
|
||||||
|
s.requests = append(s.requests, requests...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// EstimatedSizeInBytes returns the estimated size of all bulkable
|
||||||
|
// requests added via Add.
|
||||||
|
func (s *BulkService) EstimatedSizeInBytes() int64 {
|
||||||
|
if s.sizeInBytesCursor == len(s.requests) {
|
||||||
|
return s.sizeInBytes
|
||||||
|
}
|
||||||
|
for _, r := range s.requests[s.sizeInBytesCursor:] {
|
||||||
|
s.sizeInBytes += s.estimateSizeInBytes(r)
|
||||||
|
s.sizeInBytesCursor++
|
||||||
|
}
|
||||||
|
return s.sizeInBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
// estimateSizeInBytes returns the estimates size of the given
|
||||||
|
// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
|
||||||
|
// BulkDeleteRequest.
|
||||||
|
func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
|
||||||
|
lines, _ := r.Source()
|
||||||
|
size := 0
|
||||||
|
for _, line := range lines {
|
||||||
|
// +1 for the \n
|
||||||
|
size += len(line) + 1
|
||||||
|
}
|
||||||
|
return int64(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NumberOfActions returns the number of bulkable requests that need to
|
||||||
|
// be sent to Elasticsearch on the next batch.
|
||||||
|
func (s *BulkService) NumberOfActions() int {
|
||||||
|
return len(s.requests)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *BulkService) bodyAsString() (string, error) {
|
||||||
|
// Pre-allocate to reduce allocs
|
||||||
|
var buf strings.Builder
|
||||||
|
buf.Grow(int(s.EstimatedSizeInBytes()))
|
||||||
|
|
||||||
|
for _, req := range s.requests {
|
||||||
|
source, err := req.Source()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
for _, line := range source {
|
||||||
|
buf.WriteString(line)
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do sends the batched requests to Elasticsearch. Note that, when successful,
|
||||||
|
// you can reuse the BulkService for the next batch as the list of bulk
|
||||||
|
// requests is cleared on success.
|
||||||
|
func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
|
||||||
|
// No actions?
|
||||||
|
if s.NumberOfActions() == 0 {
|
||||||
|
return nil, errors.New("elastic: No bulk actions to commit")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get body
|
||||||
|
body, err := s.bodyAsString()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build url
|
||||||
|
path := "/"
|
||||||
|
if len(s.index) > 0 {
|
||||||
|
index, err := uritemplates.Expand("{index}", map[string]string{
|
||||||
|
"index": s.index,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
path += index + "/"
|
||||||
|
}
|
||||||
|
if len(s.typ) > 0 {
|
||||||
|
typ, err := uritemplates.Expand("{type}", map[string]string{
|
||||||
|
"type": s.typ,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
path += typ + "/"
|
||||||
|
}
|
||||||
|
path += "_bulk"
|
||||||
|
|
||||||
|
// Parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.pipeline != "" {
|
||||||
|
params.Set("pipeline", s.pipeline)
|
||||||
|
}
|
||||||
|
if s.refresh != "" {
|
||||||
|
params.Set("refresh", s.refresh)
|
||||||
|
}
|
||||||
|
if s.routing != "" {
|
||||||
|
params.Set("routing", s.routing)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.waitForActiveShards != "" {
|
||||||
|
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Body: body,
|
||||||
|
ContentType: "application/x-ndjson",
|
||||||
|
Retrier: s.retrier,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return results
|
||||||
|
ret := new(BulkResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset so the request can be reused
|
||||||
|
s.Reset()
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkResponse is a response to a bulk execution.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// {
|
||||||
|
// "took":3,
|
||||||
|
// "errors":false,
|
||||||
|
// "items":[{
|
||||||
|
// "index":{
|
||||||
|
// "_index":"index1",
|
||||||
|
// "_type":"tweet",
|
||||||
|
// "_id":"1",
|
||||||
|
// "_version":3,
|
||||||
|
// "status":201
|
||||||
|
// }
|
||||||
|
// },{
|
||||||
|
// "index":{
|
||||||
|
// "_index":"index2",
|
||||||
|
// "_type":"tweet",
|
||||||
|
// "_id":"2",
|
||||||
|
// "_version":3,
|
||||||
|
// "status":200
|
||||||
|
// }
|
||||||
|
// },{
|
||||||
|
// "delete":{
|
||||||
|
// "_index":"index1",
|
||||||
|
// "_type":"tweet",
|
||||||
|
// "_id":"1",
|
||||||
|
// "_version":4,
|
||||||
|
// "status":200,
|
||||||
|
// "found":true
|
||||||
|
// }
|
||||||
|
// },{
|
||||||
|
// "update":{
|
||||||
|
// "_index":"index2",
|
||||||
|
// "_type":"tweet",
|
||||||
|
// "_id":"2",
|
||||||
|
// "_version":4,
|
||||||
|
// "status":200
|
||||||
|
// }
|
||||||
|
// }]
|
||||||
|
// }
|
||||||
|
type BulkResponse struct {
|
||||||
|
Took int `json:"took,omitempty"`
|
||||||
|
Errors bool `json:"errors,omitempty"`
|
||||||
|
Items []map[string]*BulkResponseItem `json:"items,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkResponseItem is the result of a single bulk request.
|
||||||
|
type BulkResponseItem struct {
|
||||||
|
Index string `json:"_index,omitempty"`
|
||||||
|
Type string `json:"_type,omitempty"`
|
||||||
|
Id string `json:"_id,omitempty"`
|
||||||
|
Version int64 `json:"_version,omitempty"`
|
||||||
|
Result string `json:"result,omitempty"`
|
||||||
|
Shards *ShardsInfo `json:"_shards,omitempty"`
|
||||||
|
SeqNo int64 `json:"_seq_no,omitempty"`
|
||||||
|
PrimaryTerm int64 `json:"_primary_term,omitempty"`
|
||||||
|
Status int `json:"status,omitempty"`
|
||||||
|
ForcedRefresh bool `json:"forced_refresh,omitempty"`
|
||||||
|
Error *ErrorDetails `json:"error,omitempty"`
|
||||||
|
GetResult *GetResult `json:"get,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Indexed returns all bulk request results of "index" actions.
|
||||||
|
func (r *BulkResponse) Indexed() []*BulkResponseItem {
|
||||||
|
return r.ByAction("index")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Created returns all bulk request results of "create" actions.
|
||||||
|
func (r *BulkResponse) Created() []*BulkResponseItem {
|
||||||
|
return r.ByAction("create")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Updated returns all bulk request results of "update" actions.
|
||||||
|
func (r *BulkResponse) Updated() []*BulkResponseItem {
|
||||||
|
return r.ByAction("update")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deleted returns all bulk request results of "delete" actions.
|
||||||
|
func (r *BulkResponse) Deleted() []*BulkResponseItem {
|
||||||
|
return r.ByAction("delete")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAction returns all bulk request results of a certain action,
|
||||||
|
// e.g. "index" or "delete".
|
||||||
|
func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
|
||||||
|
if r.Items == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var items []*BulkResponseItem
|
||||||
|
for _, item := range r.Items {
|
||||||
|
if result, found := item[action]; found {
|
||||||
|
items = append(items, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return items
|
||||||
|
}
|
||||||
|
|
||||||
|
// ById returns all bulk request results of a given document id,
|
||||||
|
// regardless of the action ("index", "delete" etc.).
|
||||||
|
func (r *BulkResponse) ById(id string) []*BulkResponseItem {
|
||||||
|
if r.Items == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var items []*BulkResponseItem
|
||||||
|
for _, item := range r.Items {
|
||||||
|
for _, result := range item {
|
||||||
|
if result.Id == id {
|
||||||
|
items = append(items, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return items
|
||||||
|
}
|
||||||
|
|
||||||
|
// Failed returns those items of a bulk response that have errors,
|
||||||
|
// i.e. those that don't have a status code between 200 and 299.
|
||||||
|
func (r *BulkResponse) Failed() []*BulkResponseItem {
|
||||||
|
if r.Items == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var errors []*BulkResponseItem
|
||||||
|
for _, item := range r.Items {
|
||||||
|
for _, result := range item {
|
||||||
|
if !(result.Status >= 200 && result.Status <= 299) {
|
||||||
|
errors = append(errors, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// Succeeded returns those items of a bulk response that have no errors,
|
||||||
|
// i.e. those have a status code between 200 and 299.
|
||||||
|
func (r *BulkResponse) Succeeded() []*BulkResponseItem {
|
||||||
|
if r.Items == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var succeeded []*BulkResponseItem
|
||||||
|
for _, item := range r.Items {
|
||||||
|
for _, result := range item {
|
||||||
|
if result.Status >= 200 && result.Status <= 299 {
|
||||||
|
succeeded = append(succeeded, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return succeeded
|
||||||
|
}
|
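The BulkService above is typically driven like this: queue a few index/delete requests, send them in one roundtrip with Do, then inspect Failed() for per-item errors. The index name and documents below are made up for illustration.

```go
package esdemo

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func bulkExample(ctx context.Context, client *elastic.Client) error {
	bulk := client.Bulk().Index("tweets") // default index for all sub-requests

	// Queue two index operations and one delete in a single batch.
	bulk.Add(
		elastic.NewBulkIndexRequest().Id("1").Doc(map[string]string{"msg": "hello"}),
		elastic.NewBulkIndexRequest().Id("2").Doc(map[string]string{"msg": "world"}),
		elastic.NewBulkDeleteRequest().Id("3"),
	)

	// One HTTP roundtrip; the service resets itself on success so it can be reused.
	res, err := bulk.Do(ctx)
	if err != nil {
		return err
	}
	for _, item := range res.Failed() {
		fmt.Printf("bulk item %s failed: %v\n", item.Id, item.Error)
	}
	return nil
}
```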
186 vendor/github.com/olivere/elastic/v7/bulk_delete_request.go generated vendored Normal file
@@ -0,0 +1,186 @@
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
//go:generate easyjson bulk_delete_request.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// -- Bulk delete request --
|
||||||
|
|
||||||
|
// BulkDeleteRequest is a request to remove a document from Elasticsearch.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
|
||||||
|
// for details.
|
||||||
|
type BulkDeleteRequest struct {
|
||||||
|
BulkableRequest
|
||||||
|
index string
|
||||||
|
typ string
|
||||||
|
id string
|
||||||
|
parent string
|
||||||
|
routing string
|
||||||
|
version int64 // default is MATCH_ANY
|
||||||
|
versionType string // default is "internal"
|
||||||
|
ifSeqNo *int64
|
||||||
|
ifPrimaryTerm *int64
|
||||||
|
|
||||||
|
source []string
|
||||||
|
|
||||||
|
useEasyJSON bool
|
||||||
|
}
|
||||||
|
|
||||||
|
//easyjson:json
|
||||||
|
type bulkDeleteRequestCommand map[string]bulkDeleteRequestCommandOp
|
||||||
|
|
||||||
|
//easyjson:json
|
||||||
|
type bulkDeleteRequestCommandOp struct {
|
||||||
|
Index string `json:"_index,omitempty"`
|
||||||
|
Type string `json:"_type,omitempty"`
|
||||||
|
Id string `json:"_id,omitempty"`
|
||||||
|
Parent string `json:"parent,omitempty"`
|
||||||
|
Routing string `json:"routing,omitempty"`
|
||||||
|
Version int64 `json:"version,omitempty"`
|
||||||
|
VersionType string `json:"version_type,omitempty"`
|
||||||
|
IfSeqNo *int64 `json:"if_seq_no,omitempty"`
|
||||||
|
IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBulkDeleteRequest returns a new BulkDeleteRequest.
|
||||||
|
func NewBulkDeleteRequest() *BulkDeleteRequest {
|
||||||
|
return &BulkDeleteRequest{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UseEasyJSON is an experimental setting that enables serialization
|
||||||
|
// with github.com/mailru/easyjson, which should in faster serialization
|
||||||
|
// time and less allocations, but removed compatibility with encoding/json,
|
||||||
|
// usage of unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations
|
||||||
|
// for details. This setting is disabled by default.
|
||||||
|
func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest {
|
||||||
|
r.useEasyJSON = enable
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index specifies the Elasticsearch index to use for this delete request.
|
||||||
|
// If unspecified, the index set on the BulkService will be used.
|
||||||
|
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
|
||||||
|
r.index = index
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type specifies the Elasticsearch type to use for this delete request.
|
||||||
|
// If unspecified, the type set on the BulkService will be used.
|
||||||
|
func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
|
||||||
|
r.typ = typ
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Id specifies the identifier of the document to delete.
|
||||||
|
func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
|
||||||
|
r.id = id
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent specifies the parent of the request, which is used in parent/child
|
||||||
|
// mappings.
|
||||||
|
func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest {
|
||||||
|
r.parent = parent
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing specifies a routing value for the request.
|
||||||
|
func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
|
||||||
|
r.routing = routing
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version indicates the version to be deleted as part of an optimistic
|
||||||
|
// concurrency model.
|
||||||
|
func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
|
||||||
|
r.version = version
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// VersionType can be "internal" (default), "external", "external_gte",
|
||||||
|
// or "external_gt".
|
||||||
|
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
|
||||||
|
r.versionType = versionType
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfSeqNo indicates to only perform the delete operation if the last
|
||||||
|
// operation that has changed the document has the specified sequence number.
|
||||||
|
func (r *BulkDeleteRequest) IfSeqNo(ifSeqNo int64) *BulkDeleteRequest {
|
||||||
|
r.ifSeqNo = &ifSeqNo
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfPrimaryTerm indicates to only perform the delete operation if the
|
||||||
|
// last operation that has changed the document has the specified primary term.
|
||||||
|
func (r *BulkDeleteRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkDeleteRequest {
|
||||||
|
r.ifPrimaryTerm = &ifPrimaryTerm
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the on-wire representation of the delete request,
|
||||||
|
// concatenated as a single string.
|
||||||
|
func (r *BulkDeleteRequest) String() string {
|
||||||
|
lines, err := r.Source()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("error: %v", err)
|
||||||
|
}
|
||||||
|
return strings.Join(lines, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Source returns the on-wire representation of the delete request,
|
||||||
|
// split into an action-and-meta-data line and an (optional) source line.
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
|
||||||
|
// for details.
|
||||||
|
func (r *BulkDeleteRequest) Source() ([]string, error) {
|
||||||
|
if r.source != nil {
|
||||||
|
return r.source, nil
|
||||||
|
}
|
||||||
|
command := bulkDeleteRequestCommand{
|
||||||
|
"delete": bulkDeleteRequestCommandOp{
|
||||||
|
Index: r.index,
|
||||||
|
Type: r.typ,
|
||||||
|
Id: r.id,
|
||||||
|
Routing: r.routing,
|
||||||
|
Parent: r.parent,
|
||||||
|
Version: r.version,
|
||||||
|
VersionType: r.versionType,
|
||||||
|
IfSeqNo: r.ifSeqNo,
|
||||||
|
IfPrimaryTerm: r.ifPrimaryTerm,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var body []byte
|
||||||
|
if r.useEasyJSON {
|
||||||
|
// easyjson
|
||||||
|
body, err = command.MarshalJSON()
|
||||||
|
} else {
|
||||||
|
// encoding/json
|
||||||
|
body, err = json.Marshal(command)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
lines := []string{string(body)}
|
||||||
|
r.source = lines
|
||||||
|
|
||||||
|
return lines, nil
|
||||||
|
}
|
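For completeness, a small sketch of what the BulkDeleteRequest above serializes to; the index, id and routing value are placeholders.

```go
package main

import (
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	req := elastic.NewBulkDeleteRequest().Index("tweets").Id("42").Routing("user-7")

	// Source returns the action-and-metadata line(s) exactly as they are
	// written onto the _bulk request body.
	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Println(line) // e.g. {"delete":{"_index":"tweets","_id":"42","routing":"user-7"}}
	}
}
```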
266 vendor/github.com/olivere/elastic/v7/bulk_delete_request_easyjson.go generated vendored Normal file
@@ -0,0 +1,266 @@
||||||
|
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
json "encoding/json"
|
||||||
|
easyjson "github.com/mailru/easyjson"
|
||||||
|
jlexer "github.com/mailru/easyjson/jlexer"
|
||||||
|
jwriter "github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
// suppress unused package warning
|
||||||
|
var (
|
||||||
|
_ *json.RawMessage
|
||||||
|
_ *jlexer.Lexer
|
||||||
|
_ *jwriter.Writer
|
||||||
|
_ easyjson.Marshaler
|
||||||
|
)
|
||||||
|
|
||||||
|
func easyjson8092efb6DecodeGithubComOlivereElasticV7(in *jlexer.Lexer, out *bulkDeleteRequestCommandOp) {
|
||||||
|
isTopLevel := in.IsStart()
|
||||||
|
if in.IsNull() {
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
in.Skip()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
in.Delim('{')
|
||||||
|
for !in.IsDelim('}') {
|
||||||
|
key := in.UnsafeString()
|
||||||
|
in.WantColon()
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
in.WantComma()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch key {
|
||||||
|
case "_index":
|
||||||
|
out.Index = string(in.String())
|
||||||
|
case "_type":
|
||||||
|
out.Type = string(in.String())
|
||||||
|
case "_id":
|
||||||
|
out.Id = string(in.String())
|
||||||
|
case "parent":
|
||||||
|
out.Parent = string(in.String())
|
||||||
|
case "routing":
|
||||||
|
out.Routing = string(in.String())
|
||||||
|
case "version":
|
||||||
|
out.Version = int64(in.Int64())
|
||||||
|
case "version_type":
|
||||||
|
out.VersionType = string(in.String())
|
||||||
|
case "if_seq_no":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.IfSeqNo = nil
|
||||||
|
} else {
|
||||||
|
if out.IfSeqNo == nil {
|
||||||
|
out.IfSeqNo = new(int64)
|
||||||
|
}
|
||||||
|
*out.IfSeqNo = int64(in.Int64())
|
||||||
|
}
|
||||||
|
case "if_primary_term":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.IfPrimaryTerm = nil
|
||||||
|
} else {
|
||||||
|
if out.IfPrimaryTerm == nil {
|
||||||
|
out.IfPrimaryTerm = new(int64)
|
||||||
|
}
|
||||||
|
*out.IfPrimaryTerm = int64(in.Int64())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
in.SkipRecursive()
|
||||||
|
}
|
||||||
|
in.WantComma()
|
||||||
|
}
|
||||||
|
in.Delim('}')
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func easyjson8092efb6EncodeGithubComOlivereElasticV7(out *jwriter.Writer, in bulkDeleteRequestCommandOp) {
|
||||||
|
out.RawByte('{')
|
||||||
|
first := true
|
||||||
|
_ = first
|
||||||
|
if in.Index != "" {
|
||||||
|
const prefix string = ",\"_index\":"
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
out.String(string(in.Index))
|
||||||
|
}
|
||||||
|
if in.Type != "" {
|
||||||
|
const prefix string = ",\"_type\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Type))
|
||||||
|
}
|
||||||
|
if in.Id != "" {
|
||||||
|
const prefix string = ",\"_id\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Id))
|
||||||
|
}
|
||||||
|
if in.Parent != "" {
|
||||||
|
const prefix string = ",\"parent\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Parent))
|
||||||
|
}
|
||||||
|
if in.Routing != "" {
|
||||||
|
const prefix string = ",\"routing\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Routing))
|
||||||
|
}
|
||||||
|
if in.Version != 0 {
|
||||||
|
const prefix string = ",\"version\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(in.Version))
|
||||||
|
}
|
||||||
|
if in.VersionType != "" {
|
||||||
|
const prefix string = ",\"version_type\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.VersionType))
|
||||||
|
}
|
||||||
|
if in.IfSeqNo != nil {
|
||||||
|
const prefix string = ",\"if_seq_no\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(*in.IfSeqNo))
|
||||||
|
}
|
||||||
|
if in.IfPrimaryTerm != nil {
|
||||||
|
const prefix string = ",\"if_primary_term\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(*in.IfPrimaryTerm))
|
||||||
|
}
|
||||||
|
out.RawByte('}')
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON supports json.Marshaler interface
|
||||||
|
func (v bulkDeleteRequestCommandOp) MarshalJSON() ([]byte, error) {
|
||||||
|
w := jwriter.Writer{}
|
||||||
|
easyjson8092efb6EncodeGithubComOlivereElasticV7(&w, v)
|
||||||
|
return w.Buffer.BuildBytes(), w.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalEasyJSON supports easyjson.Marshaler interface
|
||||||
|
func (v bulkDeleteRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
easyjson8092efb6EncodeGithubComOlivereElasticV7(w, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON supports json.Unmarshaler interface
|
||||||
|
func (v *bulkDeleteRequestCommandOp) UnmarshalJSON(data []byte) error {
|
||||||
|
r := jlexer.Lexer{Data: data}
|
||||||
|
easyjson8092efb6DecodeGithubComOlivereElasticV7(&r, v)
|
||||||
|
return r.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
|
||||||
|
func (v *bulkDeleteRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
|
||||||
|
easyjson8092efb6DecodeGithubComOlivereElasticV7(l, v)
|
||||||
|
}
|
||||||
|
func easyjson8092efb6DecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bulkDeleteRequestCommand) {
|
||||||
|
isTopLevel := in.IsStart()
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
} else {
|
||||||
|
in.Delim('{')
|
||||||
|
if !in.IsDelim('}') {
|
||||||
|
*out = make(bulkDeleteRequestCommand)
|
||||||
|
} else {
|
||||||
|
*out = nil
|
||||||
|
}
|
||||||
|
for !in.IsDelim('}') {
|
||||||
|
key := string(in.String())
|
||||||
|
in.WantColon()
|
||||||
|
var v1 bulkDeleteRequestCommandOp
|
||||||
|
(v1).UnmarshalEasyJSON(in)
|
||||||
|
(*out)[key] = v1
|
||||||
|
in.WantComma()
|
||||||
|
}
|
||||||
|
in.Delim('}')
|
||||||
|
}
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func easyjson8092efb6EncodeGithubComOlivereElasticV71(out *jwriter.Writer, in bulkDeleteRequestCommand) {
|
||||||
|
if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
|
||||||
|
out.RawString(`null`)
|
||||||
|
} else {
|
||||||
|
out.RawByte('{')
|
||||||
|
v2First := true
|
||||||
|
for v2Name, v2Value := range in {
|
||||||
|
if v2First {
|
||||||
|
v2First = false
|
||||||
|
} else {
|
||||||
|
out.RawByte(',')
|
||||||
|
}
|
||||||
|
out.String(string(v2Name))
|
||||||
|
out.RawByte(':')
|
||||||
|
(v2Value).MarshalEasyJSON(out)
|
||||||
|
}
|
||||||
|
out.RawByte('}')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON supports json.Marshaler interface
|
||||||
|
func (v bulkDeleteRequestCommand) MarshalJSON() ([]byte, error) {
|
||||||
|
w := jwriter.Writer{}
|
||||||
|
easyjson8092efb6EncodeGithubComOlivereElasticV71(&w, v)
|
||||||
|
return w.Buffer.BuildBytes(), w.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalEasyJSON supports easyjson.Marshaler interface
|
||||||
|
func (v bulkDeleteRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
easyjson8092efb6EncodeGithubComOlivereElasticV71(w, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON supports json.Unmarshaler interface
|
||||||
|
func (v *bulkDeleteRequestCommand) UnmarshalJSON(data []byte) error {
|
||||||
|
r := jlexer.Lexer{Data: data}
|
||||||
|
easyjson8092efb6DecodeGithubComOlivereElasticV71(&r, v)
|
||||||
|
return r.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
|
||||||
|
func (v *bulkDeleteRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
|
||||||
|
easyjson8092efb6DecodeGithubComOlivereElasticV71(l, v)
|
||||||
|
}
|
260 vendor/github.com/olivere/elastic/v7/bulk_index_request.go (generated, vendored, new file)
@@ -0,0 +1,260 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

//go:generate easyjson bulk_index_request.go

import (
	"encoding/json"
	"fmt"
	"strings"
)

// BulkIndexRequest is a request to add a document to Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
// for details.
type BulkIndexRequest struct {
	BulkableRequest
	index           string
	typ             string
	id              string
	opType          string
	routing         string
	parent          string
	version         *int64 // default is MATCH_ANY
	versionType     string // default is "internal"
	doc             interface{}
	pipeline        string
	retryOnConflict *int
	ifSeqNo         *int64
	ifPrimaryTerm   *int64

	source []string

	useEasyJSON bool
}

//easyjson:json
type bulkIndexRequestCommand map[string]bulkIndexRequestCommandOp

//easyjson:json
type bulkIndexRequestCommandOp struct {
	Index  string `json:"_index,omitempty"`
	Id     string `json:"_id,omitempty"`
	Type   string `json:"_type,omitempty"`
	Parent string `json:"parent,omitempty"`
	// RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
	RetryOnConflict *int   `json:"retry_on_conflict,omitempty"`
	Routing         string `json:"routing,omitempty"`
	Version         *int64 `json:"version,omitempty"`
	VersionType     string `json:"version_type,omitempty"`
	Pipeline        string `json:"pipeline,omitempty"`
	IfSeqNo         *int64 `json:"if_seq_no,omitempty"`
	IfPrimaryTerm   *int64 `json:"if_primary_term,omitempty"`
}

// NewBulkIndexRequest returns a new BulkIndexRequest.
// The operation type is "index" by default.
func NewBulkIndexRequest() *BulkIndexRequest {
	return &BulkIndexRequest{
		opType: "index",
	}
}

// UseEasyJSON is an experimental setting that enables serialization
// with github.com/mailru/easyjson, which should result in faster serialization
// time and fewer allocations, but removes compatibility with encoding/json,
// uses unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations
// for details. This setting is disabled by default.
func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest {
	r.useEasyJSON = enable
	return r
}

// Index specifies the Elasticsearch index to use for this index request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
	r.index = index
	r.source = nil
	return r
}

// Type specifies the Elasticsearch type to use for this index request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
	r.typ = typ
	r.source = nil
	return r
}

// Id specifies the identifier of the document to index.
func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
	r.id = id
	r.source = nil
	return r
}

// OpType specifies if this request should follow create-only or upsert
// behavior. This follows the OpType of the standard document index API.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#operation-type
// for details.
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
	r.opType = opType
	r.source = nil
	return r
}

// Routing specifies a routing value for the request.
func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
	r.routing = routing
	r.source = nil
	return r
}

// Parent specifies the identifier of the parent document (if available).
func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
	r.parent = parent
	r.source = nil
	return r
}

// Version indicates the version of the document as part of an optimistic
// concurrency model.
func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
	v := version
	r.version = &v
	r.source = nil
	return r
}

// VersionType specifies how versions are created. It can be e.g. internal,
// external, external_gte, or force.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#index-versioning
// for details.
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
	r.versionType = versionType
	r.source = nil
	return r
}

// Doc specifies the document to index.
func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
	r.doc = doc
	r.source = nil
	return r
}

// RetryOnConflict specifies how often to retry in case of a version conflict.
func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest {
	r.retryOnConflict = &retryOnConflict
	r.source = nil
	return r
}

// Pipeline to use while processing the request.
func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
	r.pipeline = pipeline
	r.source = nil
	return r
}

// IfSeqNo indicates to only perform the index operation if the last
// operation that has changed the document has the specified sequence number.
func (r *BulkIndexRequest) IfSeqNo(ifSeqNo int64) *BulkIndexRequest {
	r.ifSeqNo = &ifSeqNo
	return r
}

// IfPrimaryTerm indicates to only perform the index operation if the
// last operation that has changed the document has the specified primary term.
func (r *BulkIndexRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkIndexRequest {
	r.ifPrimaryTerm = &ifPrimaryTerm
	return r
}

// String returns the on-wire representation of the index request,
// concatenated as a single string.
func (r *BulkIndexRequest) String() string {
	lines, err := r.Source()
	if err != nil {
		return fmt.Sprintf("error: %v", err)
	}
	return strings.Join(lines, "\n")
}

// Source returns the on-wire representation of the index request,
// split into an action-and-meta-data line and an (optional) source line.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
// for details.
func (r *BulkIndexRequest) Source() ([]string, error) {
	// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
	// { "field1" : "value1" }

	if r.source != nil {
		return r.source, nil
	}

	lines := make([]string, 2)

	// "index" ...
	indexCommand := bulkIndexRequestCommandOp{
		Index:           r.index,
		Type:            r.typ,
		Id:              r.id,
		Routing:         r.routing,
		Parent:          r.parent,
		Version:         r.version,
		VersionType:     r.versionType,
		RetryOnConflict: r.retryOnConflict,
		Pipeline:        r.pipeline,
		IfSeqNo:         r.ifSeqNo,
		IfPrimaryTerm:   r.ifPrimaryTerm,
	}
	command := bulkIndexRequestCommand{
		r.opType: indexCommand,
	}

	var err error
	var body []byte
	if r.useEasyJSON {
		// easyjson
		body, err = command.MarshalJSON()
	} else {
		// encoding/json
		body, err = json.Marshal(command)
	}
	if err != nil {
		return nil, err
	}

	lines[0] = string(body)

	// "field1" ...
	if r.doc != nil {
		switch t := r.doc.(type) {
		default:
			body, err := json.Marshal(r.doc)
			if err != nil {
				return nil, err
			}
			lines[1] = string(body)
		case json.RawMessage:
			lines[1] = string(t)
		case *json.RawMessage:
			lines[1] = string(*t)
		case string:
			lines[1] = t
		case *string:
			lines[1] = *t
		}
	} else {
		lines[1] = "{}"
	}

	r.source = lines
	return lines, nil
}
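BulkIndexRequest.Source returns two lines, the action-and-meta-data line and the document source, which the bulk service later joins into the request body. A rough sketch of building one such request; the index name and document fields are illustrative, not part of the library:

package main

import (
	"fmt"
	"strings"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	// Example document; field names are arbitrary here.
	doc := map[string]interface{}{"title": "issue title", "content": "issue body"}

	req := elastic.NewBulkIndexRequest().
		Index("gitea_issues").
		Id("1").
		Doc(doc)

	// Source yields the two on-wire lines, e.g.
	//   {"index":{"_index":"gitea_issues","_id":"1"}}
	//   {"content":"issue body","title":"issue title"}
	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	fmt.Println(strings.Join(lines, "\n"))
}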
306 vendor/github.com/olivere/elastic/v7/bulk_index_request_easyjson.go (generated, vendored, new file)
@@ -0,0 +1,306 @@
|
||||||
|
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
json "encoding/json"
|
||||||
|
easyjson "github.com/mailru/easyjson"
|
||||||
|
jlexer "github.com/mailru/easyjson/jlexer"
|
||||||
|
jwriter "github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
// suppress unused package warning
|
||||||
|
var (
|
||||||
|
_ *json.RawMessage
|
||||||
|
_ *jlexer.Lexer
|
||||||
|
_ *jwriter.Writer
|
||||||
|
_ easyjson.Marshaler
|
||||||
|
)
|
||||||
|
|
||||||
|
func easyjson9de0fcbfDecodeGithubComOlivereElasticV7(in *jlexer.Lexer, out *bulkIndexRequestCommandOp) {
|
||||||
|
isTopLevel := in.IsStart()
|
||||||
|
if in.IsNull() {
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
in.Skip()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
in.Delim('{')
|
||||||
|
for !in.IsDelim('}') {
|
||||||
|
key := in.UnsafeString()
|
||||||
|
in.WantColon()
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
in.WantComma()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch key {
|
||||||
|
case "_index":
|
||||||
|
out.Index = string(in.String())
|
||||||
|
case "_id":
|
||||||
|
out.Id = string(in.String())
|
||||||
|
case "_type":
|
||||||
|
out.Type = string(in.String())
|
||||||
|
case "parent":
|
||||||
|
out.Parent = string(in.String())
|
||||||
|
case "retry_on_conflict":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.RetryOnConflict = nil
|
||||||
|
} else {
|
||||||
|
if out.RetryOnConflict == nil {
|
||||||
|
out.RetryOnConflict = new(int)
|
||||||
|
}
|
||||||
|
*out.RetryOnConflict = int(in.Int())
|
||||||
|
}
|
||||||
|
case "routing":
|
||||||
|
out.Routing = string(in.String())
|
||||||
|
case "version":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.Version = nil
|
||||||
|
} else {
|
||||||
|
if out.Version == nil {
|
||||||
|
out.Version = new(int64)
|
||||||
|
}
|
||||||
|
*out.Version = int64(in.Int64())
|
||||||
|
}
|
||||||
|
case "version_type":
|
||||||
|
out.VersionType = string(in.String())
|
||||||
|
case "pipeline":
|
||||||
|
out.Pipeline = string(in.String())
|
||||||
|
case "if_seq_no":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.IfSeqNo = nil
|
||||||
|
} else {
|
||||||
|
if out.IfSeqNo == nil {
|
||||||
|
out.IfSeqNo = new(int64)
|
||||||
|
}
|
||||||
|
*out.IfSeqNo = int64(in.Int64())
|
||||||
|
}
|
||||||
|
case "if_primary_term":
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
out.IfPrimaryTerm = nil
|
||||||
|
} else {
|
||||||
|
if out.IfPrimaryTerm == nil {
|
||||||
|
out.IfPrimaryTerm = new(int64)
|
||||||
|
}
|
||||||
|
*out.IfPrimaryTerm = int64(in.Int64())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
in.SkipRecursive()
|
||||||
|
}
|
||||||
|
in.WantComma()
|
||||||
|
}
|
||||||
|
in.Delim('}')
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func easyjson9de0fcbfEncodeGithubComOlivereElasticV7(out *jwriter.Writer, in bulkIndexRequestCommandOp) {
|
||||||
|
out.RawByte('{')
|
||||||
|
first := true
|
||||||
|
_ = first
|
||||||
|
if in.Index != "" {
|
||||||
|
const prefix string = ",\"_index\":"
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
out.String(string(in.Index))
|
||||||
|
}
|
||||||
|
if in.Id != "" {
|
||||||
|
const prefix string = ",\"_id\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Id))
|
||||||
|
}
|
||||||
|
if in.Type != "" {
|
||||||
|
const prefix string = ",\"_type\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Type))
|
||||||
|
}
|
||||||
|
if in.Parent != "" {
|
||||||
|
const prefix string = ",\"parent\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Parent))
|
||||||
|
}
|
||||||
|
if in.RetryOnConflict != nil {
|
||||||
|
const prefix string = ",\"retry_on_conflict\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int(int(*in.RetryOnConflict))
|
||||||
|
}
|
||||||
|
if in.Routing != "" {
|
||||||
|
const prefix string = ",\"routing\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Routing))
|
||||||
|
}
|
||||||
|
if in.Version != nil {
|
||||||
|
const prefix string = ",\"version\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(*in.Version))
|
||||||
|
}
|
||||||
|
if in.VersionType != "" {
|
||||||
|
const prefix string = ",\"version_type\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.VersionType))
|
||||||
|
}
|
||||||
|
if in.Pipeline != "" {
|
||||||
|
const prefix string = ",\"pipeline\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.String(string(in.Pipeline))
|
||||||
|
}
|
||||||
|
if in.IfSeqNo != nil {
|
||||||
|
const prefix string = ",\"if_seq_no\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(*in.IfSeqNo))
|
||||||
|
}
|
||||||
|
if in.IfPrimaryTerm != nil {
|
||||||
|
const prefix string = ",\"if_primary_term\":"
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
out.RawString(prefix[1:])
|
||||||
|
} else {
|
||||||
|
out.RawString(prefix)
|
||||||
|
}
|
||||||
|
out.Int64(int64(*in.IfPrimaryTerm))
|
||||||
|
}
|
||||||
|
out.RawByte('}')
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON supports json.Marshaler interface
|
||||||
|
func (v bulkIndexRequestCommandOp) MarshalJSON() ([]byte, error) {
|
||||||
|
w := jwriter.Writer{}
|
||||||
|
easyjson9de0fcbfEncodeGithubComOlivereElasticV7(&w, v)
|
||||||
|
return w.Buffer.BuildBytes(), w.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalEasyJSON supports easyjson.Marshaler interface
|
||||||
|
func (v bulkIndexRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
easyjson9de0fcbfEncodeGithubComOlivereElasticV7(w, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON supports json.Unmarshaler interface
|
||||||
|
func (v *bulkIndexRequestCommandOp) UnmarshalJSON(data []byte) error {
|
||||||
|
r := jlexer.Lexer{Data: data}
|
||||||
|
easyjson9de0fcbfDecodeGithubComOlivereElasticV7(&r, v)
|
||||||
|
return r.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
|
||||||
|
func (v *bulkIndexRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
|
||||||
|
easyjson9de0fcbfDecodeGithubComOlivereElasticV7(l, v)
|
||||||
|
}
|
||||||
|
func easyjson9de0fcbfDecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bulkIndexRequestCommand) {
|
||||||
|
isTopLevel := in.IsStart()
|
||||||
|
if in.IsNull() {
|
||||||
|
in.Skip()
|
||||||
|
} else {
|
||||||
|
in.Delim('{')
|
||||||
|
if !in.IsDelim('}') {
|
||||||
|
*out = make(bulkIndexRequestCommand)
|
||||||
|
} else {
|
||||||
|
*out = nil
|
||||||
|
}
|
||||||
|
for !in.IsDelim('}') {
|
||||||
|
key := string(in.String())
|
||||||
|
in.WantColon()
|
||||||
|
var v1 bulkIndexRequestCommandOp
|
||||||
|
(v1).UnmarshalEasyJSON(in)
|
||||||
|
(*out)[key] = v1
|
||||||
|
in.WantComma()
|
||||||
|
}
|
||||||
|
in.Delim('}')
|
||||||
|
}
|
||||||
|
if isTopLevel {
|
||||||
|
in.Consumed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func easyjson9de0fcbfEncodeGithubComOlivereElasticV71(out *jwriter.Writer, in bulkIndexRequestCommand) {
|
||||||
|
if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
|
||||||
|
out.RawString(`null`)
|
||||||
|
} else {
|
||||||
|
out.RawByte('{')
|
||||||
|
v2First := true
|
||||||
|
for v2Name, v2Value := range in {
|
||||||
|
if v2First {
|
||||||
|
v2First = false
|
||||||
|
} else {
|
||||||
|
out.RawByte(',')
|
||||||
|
}
|
||||||
|
out.String(string(v2Name))
|
||||||
|
out.RawByte(':')
|
||||||
|
(v2Value).MarshalEasyJSON(out)
|
||||||
|
}
|
||||||
|
out.RawByte('}')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON supports json.Marshaler interface
|
||||||
|
func (v bulkIndexRequestCommand) MarshalJSON() ([]byte, error) {
|
||||||
|
w := jwriter.Writer{}
|
||||||
|
easyjson9de0fcbfEncodeGithubComOlivereElasticV71(&w, v)
|
||||||
|
return w.Buffer.BuildBytes(), w.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalEasyJSON supports easyjson.Marshaler interface
|
||||||
|
func (v bulkIndexRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
easyjson9de0fcbfEncodeGithubComOlivereElasticV71(w, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON supports json.Unmarshaler interface
|
||||||
|
func (v *bulkIndexRequestCommand) UnmarshalJSON(data []byte) error {
|
||||||
|
r := jlexer.Lexer{Data: data}
|
||||||
|
easyjson9de0fcbfDecodeGithubComOlivereElasticV71(&r, v)
|
||||||
|
return r.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
|
||||||
|
func (v *bulkIndexRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
|
||||||
|
easyjson9de0fcbfDecodeGithubComOlivereElasticV71(l, v)
|
||||||
|
}
|
656 vendor/github.com/olivere/elastic/v7/bulk_processor.go (generated, vendored, new file)
@@ -0,0 +1,656 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrBulkItemRetry is returned in BulkProcessor from a worker when
|
||||||
|
// a response item needs to be retried.
|
||||||
|
ErrBulkItemRetry = errors.New("elastic: uncommitted bulk response items")
|
||||||
|
|
||||||
|
defaultRetryItemStatusCodes = []int{408, 429, 503, 507}
|
||||||
|
)
|
||||||
|
|
||||||
|
// BulkProcessorService allows to easily process bulk requests. It allows setting
|
||||||
|
// policies when to flush new bulk requests, e.g. based on a number of actions,
|
||||||
|
// on the size of the actions, and/or to flush periodically. It also allows
|
||||||
|
// to control the number of concurrent bulk requests allowed to be executed
|
||||||
|
// in parallel.
|
||||||
|
//
|
||||||
|
// BulkProcessorService, by default, commits either every 1000 requests or when the
|
||||||
|
// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
|
||||||
|
// commit periodically. BulkProcessorService also does retry by default, using
|
||||||
|
// an exponential backoff algorithm. It also will automatically re-enqueue items
|
||||||
|
// returned with a status of 408, 429, 503 or 507. You can change this
|
||||||
|
// behavior with RetryItemStatusCodes.
|
||||||
|
//
|
||||||
|
// The caller is responsible for setting the index and type on every
|
||||||
|
// bulk request added to BulkProcessorService.
|
||||||
|
//
|
||||||
|
// BulkProcessorService takes ideas from the BulkProcessor of the
|
||||||
|
// Elasticsearch Java API as documented in
|
||||||
|
// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
|
||||||
|
type BulkProcessorService struct {
|
||||||
|
c *Client
|
||||||
|
beforeFn BulkBeforeFunc
|
||||||
|
afterFn BulkAfterFunc
|
||||||
|
name string // name of processor
|
||||||
|
numWorkers int // # of workers (>= 1)
|
||||||
|
bulkActions int // # of requests after which to commit
|
||||||
|
bulkSize int // # of bytes after which to commit
|
||||||
|
flushInterval time.Duration // periodic flush interval
|
||||||
|
wantStats bool // indicates whether to gather statistics
|
||||||
|
backoff Backoff // a custom Backoff to use for errors
|
||||||
|
retryItemStatusCodes []int // array of status codes for bulk response line items that may be retried
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBulkProcessorService creates a new BulkProcessorService.
|
||||||
|
func NewBulkProcessorService(client *Client) *BulkProcessorService {
|
||||||
|
return &BulkProcessorService{
|
||||||
|
c: client,
|
||||||
|
numWorkers: 1,
|
||||||
|
bulkActions: 1000,
|
||||||
|
bulkSize: 5 << 20, // 5 MB
|
||||||
|
backoff: NewExponentialBackoff(
|
||||||
|
time.Duration(200)*time.Millisecond,
|
||||||
|
time.Duration(10000)*time.Millisecond,
|
||||||
|
),
|
||||||
|
retryItemStatusCodes: defaultRetryItemStatusCodes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkBeforeFunc defines the signature of callbacks that are executed
|
||||||
|
// before a commit to Elasticsearch.
|
||||||
|
type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
|
||||||
|
|
||||||
|
// BulkAfterFunc defines the signature of callbacks that are executed
|
||||||
|
// after a commit to Elasticsearch. The err parameter signals an error.
|
||||||
|
type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
|
||||||
|
|
||||||
|
// Before specifies a function to be executed before bulk requests get committed
|
||||||
|
// to Elasticsearch.
|
||||||
|
func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
|
||||||
|
s.beforeFn = fn
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// After specifies a function to be executed when bulk requests have been
|
||||||
|
// committed to Elasticsearch. The After callback executes both when the
|
||||||
|
// commit was successful as well as on failures.
|
||||||
|
func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
|
||||||
|
s.afterFn = fn
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name is an optional name to identify this bulk processor.
|
||||||
|
func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
|
||||||
|
s.name = name
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Workers is the number of concurrent workers allowed to be
|
||||||
|
// executed. Defaults to 1 and must be greater or equal to 1.
|
||||||
|
func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
|
||||||
|
s.numWorkers = num
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkActions specifies when to flush based on the number of actions
|
||||||
|
// currently added. Defaults to 1000 and can be set to -1 to be disabled.
|
||||||
|
func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
|
||||||
|
s.bulkActions = bulkActions
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkSize specifies when to flush based on the size (in bytes) of the actions
|
||||||
|
// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
|
||||||
|
func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
|
||||||
|
s.bulkSize = bulkSize
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlushInterval specifies when to flush at the end of the given interval.
|
||||||
|
// This is disabled by default. If you want the bulk processor to
|
||||||
|
// operate completely asynchronously, set both BulkActions and BulkSize to
|
||||||
|
// -1 and set the FlushInterval to a meaningful interval.
|
||||||
|
func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
|
||||||
|
s.flushInterval = interval
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats tells bulk processor to gather stats while running.
|
||||||
|
// Use Stats to return the stats. This is disabled by default.
|
||||||
|
func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
|
||||||
|
s.wantStats = wantStats
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backoff sets the backoff strategy to use for errors.
|
||||||
|
func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService {
|
||||||
|
s.backoff = backoff
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryItemStatusCodes sets an array of status codes that indicate that a bulk
|
||||||
|
// response line item should be retried.
|
||||||
|
func (s *BulkProcessorService) RetryItemStatusCodes(retryItemStatusCodes ...int) *BulkProcessorService {
|
||||||
|
s.retryItemStatusCodes = retryItemStatusCodes
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do creates a new BulkProcessor and starts it.
|
||||||
|
// Consider the BulkProcessor as a running instance that accepts bulk requests
|
||||||
|
// and commits them to Elasticsearch, spreading the work across one or more
|
||||||
|
// workers.
|
||||||
|
//
|
||||||
|
// You can interoperate with the BulkProcessor returned by Do, e.g. Start and
|
||||||
|
// Stop (or Close) it.
|
||||||
|
//
|
||||||
|
// Context is an optional context that is passed into the bulk request
|
||||||
|
// service calls. In contrast to other operations, this context is used in
|
||||||
|
// a long running process. You could use it to pass e.g. loggers, but you
|
||||||
|
// shouldn't use it for cancellation.
|
||||||
|
//
|
||||||
|
// Calling Do several times returns new BulkProcessors. You probably don't
|
||||||
|
// want to do this. BulkProcessorService implements just a builder pattern.
|
||||||
|
func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
|
||||||
|
|
||||||
|
retryItemStatusCodes := make(map[int]struct{})
|
||||||
|
for _, code := range s.retryItemStatusCodes {
|
||||||
|
retryItemStatusCodes[code] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
p := newBulkProcessor(
|
||||||
|
s.c,
|
||||||
|
s.beforeFn,
|
||||||
|
s.afterFn,
|
||||||
|
s.name,
|
||||||
|
s.numWorkers,
|
||||||
|
s.bulkActions,
|
||||||
|
s.bulkSize,
|
||||||
|
s.flushInterval,
|
||||||
|
s.wantStats,
|
||||||
|
s.backoff,
|
||||||
|
retryItemStatusCodes)
|
||||||
|
|
||||||
|
err := p.Start(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Bulk Processor Statistics --
|
||||||
|
|
||||||
|
// BulkProcessorStats contains various statistics of a bulk processor
|
||||||
|
// while it is running. Use the Stats func to return it while running.
|
||||||
|
type BulkProcessorStats struct {
|
||||||
|
Flushed int64 // number of times the flush interval has been invoked
|
||||||
|
Committed int64 // # of times workers committed bulk requests
|
||||||
|
Indexed int64 // # of requests indexed
|
||||||
|
Created int64 // # of requests that ES reported as creates (201)
|
||||||
|
Updated int64 // # of requests that ES reported as updates
|
||||||
|
Deleted int64 // # of requests that ES reported as deletes
|
||||||
|
Succeeded int64 // # of requests that ES reported as successful
|
||||||
|
Failed int64 // # of requests that ES reported as failed
|
||||||
|
|
||||||
|
Workers []*BulkProcessorWorkerStats // stats for each worker
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkProcessorWorkerStats represents per-worker statistics.
|
||||||
|
type BulkProcessorWorkerStats struct {
|
||||||
|
Queued int64 // # of requests queued in this worker
|
||||||
|
LastDuration time.Duration // duration of last commit
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
|
||||||
|
func newBulkProcessorStats(workers int) *BulkProcessorStats {
|
||||||
|
stats := &BulkProcessorStats{
|
||||||
|
Workers: make([]*BulkProcessorWorkerStats, workers),
|
||||||
|
}
|
||||||
|
for i := 0; i < workers; i++ {
|
||||||
|
stats.Workers[i] = &BulkProcessorWorkerStats{}
|
||||||
|
}
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (st *BulkProcessorStats) dup() *BulkProcessorStats {
|
||||||
|
dst := new(BulkProcessorStats)
|
||||||
|
dst.Flushed = st.Flushed
|
||||||
|
dst.Committed = st.Committed
|
||||||
|
dst.Indexed = st.Indexed
|
||||||
|
dst.Created = st.Created
|
||||||
|
dst.Updated = st.Updated
|
||||||
|
dst.Deleted = st.Deleted
|
||||||
|
dst.Succeeded = st.Succeeded
|
||||||
|
dst.Failed = st.Failed
|
||||||
|
for _, src := range st.Workers {
|
||||||
|
dst.Workers = append(dst.Workers, src.dup())
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
|
||||||
|
dst := new(BulkProcessorWorkerStats)
|
||||||
|
dst.Queued = st.Queued
|
||||||
|
dst.LastDuration = st.LastDuration
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Bulk Processor --
|
||||||
|
|
||||||
|
// BulkProcessor encapsulates a task that accepts bulk requests and
|
||||||
|
// orchestrates committing them to Elasticsearch via one or more workers.
|
||||||
|
//
|
||||||
|
// BulkProcessor is returned by setting up a BulkProcessorService and
|
||||||
|
// calling the Do method.
|
||||||
|
type BulkProcessor struct {
|
||||||
|
c *Client
|
||||||
|
beforeFn BulkBeforeFunc
|
||||||
|
afterFn BulkAfterFunc
|
||||||
|
name string
|
||||||
|
bulkActions int
|
||||||
|
bulkSize int
|
||||||
|
numWorkers int
|
||||||
|
executionId int64
|
||||||
|
requestsC chan BulkableRequest
|
||||||
|
workerWg sync.WaitGroup
|
||||||
|
workers []*bulkWorker
|
||||||
|
flushInterval time.Duration
|
||||||
|
flusherStopC chan struct{}
|
||||||
|
wantStats bool
|
||||||
|
retryItemStatusCodes map[int]struct{}
|
||||||
|
backoff Backoff
|
||||||
|
|
||||||
|
startedMu sync.Mutex // guards the following block
|
||||||
|
started bool
|
||||||
|
|
||||||
|
statsMu sync.Mutex // guards the following block
|
||||||
|
stats *BulkProcessorStats
|
||||||
|
|
||||||
|
stopReconnC chan struct{} // channel to signal stop reconnection attempts
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBulkProcessor(
|
||||||
|
client *Client,
|
||||||
|
beforeFn BulkBeforeFunc,
|
||||||
|
afterFn BulkAfterFunc,
|
||||||
|
name string,
|
||||||
|
numWorkers int,
|
||||||
|
bulkActions int,
|
||||||
|
bulkSize int,
|
||||||
|
flushInterval time.Duration,
|
||||||
|
wantStats bool,
|
||||||
|
backoff Backoff,
|
||||||
|
retryItemStatusCodes map[int]struct{}) *BulkProcessor {
|
||||||
|
return &BulkProcessor{
|
||||||
|
c: client,
|
||||||
|
beforeFn: beforeFn,
|
||||||
|
afterFn: afterFn,
|
||||||
|
name: name,
|
||||||
|
numWorkers: numWorkers,
|
||||||
|
bulkActions: bulkActions,
|
||||||
|
bulkSize: bulkSize,
|
||||||
|
flushInterval: flushInterval,
|
||||||
|
wantStats: wantStats,
|
||||||
|
retryItemStatusCodes: retryItemStatusCodes,
|
||||||
|
backoff: backoff,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts the bulk processor. If the processor is already started,
|
||||||
|
// nil is returned.
|
||||||
|
func (p *BulkProcessor) Start(ctx context.Context) error {
|
||||||
|
p.startedMu.Lock()
|
||||||
|
defer p.startedMu.Unlock()
|
||||||
|
|
||||||
|
if p.started {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// We must have at least one worker.
|
||||||
|
if p.numWorkers < 1 {
|
||||||
|
p.numWorkers = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
p.requestsC = make(chan BulkableRequest)
|
||||||
|
p.executionId = 0
|
||||||
|
p.stats = newBulkProcessorStats(p.numWorkers)
|
||||||
|
p.stopReconnC = make(chan struct{})
|
||||||
|
|
||||||
|
// Create and start up workers.
|
||||||
|
p.workers = make([]*bulkWorker, p.numWorkers)
|
||||||
|
for i := 0; i < p.numWorkers; i++ {
|
||||||
|
p.workerWg.Add(1)
|
||||||
|
p.workers[i] = newBulkWorker(p, i)
|
||||||
|
go p.workers[i].work(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the ticker for flush (if enabled)
|
||||||
|
if int64(p.flushInterval) > 0 {
|
||||||
|
p.flusherStopC = make(chan struct{})
|
||||||
|
go p.flusher(p.flushInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.started = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop is an alias for Close.
|
||||||
|
func (p *BulkProcessor) Stop() error {
|
||||||
|
return p.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close stops the bulk processor previously started with Do.
|
||||||
|
// If it is already stopped, this is a no-op and nil is returned.
|
||||||
|
//
|
||||||
|
// By implementing Close, BulkProcessor implements the io.Closer interface.
|
||||||
|
func (p *BulkProcessor) Close() error {
|
||||||
|
p.startedMu.Lock()
|
||||||
|
defer p.startedMu.Unlock()
|
||||||
|
|
||||||
|
// Already stopped? Do nothing.
|
||||||
|
if !p.started {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tell connection checkers to stop
|
||||||
|
if p.stopReconnC != nil {
|
||||||
|
close(p.stopReconnC)
|
||||||
|
p.stopReconnC = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop flusher (if enabled)
|
||||||
|
if p.flusherStopC != nil {
|
||||||
|
p.flusherStopC <- struct{}{}
|
||||||
|
<-p.flusherStopC
|
||||||
|
close(p.flusherStopC)
|
||||||
|
p.flusherStopC = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop all workers.
|
||||||
|
close(p.requestsC)
|
||||||
|
p.workerWg.Wait()
|
||||||
|
|
||||||
|
p.started = false
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns the latest bulk processor statistics.
|
||||||
|
// Collecting stats must be enabled first by calling Stats(true) on
|
||||||
|
// the service that created this processor.
|
||||||
|
func (p *BulkProcessor) Stats() BulkProcessorStats {
|
||||||
|
p.statsMu.Lock()
|
||||||
|
defer p.statsMu.Unlock()
|
||||||
|
return *p.stats.dup()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a single request to commit by the BulkProcessorService.
|
||||||
|
//
|
||||||
|
// The caller is responsible for setting the index and type on the request.
|
||||||
|
func (p *BulkProcessor) Add(request BulkableRequest) {
|
||||||
|
p.requestsC <- request
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush manually asks all workers to commit their outstanding requests.
|
||||||
|
// It returns only when all workers acknowledge completion.
|
||||||
|
func (p *BulkProcessor) Flush() error {
|
||||||
|
p.statsMu.Lock()
|
||||||
|
p.stats.Flushed++
|
||||||
|
p.statsMu.Unlock()
|
||||||
|
|
||||||
|
for _, w := range p.workers {
|
||||||
|
w.flushC <- struct{}{}
|
||||||
|
<-w.flushAckC // wait for completion
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// flusher is a single goroutine that periodically asks all workers to
|
||||||
|
// commit their outstanding bulk requests. It is only started if
|
||||||
|
// FlushInterval is greater than 0.
|
||||||
|
func (p *BulkProcessor) flusher(interval time.Duration) {
|
||||||
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C: // Periodic flush
|
||||||
|
p.Flush() // TODO swallow errors here?
|
||||||
|
|
||||||
|
case <-p.flusherStopC:
|
||||||
|
p.flusherStopC <- struct{}{}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Bulk Worker --
|
||||||
|
|
||||||
|
// bulkWorker encapsulates a single worker, running in a goroutine,
|
||||||
|
// receiving bulk requests and eventually committing them to Elasticsearch.
|
||||||
|
// It is strongly bound to a BulkProcessor.
|
||||||
|
type bulkWorker struct {
|
||||||
|
p *BulkProcessor
|
||||||
|
i int
|
||||||
|
bulkActions int
|
||||||
|
bulkSize int
|
||||||
|
service *BulkService
|
||||||
|
flushC chan struct{}
|
||||||
|
flushAckC chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBulkWorker creates a new bulkWorker instance.
|
||||||
|
func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
|
||||||
|
return &bulkWorker{
|
||||||
|
p: p,
|
||||||
|
i: i,
|
||||||
|
bulkActions: p.bulkActions,
|
||||||
|
bulkSize: p.bulkSize,
|
||||||
|
service: NewBulkService(p.c),
|
||||||
|
flushC: make(chan struct{}),
|
||||||
|
flushAckC: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// work waits for bulk requests and manual flush calls on the respective
|
||||||
|
// channels and is invoked as a goroutine when the bulk processor is started.
|
||||||
|
func (w *bulkWorker) work(ctx context.Context) {
|
||||||
|
defer func() {
|
||||||
|
w.p.workerWg.Done()
|
||||||
|
close(w.flushAckC)
|
||||||
|
close(w.flushC)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var stop bool
|
||||||
|
for !stop {
|
||||||
|
var err error
|
||||||
|
select {
|
||||||
|
case req, open := <-w.p.requestsC:
|
||||||
|
if open {
|
||||||
|
// Received a new request
|
||||||
|
if _, err = req.Source(); err == nil {
|
||||||
|
w.service.Add(req)
|
||||||
|
if w.commitRequired() {
|
||||||
|
err = w.commit(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Channel closed: Stop.
|
||||||
|
stop = true
|
||||||
|
if w.service.NumberOfActions() > 0 {
|
||||||
|
err = w.commit(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-w.flushC:
|
||||||
|
// Commit outstanding requests
|
||||||
|
if w.service.NumberOfActions() > 0 {
|
||||||
|
err = w.commit(ctx)
|
||||||
|
}
|
||||||
|
w.flushAckC <- struct{}{}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
w.p.c.errorf("elastic: bulk processor %q was unable to perform work: %v", w.p.name, err)
|
||||||
|
if !stop {
|
||||||
|
waitForActive := func() {
|
||||||
|
// Add back pressure to prevent Add calls from filling up the request queue
|
||||||
|
ready := make(chan struct{})
|
||||||
|
go w.waitForActiveConnection(ready)
|
||||||
|
<-ready
|
||||||
|
}
|
||||||
|
if _, ok := err.(net.Error); ok {
|
||||||
|
waitForActive()
|
||||||
|
} else if IsConnErr(err) {
|
||||||
|
waitForActive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// commit commits the bulk requests in the given service,
|
||||||
|
// invoking callbacks as specified.
|
||||||
|
func (w *bulkWorker) commit(ctx context.Context) error {
|
||||||
|
var res *BulkResponse
|
||||||
|
|
||||||
|
// commitFunc will commit bulk requests and, on failure, be retried
|
||||||
|
// via exponential backoff
|
||||||
|
commitFunc := func() error {
|
||||||
|
var err error
|
||||||
|
// Save requests because they will be reset in service.Do
|
||||||
|
reqs := w.service.requests
|
||||||
|
res, err = w.service.Do(ctx)
|
||||||
|
if err == nil {
|
||||||
|
// Overall bulk request was OK. But each bulk response item also has a status
|
||||||
|
if w.p.retryItemStatusCodes != nil && len(w.p.retryItemStatusCodes) > 0 {
|
||||||
|
// Check res.Items since some might be soft failures
|
||||||
|
if res.Items != nil && res.Errors {
|
||||||
|
// res.Items will be 1 to 1 with reqs in same order
|
||||||
|
for i, item := range res.Items {
|
||||||
|
for _, result := range item {
|
||||||
|
if _, found := w.p.retryItemStatusCodes[result.Status]; found {
|
||||||
|
w.service.Add(reqs[i])
|
||||||
|
if err == nil {
|
||||||
|
err = ErrBulkItemRetry
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// notifyFunc will be called if retry fails
|
||||||
|
notifyFunc := func(err error) {
|
||||||
|
w.p.c.errorf("elastic: bulk processor %q failed but may retry: %v", w.p.name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := atomic.AddInt64(&w.p.executionId, 1)
|
||||||
|
|
||||||
|
// Update # documents in queue before eventual retries
|
||||||
|
w.p.statsMu.Lock()
|
||||||
|
if w.p.wantStats {
|
||||||
|
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
|
||||||
|
}
|
||||||
|
w.p.statsMu.Unlock()
|
||||||
|
|
||||||
|
// Save requests because they will be reset in commitFunc
|
||||||
|
reqs := w.service.requests
|
||||||
|
|
||||||
|
// Invoke before callback
|
||||||
|
if w.p.beforeFn != nil {
|
||||||
|
w.p.beforeFn(id, reqs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit bulk requests
|
||||||
|
err := RetryNotify(commitFunc, w.p.backoff, notifyFunc)
|
||||||
|
w.updateStats(res)
|
||||||
|
if err != nil {
|
||||||
|
w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke after callback
|
||||||
|
if w.p.afterFn != nil {
|
||||||
|
w.p.afterFn(id, reqs, res, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *bulkWorker) waitForActiveConnection(ready chan<- struct{}) {
|
||||||
|
defer close(ready)
|
||||||
|
|
||||||
|
t := time.NewTicker(5 * time.Second)
|
||||||
|
defer t.Stop()
|
||||||
|
|
||||||
|
client := w.p.c
|
||||||
|
stopReconnC := w.p.stopReconnC
|
||||||
|
w.p.c.errorf("elastic: bulk processor %q is waiting for an active connection", w.p.name)
|
||||||
|
|
||||||
|
// loop until a health check finds at least 1 active connection or the reconnection channel is closed
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case _, ok := <-stopReconnC:
|
||||||
|
if !ok {
|
||||||
|
w.p.c.errorf("elastic: bulk processor %q active connection check interrupted", w.p.name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-t.C:
|
||||||
|
client.healthcheck(context.Background(), 3*time.Second, true)
|
||||||
|
if client.mustActiveConn() == nil {
|
||||||
|
// found an active connection
|
||||||
|
// exit and signal done to the WaitGroup
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *bulkWorker) updateStats(res *BulkResponse) {
|
||||||
|
// Update stats
|
||||||
|
if res != nil {
|
||||||
|
w.p.statsMu.Lock()
|
||||||
|
if w.p.wantStats {
|
||||||
|
w.p.stats.Committed++
|
||||||
|
if res != nil {
|
||||||
|
w.p.stats.Indexed += int64(len(res.Indexed()))
|
||||||
|
w.p.stats.Created += int64(len(res.Created()))
|
||||||
|
w.p.stats.Updated += int64(len(res.Updated()))
|
||||||
|
w.p.stats.Deleted += int64(len(res.Deleted()))
|
||||||
|
w.p.stats.Succeeded += int64(len(res.Succeeded()))
|
||||||
|
w.p.stats.Failed += int64(len(res.Failed()))
|
||||||
|
}
|
||||||
|
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
|
||||||
|
w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond
|
||||||
|
}
|
||||||
|
w.p.statsMu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// commitRequired returns true if the service has to commit its
|
||||||
|
// bulk requests. This can be either because the number of actions
|
||||||
|
// or the estimated size in bytes is larger than specified in the
|
||||||
|
// BulkProcessorService.
|
||||||
|
func (w *bulkWorker) commitRequired() bool {
|
||||||
|
if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
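The BulkProcessorService/BulkProcessor pair above wraps the bulk service with worker goroutines, retries, and flush policies based on action count, payload size, and interval. A hedged usage sketch; elastic.NewClient and SetURL come from the client portion of the library outside this hunk, and the URL, processor name, and index are example values only:

package main

import (
	"context"
	"time"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	// Assumed client setup; elastic.NewClient/SetURL are not part of this hunk.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}

	// Flush after 1000 actions, 5 MB of payload, or every 30 seconds,
	// whichever comes first, using two concurrent workers.
	p, err := elastic.NewBulkProcessorService(client).
		Name("issue-indexer").
		Workers(2).
		BulkActions(1000).
		BulkSize(5 << 20).
		FlushInterval(30 * time.Second).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
	defer p.Close()

	// Add queues a request; a worker commits it according to the policy above.
	p.Add(elastic.NewBulkIndexRequest().
		Index("gitea_issues").
		Id("1").
		Doc(map[string]interface{}{"title": "issue title"}))

	// Force any outstanding requests to be committed immediately.
	if err := p.Flush(); err != nil {
		panic(err)
	}
}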
17 vendor/github.com/olivere/elastic/v7/bulk_request.go (generated, vendored, new file)
@@ -0,0 +1,17 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"fmt"
)

// -- Bulkable request (index/update/delete) --

// BulkableRequest is a generic interface to bulkable requests.
type BulkableRequest interface {
	fmt.Stringer
	Source() ([]string, error)
}
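BulkableRequest is the common interface implemented by the index, update, and delete request types in this directory, which is what lets the bulk service and BulkProcessor treat them uniformly. A small sketch, with illustrative index name and ids:

package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	// Index and delete requests handled through the shared interface.
	reqs := []elastic.BulkableRequest{
		elastic.NewBulkIndexRequest().Index("gitea_issues").Id("1").
			Doc(map[string]interface{}{"title": "issue title"}),
		elastic.NewBulkDeleteRequest().Index("gitea_issues").Id("2"),
	}
	for _, r := range reqs {
		// String joins the lines produced by Source with newlines.
		fmt.Println(r.String())
	}
}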
334 vendor/github.com/olivere/elastic/v7/bulk_update_request.go (generated, vendored, new file)
@@ -0,0 +1,334 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

//go:generate easyjson bulk_update_request.go

import (
	"encoding/json"
	"fmt"
	"strings"
)

// BulkUpdateRequest is a request to update a document in Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
// for details.
type BulkUpdateRequest struct {
	BulkableRequest
	index string
	typ   string
	id    string

	routing         string
	parent          string
	script          *Script
	scriptedUpsert  *bool
	version         int64  // default is MATCH_ANY
	versionType     string // default is "internal"
	retryOnConflict *int
	upsert          interface{}
	docAsUpsert     *bool
	detectNoop      *bool
	doc             interface{}
	returnSource    *bool
	ifSeqNo         *int64
	ifPrimaryTerm   *int64

	source []string

	useEasyJSON bool
}

//easyjson:json
type bulkUpdateRequestCommand map[string]bulkUpdateRequestCommandOp

//easyjson:json
type bulkUpdateRequestCommandOp struct {
	Index  string `json:"_index,omitempty"`
	Type   string `json:"_type,omitempty"`
	Id     string `json:"_id,omitempty"`
	Parent string `json:"parent,omitempty"`
	// RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
	RetryOnConflict *int   `json:"retry_on_conflict,omitempty"`
	Routing         string `json:"routing,omitempty"`
	Version         int64  `json:"version,omitempty"`
	VersionType     string `json:"version_type,omitempty"`
	IfSeqNo         *int64 `json:"if_seq_no,omitempty"`
	IfPrimaryTerm   *int64 `json:"if_primary_term,omitempty"`
}

//easyjson:json
type bulkUpdateRequestCommandData struct {
	DetectNoop     *bool       `json:"detect_noop,omitempty"`
	Doc            interface{} `json:"doc,omitempty"`
	DocAsUpsert    *bool       `json:"doc_as_upsert,omitempty"`
	Script         interface{} `json:"script,omitempty"`
	ScriptedUpsert *bool       `json:"scripted_upsert,omitempty"`
	Upsert         interface{} `json:"upsert,omitempty"`
	Source         *bool       `json:"_source,omitempty"`
}

// NewBulkUpdateRequest returns a new BulkUpdateRequest.
func NewBulkUpdateRequest() *BulkUpdateRequest {
	return &BulkUpdateRequest{}
}

// UseEasyJSON is an experimental setting that enables serialization
// with github.com/mailru/easyjson, which should result in faster serialization
// time and fewer allocations, but removes compatibility with encoding/json,
// uses unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations
// for details. This setting is disabled by default.
func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest {
	r.useEasyJSON = enable
	return r
}

// Index specifies the Elasticsearch index to use for this update request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
	r.index = index
	r.source = nil
	return r
}

// Type specifies the Elasticsearch type to use for this update request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
	r.typ = typ
	r.source = nil
	return r
}

// Id specifies the identifier of the document to update.
func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
	r.id = id
	r.source = nil
	return r
}

// Routing specifies a routing value for the request.
func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
	r.routing = routing
	r.source = nil
	return r
}

// Parent specifies the identifier of the parent document (if available).
func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
	r.parent = parent
	r.source = nil
	return r
}

// Script specifies an update script.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html#bulk-update
// and https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html
// for details.
func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
	r.script = script
	r.source = nil
	return r
}

// ScriptedUpsert specifies if your script will run regardless of
// whether the document exists or not.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_scripted_upsert_literal
func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
	r.scriptedUpsert = &upsert
	r.source = nil
	return r
}
|
|
||||||
|
// RetryOnConflict specifies how often to retry in case of a version conflict.
|
||||||
|
func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
|
||||||
|
r.retryOnConflict = &retryOnConflict
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version indicates the version of the document as part of an optimistic
|
||||||
|
// concurrency model.
|
||||||
|
func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
|
||||||
|
r.version = version
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// VersionType can be "internal" (default), "external", "external_gte",
|
||||||
|
// or "external_gt".
|
||||||
|
func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
|
||||||
|
r.versionType = versionType
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfSeqNo indicates to only perform the index operation if the last
|
||||||
|
// operation that has changed the document has the specified sequence number.
|
||||||
|
func (r *BulkUpdateRequest) IfSeqNo(ifSeqNo int64) *BulkUpdateRequest {
|
||||||
|
r.ifSeqNo = &ifSeqNo
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfPrimaryTerm indicates to only perform the index operation if the
|
||||||
|
// last operation that has changed the document has the specified primary term.
|
||||||
|
func (r *BulkUpdateRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkUpdateRequest {
|
||||||
|
r.ifPrimaryTerm = &ifPrimaryTerm
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Doc specifies the updated document.
|
||||||
|
func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
|
||||||
|
r.doc = doc
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// DocAsUpsert indicates whether the contents of Doc should be used as
|
||||||
|
// the Upsert value.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_doc_as_upsert_literal
|
||||||
|
// for details.
|
||||||
|
func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
|
||||||
|
r.docAsUpsert = &docAsUpsert
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetectNoop specifies whether changes that don't affect the document
|
||||||
|
// should be ignored (true) or unignored (false). This is enabled by default
|
||||||
|
// in Elasticsearch.
|
||||||
|
func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest {
|
||||||
|
r.detectNoop = &detectNoop
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upsert specifies the document to use for upserts. It will be used for
|
||||||
|
// create if the original document does not exist.
|
||||||
|
func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
|
||||||
|
r.upsert = doc
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReturnSource specifies whether Elasticsearch should return the source
|
||||||
|
// after the update. In the request, this responds to the `_source` field.
|
||||||
|
// It is false by default.
|
||||||
|
func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest {
|
||||||
|
r.returnSource = &source
|
||||||
|
r.source = nil
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the on-wire representation of the update request,
|
||||||
|
// concatenated as a single string.
|
||||||
|
func (r *BulkUpdateRequest) String() string {
|
||||||
|
lines, err := r.Source()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("error: %v", err)
|
||||||
|
}
|
||||||
|
return strings.Join(lines, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Source returns the on-wire representation of the update request,
|
||||||
|
// split into an action-and-meta-data line and an (optional) source line.
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html
|
||||||
|
// for details.
|
||||||
|
func (r *BulkUpdateRequest) Source() ([]string, error) {
|
||||||
|
// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
|
||||||
|
// { "doc" : { "field1" : "value1", ... } }
|
||||||
|
// or
|
||||||
|
// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
|
||||||
|
// { "script" : { ... } }
|
||||||
|
|
||||||
|
if r.source != nil {
|
||||||
|
return r.source, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lines := make([]string, 2)
|
||||||
|
|
||||||
|
// "update" ...
|
||||||
|
updateCommand := bulkUpdateRequestCommandOp{
|
||||||
|
Index: r.index,
|
||||||
|
Type: r.typ,
|
||||||
|
Id: r.id,
|
||||||
|
Routing: r.routing,
|
||||||
|
Parent: r.parent,
|
||||||
|
Version: r.version,
|
||||||
|
VersionType: r.versionType,
|
||||||
|
RetryOnConflict: r.retryOnConflict,
|
||||||
|
IfSeqNo: r.ifSeqNo,
|
||||||
|
IfPrimaryTerm: r.ifPrimaryTerm,
|
||||||
|
}
|
||||||
|
command := bulkUpdateRequestCommand{
|
||||||
|
"update": updateCommand,
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var body []byte
|
||||||
|
if r.useEasyJSON {
|
||||||
|
// easyjson
|
||||||
|
body, err = command.MarshalJSON()
|
||||||
|
} else {
|
||||||
|
// encoding/json
|
||||||
|
body, err = json.Marshal(command)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
lines[0] = string(body)
|
||||||
|
|
||||||
|
// 2nd line: {"doc" : { ... }} or {"script": {...}}
|
||||||
|
var doc interface{}
|
||||||
|
if r.doc != nil {
|
||||||
|
// Automatically serialize strings as raw JSON
|
||||||
|
switch t := r.doc.(type) {
|
||||||
|
default:
|
||||||
|
doc = r.doc
|
||||||
|
case string:
|
||||||
|
if len(t) > 0 {
|
||||||
|
doc = json.RawMessage(t)
|
||||||
|
}
|
||||||
|
case *string:
|
||||||
|
if t != nil && len(*t) > 0 {
|
||||||
|
doc = json.RawMessage(*t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
data := bulkUpdateRequestCommandData{
|
||||||
|
DocAsUpsert: r.docAsUpsert,
|
||||||
|
DetectNoop: r.detectNoop,
|
||||||
|
Upsert: r.upsert,
|
||||||
|
ScriptedUpsert: r.scriptedUpsert,
|
||||||
|
Doc: doc,
|
||||||
|
Source: r.returnSource,
|
||||||
|
}
|
||||||
|
if r.script != nil {
|
||||||
|
script, err := r.script.Source()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
data.Script = script
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.useEasyJSON {
|
||||||
|
// easyjson
|
||||||
|
body, err = data.MarshalJSON()
|
||||||
|
} else {
|
||||||
|
// encoding/json
|
||||||
|
body, err = json.Marshal(data)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
lines[1] = string(body)
|
||||||
|
|
||||||
|
r.source = lines
|
||||||
|
return lines, nil
|
||||||
|
}
|
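The update request summarized above serializes to exactly two bulk-API lines: the action-and-meta-data object and the doc (or script) object. A sketch of that round trip, with a made-up index and values:

package main

import (
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("gitea_issues").
		Id("7").
		Doc(map[string]interface{}{"content": "updated body"}).
		DocAsUpsert(true)

	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	fmt.Println(lines[0]) // {"update":{"_index":"gitea_issues","_id":"7"}}
	fmt.Println(lines[1]) // {"doc":{"content":"updated body"},"doc_as_upsert":true}
}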
493 vendor/github.com/olivere/elastic/v7/bulk_update_request_easyjson.go (generated, vendored, new file)
@@ -0,0 +1,493 @@
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
// (MarshalJSON/MarshalEasyJSON and UnmarshalJSON/UnmarshalEasyJSON implementations
// for bulkUpdateRequestCommand, bulkUpdateRequestCommandOp and
// bulkUpdateRequestCommandData, used when UseEasyJSON is enabled.)
34 vendor/github.com/olivere/elastic/v7/canonicalize.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import "net/url"

// canonicalize takes a list of URLs and returns its canonicalized form, i.e.
// remove anything but scheme, userinfo, host, path, and port.
// It also removes all trailing slashes. Invalid URLs or URLs that do not
// use protocol http or https are skipped.
//
// Example:
// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
func canonicalize(rawurls ...string) []string {
	var canonicalized []string
	for _, rawurl := range rawurls {
		u, err := url.Parse(rawurl)
		if err == nil {
			if u.Scheme == "http" || u.Scheme == "https" {
				// Trim trailing slashes
				for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
					u.Path = u.Path[0 : len(u.Path)-1]
				}
				u.Fragment = ""
				u.RawQuery = ""
				canonicalized = append(canonicalized, u.String())
			}
		}
	}
	return canonicalized
}
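Since canonicalize is unexported, the quickest way to see its behavior is an in-package test; this sketch (test name is hypothetical) simply re-states the examples from the comment above:

package elastic

import (
	"reflect"
	"testing"
)

func TestCanonicalizeExamples(t *testing.T) {
	got := canonicalize(
		"http://127.0.0.1:9200/?query=1",
		"http://127.0.0.1:9200/db1/",
		"ftp://127.0.0.1:9200", // skipped: neither http nor https
	)
	want := []string{"http://127.0.0.1:9200", "http://127.0.0.1:9200/db1"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("canonicalize() = %v, want %v", got, want)
	}
}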
222 vendor/github.com/olivere/elastic/v7/cat_aliases.go (generated, vendored, new file)
@@ -0,0 +1,222 @@
// CatAliasesService shows information about currently configured aliases
// to indices including filter and routing infos.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-aliases.html
// for details.
// (Standard service pattern: fluent options Pretty, Human, ErrorTrace, FilterPath,
// Header/Headers, Alias, Local, MasterTimeout, Columns and Sort; buildURL assembles
// /_cat/aliases{/name}?format=json and Do decodes the response into
// CatAliasesResponse, a slice of CatAliasesResponseRow with Alias, Index, Filter,
// RoutingIndex, RoutingSearch and IsWriteIndex.)
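A brief usage sketch, assuming the client wires this service up as client.CatAliases() as in the rest of the library; the sort column is just an example:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// printAliases dumps every alias and the index it points to.
func printAliases(ctx context.Context, client *elastic.Client) error {
	rows, err := client.CatAliases().Sort("alias").Do(ctx)
	if err != nil {
		return err
	}
	for _, row := range rows {
		fmt.Printf("%s -> %s (write index: %s)\n", row.Alias, row.Index, row.IsWriteIndex)
	}
	return nil
}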
239 vendor/github.com/olivere/elastic/v7/cat_allocation.go (generated, vendored, new file)
@@ -0,0 +1,239 @@
// CatAllocationService provides a snapshot of how many shards are allocated
// to each data node and how much disk space they are using.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-allocation.html
// for details.
// (Same service pattern: common options plus NodeID, Bytes, Local, MasterTimeout,
// Columns and Sort; buildURL assembles /_cat/allocation{/node_id}?format=json and
// Do decodes into CatAllocationResponse, rows carrying Shards, DiskIndices,
// DiskUsed, DiskAvail, DiskTotal, DiskPercent, Host, IP and Node.)
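A sketch of reading the per-node allocation, assuming the client exposes the service as client.CatAllocation():

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// printAllocation lists shard count and disk usage per data node.
func printAllocation(ctx context.Context, client *elastic.Client) error {
	rows, err := client.CatAllocation().Human(true).Do(ctx)
	if err != nil {
		return err
	}
	for _, row := range rows {
		fmt.Printf("%s (%s): %d shards, %s of %s used (%d%%)\n",
			row.Node, row.IP, row.Shards, row.DiskUsed, row.DiskTotal, row.DiskPercent)
	}
	return nil
}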
215 vendor/github.com/olivere/elastic/v7/cat_count.go (generated, vendored, new file)
@@ -0,0 +1,215 @@
// CatCountService provides quick access to the document count of the entire cluster,
// or individual indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-count.html
// for details.
// (Same service pattern: common options plus Index, Local, MasterTimeout, Columns
// and Sort; buildURL assembles /_cat/count{/index}?format=json and Do decodes into
// CatCountResponse, rows carrying Epoch, Timestamp and Count.)
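A sketch of checking how many documents an issue index holds, assuming the client exposes the service as client.CatCount(); "gitea_issues" mirrors the ISSUE_INDEXER_NAME default purely for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// countIssues reports the document count of the issue index.
func countIssues(ctx context.Context, client *elastic.Client) error {
	rows, err := client.CatCount().Index("gitea_issues").Do(ctx)
	if err != nil {
		return err
	}
	for _, row := range rows {
		fmt.Printf("%s: %d documents\n", row.Timestamp, row.Count)
	}
	return nil
}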
vendor/github.com/olivere/elastic/v7/cat_health.go (generated, vendored, new file, 211 lines)
@@ -0,0 +1,211 @@

// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// CatHealthService returns a terse representation of the same information
// as /_cluster/health.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-health.html
// for details.
type CatHealthService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	local               *bool
	masterTimeout       string
	columns             []string
	sort                []string // list of columns for sort order
	disableTimestamping *bool
}

// NewCatHealthService creates a new CatHealthService.
func NewCatHealthService(client *Client) *CatHealthService {
	return &CatHealthService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *CatHealthService) Pretty(pretty bool) *CatHealthService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *CatHealthService) Human(human bool) *CatHealthService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *CatHealthService) ErrorTrace(errorTrace bool) *CatHealthService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *CatHealthService) FilterPath(filterPath ...string) *CatHealthService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *CatHealthService) Header(name string, value string) *CatHealthService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *CatHealthService) Headers(headers http.Header) *CatHealthService {
	s.headers = headers
	return s
}

// Local indicates to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *CatHealthService) Local(local bool) *CatHealthService {
	s.local = &local
	return s
}

// MasterTimeout is the explicit operation timeout for connection to master node.
func (s *CatHealthService) MasterTimeout(masterTimeout string) *CatHealthService {
	s.masterTimeout = masterTimeout
	return s
}

// Columns to return in the response.
// To get a list of all possible columns to return, run the following command
// in your terminal:
//
// Example:
//   curl 'http://localhost:9200/_cat/indices?help'
//
// You can use Columns("*") to return all possible columns. That might take
// a little longer than the default set of columns.
func (s *CatHealthService) Columns(columns ...string) *CatHealthService {
	s.columns = columns
	return s
}

// Sort is a list of fields to sort by.
func (s *CatHealthService) Sort(fields ...string) *CatHealthService {
	s.sort = fields
	return s
}

// DisableTimestamping disables timestamping (default: true).
func (s *CatHealthService) DisableTimestamping(disable bool) *CatHealthService {
	s.disableTimestamping = &disable
	return s
}

// buildURL builds the URL for the operation.
func (s *CatHealthService) buildURL() (string, url.Values, error) {
	// Build URL
	path := "/_cat/health"

	// Add query string parameters
	params := url.Values{
		"format": []string{"json"}, // always returns as JSON
	}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if v := s.local; v != nil {
		params.Set("local", fmt.Sprint(*v))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if len(s.sort) > 0 {
		params.Set("s", strings.Join(s.sort, ","))
	}
	if v := s.disableTimestamping; v != nil {
		params.Set("ts", fmt.Sprint(*v))
	}
	if len(s.columns) > 0 {
		params.Set("h", strings.Join(s.columns, ","))
	}
	return path, params, nil
}

// Do executes the operation.
func (s *CatHealthService) Do(ctx context.Context) (CatHealthResponse, error) {
	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret CatHealthResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of a get request.

// CatHealthResponse is the outcome of CatHealthService.Do.
type CatHealthResponse []CatHealthResponseRow

// CatHealthResponseRow is a single row in a CatHealthResponse.
// Notice that not all of these fields might be filled; that depends
// on the number of columns chosen in the request (see CatHealthService.Columns).
type CatHealthResponseRow struct {
	Epoch               int64  `json:"epoch,string"`          // e.g. 1527077996
	Timestamp           string `json:"timestamp"`             // e.g. "12:19:56"
	Cluster             string `json:"cluster"`               // cluster name, e.g. "elasticsearch"
	Status              string `json:"status"`                // health status, e.g. "green", "yellow", or "red"
	NodeTotal           int    `json:"node.total,string"`     // total number of nodes
	NodeData            int    `json:"node.data,string"`      // number of nodes that can store data
	Shards              int    `json:"shards,string"`         // total number of shards
	Pri                 int    `json:"pri,string"`            // number of primary shards
	Relo                int    `json:"relo,string"`           // number of relocating nodes
	Init                int    `json:"init,string"`           // number of initializing nodes
	Unassign            int    `json:"unassign,string"`       // number of unassigned shards
	PendingTasks        int    `json:"pending_tasks,string"`  // number of pending tasks
	MaxTaskWaitTime     string `json:"max_task_wait_time"`    // wait time of longest task pending, e.g. "-" or time in millis
	ActiveShardsPercent string `json:"active_shards_percent"` // active number of shards in percent, e.g. "100%"
}
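For reference, a minimal sketch of how the vendored cat-health API above can be exercised against a single test node. The localhost URL and the sniffing option are assumptions for local testing; they are not part of this change.

package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	// Assumption: one Elasticsearch node reachable at http://localhost:9200;
	// sniffing is disabled so the client only talks to that node.
	client, err := elastic.NewClient(
		elastic.SetURL("http://localhost:9200"),
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Query /_cat/health with all columns and print a short summary per row.
	rows, err := elastic.NewCatHealthService(client).Columns("*").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		fmt.Printf("cluster=%s status=%s nodes=%d\n", row.Cluster, row.Status, row.NodeTotal)
	}
}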
vendor/github.com/olivere/elastic/v7/cat_indices.go (generated, vendored, new file, 374 lines)
@@ -0,0 +1,374 @@

// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// CatIndicesService returns the list of indices plus some additional
// information about them.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-indices.html
// for details.
type CatIndicesService struct {
	client *Client

	pretty     *bool    // pretty format the returned JSON response
	human      *bool    // return human readable values for statistics
	errorTrace *bool    // include the stack trace of returned errors
	filterPath []string // list of filters used to reduce the response

	index         string
	bytes         string // b, k, m, or g
	local         *bool
	masterTimeout string
	columns       []string
	health        string // green, yellow, or red
	primaryOnly   *bool  // true for primary shards only
	sort          []string // list of columns for sort order
	headers       http.Header
}

// NewCatIndicesService creates a new CatIndicesService.
func NewCatIndicesService(client *Client) *CatIndicesService {
	return &CatIndicesService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *CatIndicesService) Pretty(pretty bool) *CatIndicesService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *CatIndicesService) Human(human bool) *CatIndicesService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *CatIndicesService) ErrorTrace(errorTrace bool) *CatIndicesService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *CatIndicesService) FilterPath(filterPath ...string) *CatIndicesService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *CatIndicesService) Header(name string, value string) *CatIndicesService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *CatIndicesService) Headers(headers http.Header) *CatIndicesService {
	s.headers = headers
	return s
}

// Index is the name of the index to list (by default all indices are returned).
func (s *CatIndicesService) Index(index string) *CatIndicesService {
	s.index = index
	return s
}

// Bytes represents the unit in which to display byte values.
// Valid values are: "b", "k", "m", or "g".
func (s *CatIndicesService) Bytes(bytes string) *CatIndicesService {
	s.bytes = bytes
	return s
}

// Local indicates to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *CatIndicesService) Local(local bool) *CatIndicesService {
	s.local = &local
	return s
}

// MasterTimeout is the explicit operation timeout for connection to master node.
func (s *CatIndicesService) MasterTimeout(masterTimeout string) *CatIndicesService {
	s.masterTimeout = masterTimeout
	return s
}

// Columns to return in the response.
// To get a list of all possible columns to return, run the following command
// in your terminal:
//
// Example:
//   curl 'http://localhost:9200/_cat/indices?help'
//
// You can use Columns("*") to return all possible columns. That might take
// a little longer than the default set of columns.
func (s *CatIndicesService) Columns(columns ...string) *CatIndicesService {
	s.columns = columns
	return s
}

// Health filters indices by their health status.
// Valid values are: "green", "yellow", or "red".
func (s *CatIndicesService) Health(healthState string) *CatIndicesService {
	s.health = healthState
	return s
}

// PrimaryOnly when set to true returns stats only for primary shards (default: false).
func (s *CatIndicesService) PrimaryOnly(primaryOnly bool) *CatIndicesService {
	s.primaryOnly = &primaryOnly
	return s
}

// Sort is a list of fields to sort by.
func (s *CatIndicesService) Sort(fields ...string) *CatIndicesService {
	s.sort = fields
	return s
}

// buildURL builds the URL for the operation.
func (s *CatIndicesService) buildURL() (string, url.Values, error) {
	// Build URL
	var (
		path string
		err  error
	)

	if s.index != "" {
		path, err = uritemplates.Expand("/_cat/indices/{index}", map[string]string{
			"index": s.index,
		})
	} else {
		path = "/_cat/indices"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{
		"format": []string{"json"}, // always returns as JSON
	}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.bytes != "" {
		params.Set("bytes", s.bytes)
	}
	if v := s.local; v != nil {
		params.Set("local", fmt.Sprint(*v))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if len(s.columns) > 0 {
		params.Set("h", strings.Join(s.columns, ","))
	}
	if s.health != "" {
		params.Set("health", s.health)
	}
	if v := s.primaryOnly; v != nil {
		params.Set("pri", fmt.Sprint(*v))
	}
	if len(s.sort) > 0 {
		params.Set("s", strings.Join(s.sort, ","))
	}
	return path, params, nil
}

// Do executes the operation.
func (s *CatIndicesService) Do(ctx context.Context) (CatIndicesResponse, error) {
	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret CatIndicesResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of a get request.

// CatIndicesResponse is the outcome of CatIndicesService.Do.
type CatIndicesResponse []CatIndicesResponseRow

// CatIndicesResponseRow specifies the data returned for one index
// of a CatIndicesResponse. Notice that not all of these fields might
// be filled; that depends on the number of columns chosen in the
// request (see CatIndicesService.Columns).
type CatIndicesResponseRow struct {
	Health                       string `json:"health"`                              // "green", "yellow", or "red"
	Status                       string `json:"status"`                              // "open" or "closed"
	Index                        string `json:"index"`                               // index name
	UUID                         string `json:"uuid"`                                // index uuid
	Pri                          int    `json:"pri,string"`                          // number of primary shards
	Rep                          int    `json:"rep,string"`                          // number of replica shards
	DocsCount                    int    `json:"docs.count,string"`                   // number of available documents
	DocsDeleted                  int    `json:"docs.deleted,string"`                 // number of deleted documents
	CreationDate                 int64  `json:"creation.date,string"`                // index creation date (millisecond value), e.g. 1527077221644
	CreationDateString           string `json:"creation.date.string"`                // index creation date (as string), e.g. "2018-05-23T12:07:01.644Z"
	StoreSize                    string `json:"store.size"`                          // store size of primaries & replicas, e.g. "4.6kb"
	PriStoreSize                 string `json:"pri.store.size"`                      // store size of primaries, e.g. "230b"
	CompletionSize               string `json:"completion.size"`                     // size of completion on primaries & replicas
	PriCompletionSize            string `json:"pri.completion.size"`                 // size of completion on primaries
	FielddataMemorySize          string `json:"fielddata.memory_size"`               // used fielddata cache on primaries & replicas
	PriFielddataMemorySize       string `json:"pri.fielddata.memory_size"`           // used fielddata cache on primaries
	FielddataEvictions           int    `json:"fielddata.evictions,string"`          // fielddata evictions on primaries & replicas
	PriFielddataEvictions        int    `json:"pri.fielddata.evictions,string"`      // fielddata evictions on primaries
	QueryCacheMemorySize         string `json:"query_cache.memory_size"`             // used query cache on primaries & replicas
	PriQueryCacheMemorySize      string `json:"pri.query_cache.memory_size"`         // used query cache on primaries
	QueryCacheEvictions          int    `json:"query_cache.evictions,string"`        // query cache evictions on primaries & replicas
	PriQueryCacheEvictions       int    `json:"pri.query_cache.evictions,string"`    // query cache evictions on primaries
	RequestCacheMemorySize       string `json:"request_cache.memory_size"`           // used request cache on primaries & replicas
	PriRequestCacheMemorySize    string `json:"pri.request_cache.memory_size"`       // used request cache on primaries
	RequestCacheEvictions        int    `json:"request_cache.evictions,string"`      // request cache evictions on primaries & replicas
	PriRequestCacheEvictions     int    `json:"pri.request_cache.evictions,string"`  // request cache evictions on primaries
	RequestCacheHitCount         int    `json:"request_cache.hit_count,string"`      // request cache hit count on primaries & replicas
	PriRequestCacheHitCount      int    `json:"pri.request_cache.hit_count,string"`  // request cache hit count on primaries
	RequestCacheMissCount        int    `json:"request_cache.miss_count,string"`     // request cache miss count on primaries & replicas
	PriRequestCacheMissCount     int    `json:"pri.request_cache.miss_count,string"` // request cache miss count on primaries
	FlushTotal                   int    `json:"flush.total,string"`                  // number of flushes on primaries & replicas
	PriFlushTotal                int    `json:"pri.flush.total,string"`              // number of flushes on primaries
	FlushTotalTime               string `json:"flush.total_time"`                    // time spent in flush on primaries & replicas
	PriFlushTotalTime            string `json:"pri.flush.total_time"`                // time spent in flush on primaries
	GetCurrent                   int    `json:"get.current,string"`                  // number of current get ops on primaries & replicas
	PriGetCurrent                int    `json:"pri.get.current,string"`              // number of current get ops on primaries
	GetTime                      string `json:"get.time"`                            // time spent in get on primaries & replicas
	PriGetTime                   string `json:"pri.get.time"`                        // time spent in get on primaries
	GetTotal                     int    `json:"get.total,string"`                    // number of get ops on primaries & replicas
	PriGetTotal                  int    `json:"pri.get.total,string"`                // number of get ops on primaries
	GetExistsTime                string `json:"get.exists_time"`                     // time spent in successful gets on primaries & replicas
	PriGetExistsTime             string `json:"pri.get.exists_time"`                 // time spent in successful gets on primaries
	GetExistsTotal               int    `json:"get.exists_total,string"`             // number of successful gets on primaries & replicas
	PriGetExistsTotal            int    `json:"pri.get.exists_total,string"`         // number of successful gets on primaries
	GetMissingTime               string `json:"get.missing_time"`                    // time spent in failed gets on primaries & replicas
	PriGetMissingTime            string `json:"pri.get.missing_time"`                // time spent in failed gets on primaries
	GetMissingTotal              int    `json:"get.missing_total,string"`            // number of failed gets on primaries & replicas
	PriGetMissingTotal           int    `json:"pri.get.missing_total,string"`        // number of failed gets on primaries
	IndexingDeleteCurrent        int    `json:"indexing.delete_current,string"`      // number of current deletions on primaries & replicas
	PriIndexingDeleteCurrent     int    `json:"pri.indexing.delete_current,string"`  // number of current deletions on primaries
	IndexingDeleteTime           string `json:"indexing.delete_time"`                // time spent in deletions on primaries & replicas
	PriIndexingDeleteTime        string `json:"pri.indexing.delete_time"`            // time spent in deletions on primaries
	IndexingDeleteTotal          int    `json:"indexing.delete_total,string"`        // number of delete ops on primaries & replicas
	PriIndexingDeleteTotal       int    `json:"pri.indexing.delete_total,string"`    // number of delete ops on primaries
	IndexingIndexCurrent         int    `json:"indexing.index_current,string"`       // number of current indexing on primaries & replicas
	PriIndexingIndexCurrent      int    `json:"pri.indexing.index_current,string"`   // number of current indexing on primaries
	IndexingIndexTime            string `json:"indexing.index_time"`                 // time spent in indexing on primaries & replicas
	PriIndexingIndexTime         string `json:"pri.indexing.index_time"`             // time spent in indexing on primaries
	IndexingIndexTotal           int    `json:"indexing.index_total,string"`         // number of index ops on primaries & replicas
	PriIndexingIndexTotal        int    `json:"pri.indexing.index_total,string"`     // number of index ops on primaries
	IndexingIndexFailed          int    `json:"indexing.index_failed,string"`        // number of failed indexing ops on primaries & replicas
	PriIndexingIndexFailed       int    `json:"pri.indexing.index_failed,string"`    // number of failed indexing ops on primaries
	MergesCurrent                int    `json:"merges.current,string"`               // number of current merges on primaries & replicas
	PriMergesCurrent             int    `json:"pri.merges.current,string"`           // number of current merges on primaries
	MergesCurrentDocs            int    `json:"merges.current_docs,string"`          // number of current merging docs on primaries & replicas
	PriMergesCurrentDocs         int    `json:"pri.merges.current_docs,string"`      // number of current merging docs on primaries
	MergesCurrentSize            string `json:"merges.current_size"`                 // size of current merges on primaries & replicas
	PriMergesCurrentSize         string `json:"pri.merges.current_size"`             // size of current merges on primaries
	MergesTotal                  int    `json:"merges.total,string"`                 // number of completed merge ops on primaries & replicas
	PriMergesTotal               int    `json:"pri.merges.total,string"`             // number of completed merge ops on primaries
	MergesTotalDocs              int    `json:"merges.total_docs,string"`            // docs merged on primaries & replicas
	PriMergesTotalDocs           int    `json:"pri.merges.total_docs,string"`        // docs merged on primaries
	MergesTotalSize              string `json:"merges.total_size"`                   // size merged on primaries & replicas
	PriMergesTotalSize           string `json:"pri.merges.total_size"`               // size merged on primaries
	MergesTotalTime              string `json:"merges.total_time"`                   // time spent in merges on primaries & replicas
	PriMergesTotalTime           string `json:"pri.merges.total_time"`               // time spent in merges on primaries
	RefreshTotal                 int    `json:"refresh.total,string"`                // total refreshes on primaries & replicas
	PriRefreshTotal              int    `json:"pri.refresh.total,string"`            // total refreshes on primaries
	RefreshExternalTotal         int    `json:"refresh.external_total,string"`       // total external refreshes on primaries & replicas
	PriRefreshExternalTotal      int    `json:"pri.refresh.external_total,string"`   // total external refreshes on primaries
	RefreshTime                  string `json:"refresh.time"`                        // time spent in refreshes on primaries & replicas
	PriRefreshTime               string `json:"pri.refresh.time"`                    // time spent in refreshes on primaries
	RefreshExternalTime          string `json:"refresh.external_time"`               // external time spent in refreshes on primaries & replicas
	PriRefreshExternalTime       string `json:"pri.refresh.external_time"`           // external time spent in refreshes on primaries
	RefreshListeners             int    `json:"refresh.listeners,string"`            // number of pending refresh listeners on primaries & replicas
	PriRefreshListeners          int    `json:"pri.refresh.listeners,string"`        // number of pending refresh listeners on primaries
	SearchFetchCurrent           int    `json:"search.fetch_current,string"`         // current fetch phase ops on primaries & replicas
	PriSearchFetchCurrent        int    `json:"pri.search.fetch_current,string"`     // current fetch phase ops on primaries
	SearchFetchTime              string `json:"search.fetch_time"`                   // time spent in fetch phase on primaries & replicas
	PriSearchFetchTime           string `json:"pri.search.fetch_time"`               // time spent in fetch phase on primaries
	SearchFetchTotal             int    `json:"search.fetch_total,string"`           // total fetch ops on primaries & replicas
	PriSearchFetchTotal          int    `json:"pri.search.fetch_total,string"`       // total fetch ops on primaries
	SearchOpenContexts           int    `json:"search.open_contexts,string"`         // open search contexts on primaries & replicas
	PriSearchOpenContexts        int    `json:"pri.search.open_contexts,string"`     // open search contexts on primaries
	SearchQueryCurrent           int    `json:"search.query_current,string"`         // current query phase ops on primaries & replicas
	PriSearchQueryCurrent        int    `json:"pri.search.query_current,string"`     // current query phase ops on primaries
	SearchQueryTime              string `json:"search.query_time"`                   // time spent in query phase on primaries & replicas, e.g. "0s"
	PriSearchQueryTime           string `json:"pri.search.query_time"`               // time spent in query phase on primaries, e.g. "0s"
	SearchQueryTotal             int    `json:"search.query_total,string"`           // total query phase ops on primaries & replicas
	PriSearchQueryTotal          int    `json:"pri.search.query_total,string"`       // total query phase ops on primaries
	SearchScrollCurrent          int    `json:"search.scroll_current,string"`        // open scroll contexts on primaries & replicas
	PriSearchScrollCurrent       int    `json:"pri.search.scroll_current,string"`    // open scroll contexts on primaries
	SearchScrollTime             string `json:"search.scroll_time"`                  // time scroll contexts held open on primaries & replicas, e.g. "0s"
	PriSearchScrollTime          string `json:"pri.search.scroll_time"`              // time scroll contexts held open on primaries, e.g. "0s"
	SearchScrollTotal            int    `json:"search.scroll_total,string"`          // completed scroll contexts on primaries & replicas
	PriSearchScrollTotal         int    `json:"pri.search.scroll_total,string"`      // completed scroll contexts on primaries
	SearchThrottled              bool   `json:"search.throttled,string"`             // indicates if the index is search throttled
	SegmentsCount                int    `json:"segments.count,string"`               // number of segments on primaries & replicas
	PriSegmentsCount             int    `json:"pri.segments.count,string"`           // number of segments on primaries
	SegmentsMemory               string `json:"segments.memory"`                     // memory used by segments on primaries & replicas, e.g. "1.3kb"
	PriSegmentsMemory            string `json:"pri.segments.memory"`                 // memory used by segments on primaries, e.g. "1.3kb"
	SegmentsIndexWriterMemory    string `json:"segments.index_writer_memory"`        // memory used by index writer on primaries & replicas, e.g. "0b"
	PriSegmentsIndexWriterMemory string `json:"pri.segments.index_writer_memory"`    // memory used by index writer on primaries, e.g. "0b"
	SegmentsVersionMapMemory     string `json:"segments.version_map_memory"`         // memory used by version map on primaries & replicas, e.g. "0b"
	PriSegmentsVersionMapMemory  string `json:"pri.segments.version_map_memory"`     // memory used by version map on primaries, e.g. "0b"
	SegmentsFixedBitsetMemory    string `json:"segments.fixed_bitset_memory"`        // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries & replicas, e.g. "0b"
	PriSegmentsFixedBitsetMemory string `json:"pri.segments.fixed_bitset_memory"`    // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries, e.g. "0b"
	WarmerCurrent                int    `json:"warmer.current,string"`               // current warmer ops on primaries & replicas
	PriWarmerCurrent             int    `json:"pri.warmer.current,string"`           // current warmer ops on primaries
	WarmerTotal                  int    `json:"warmer.total,string"`                 // total warmer ops on primaries & replicas
	PriWarmerTotal               int    `json:"pri.warmer.total,string"`             // total warmer ops on primaries
	WarmerTotalTime              string `json:"warmer.total_time"`                   // time spent in warmers on primaries & replicas, e.g. "47s"
	PriWarmerTotalTime           string `json:"pri.warmer.total_time"`               // time spent in warmers on primaries, e.g. "47s"
	SuggestCurrent               int    `json:"suggest.current,string"`              // number of current suggest ops on primaries & replicas
	PriSuggestCurrent            int    `json:"pri.suggest.current,string"`          // number of current suggest ops on primaries
	SuggestTime                  string `json:"suggest.time"`                        // time spent in suggest on primaries & replicas, e.g. "31s"
	PriSuggestTime               string `json:"pri.suggest.time"`                    // time spent in suggest on primaries, e.g. "31s"
	SuggestTotal                 int    `json:"suggest.total,string"`                // number of suggest ops on primaries & replicas
	PriSuggestTotal              int    `json:"pri.suggest.total,string"`            // number of suggest ops on primaries
	MemoryTotal                  string `json:"memory.total"`                        // total user memory on primaries & replicas, e.g. "1.5kb"
	PriMemoryTotal               string `json:"pri.memory.total"`                    // total user memory on primaries, e.g. "1.5kb"
}
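The indices listing follows the same pattern. A hedged sketch, reusing the client from the earlier example; the index name and column selection are illustrative only:

// Sketch: list a single index via /_cat/indices (index name is an example).
rows, err := elastic.NewCatIndicesService(client).
	Index("my_index").
	Columns("health", "status", "index", "docs.count").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
for _, row := range rows {
	fmt.Printf("%s: %s, %d docs\n", row.Index, row.Health, row.DocsCount)
}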
vendor/github.com/olivere/elastic/v7/clear_scroll.go (generated, vendored, new file, 162 lines)
@@ -0,0 +1,162 @@

// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// ClearScrollService clears one or more scroll contexts by their ids.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#_clear_scroll_api
// for details.
type ClearScrollService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	scrollId []string
}

// NewClearScrollService creates a new ClearScrollService.
func NewClearScrollService(client *Client) *ClearScrollService {
	return &ClearScrollService{
		client:   client,
		scrollId: make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *ClearScrollService) Human(human bool) *ClearScrollService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *ClearScrollService) ErrorTrace(errorTrace bool) *ClearScrollService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *ClearScrollService) FilterPath(filterPath ...string) *ClearScrollService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *ClearScrollService) Header(name string, value string) *ClearScrollService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *ClearScrollService) Headers(headers http.Header) *ClearScrollService {
	s.headers = headers
	return s
}

// ScrollId is a list of scroll IDs to clear.
// Use _all to clear all search contexts.
func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
	s.scrollId = append(s.scrollId, scrollIds...)
	return s
}

// buildURL builds the URL for the operation.
func (s *ClearScrollService) buildURL() (string, url.Values, error) {
	// Build URL
	path := "/_search/scroll/"

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *ClearScrollService) Validate() error {
	var invalid []string
	if len(s.scrollId) == 0 {
		invalid = append(invalid, "ScrollId")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	body := map[string][]string{
		"scroll_id": s.scrollId,
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "DELETE",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ClearScrollResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// ClearScrollResponse is the response of ClearScrollService.Do.
type ClearScrollResponse struct {
	Succeeded bool `json:"succeeded,omitempty"`
	NumFreed  int  `json:"num_freed,omitempty"`
}
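A minimal sketch of clearing a scroll context once a scan is finished, reusing the client from the earlier example; the scrollID variable stands in for an ID returned by a previous scroll search:

// Sketch: release a scroll context after paging through results.
resp, err := elastic.NewClearScrollService(client).
	ScrollId(scrollID).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("succeeded=%v freed=%d\n", resp.Succeeded, resp.NumFreed)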
vendor/github.com/olivere/elastic/v7/client.go (generated, vendored, new file, 2099 lines)
File diff suppressed because it is too large.
vendor/github.com/olivere/elastic/v7/cluster_health.go (generated, vendored, new file, 296 lines)
@@ -0,0 +1,296 @@

// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// ClusterHealthService allows to get a very simple status on the health of the cluster.
//
// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-health.html
// for details.
type ClusterHealthService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	indices                   []string
	level                     string
	local                     *bool
	masterTimeout             string
	timeout                   string
	waitForActiveShards       *int
	waitForNodes              string
	waitForNoRelocatingShards *bool
	waitForStatus             string
}

// NewClusterHealthService creates a new ClusterHealthService.
func NewClusterHealthService(client *Client) *ClusterHealthService {
	return &ClusterHealthService{
		client:  client,
		indices: make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *ClusterHealthService) Human(human bool) *ClusterHealthService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *ClusterHealthService) ErrorTrace(errorTrace bool) *ClusterHealthService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *ClusterHealthService) FilterPath(filterPath ...string) *ClusterHealthService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *ClusterHealthService) Header(name string, value string) *ClusterHealthService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *ClusterHealthService) Headers(headers http.Header) *ClusterHealthService {
	s.headers = headers
	return s
}

// Index limits the information returned to specific indices.
func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService {
	s.indices = append(s.indices, indices...)
	return s
}

// Level specifies the level of detail for returned information.
func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
	s.level = level
	return s
}

// Local indicates whether to return local information. If it is true,
// we do not retrieve the state from master node (default: false).
func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
	s.local = &local
	return s
}

// MasterTimeout specifies an explicit operation timeout for connection to master node.
func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
	s.masterTimeout = masterTimeout
	return s
}

// Timeout specifies an explicit operation timeout.
func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
	s.timeout = timeout
	return s
}

// WaitForActiveShards can be used to wait until the specified number of shards are active.
func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
	s.waitForActiveShards = &waitForActiveShards
	return s
}

// WaitForNodes can be used to wait until the specified number of nodes are available.
// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
	s.waitForNodes = waitForNodes
	return s
}

// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished.
func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService {
	s.waitForNoRelocatingShards = &waitForNoRelocatingShards
	return s
}

// WaitForStatus can be used to wait until the cluster is in a specific state.
// Valid values are: green, yellow, or red.
func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
	s.waitForStatus = waitForStatus
	return s
}

// WaitForGreenStatus will wait for the "green" state.
func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
	return s.WaitForStatus("green")
}

// WaitForYellowStatus will wait for the "yellow" state.
func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
	return s.WaitForStatus("yellow")
}

// buildURL builds the URL for the operation.
func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if len(s.indices) > 0 {
		path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
			"index": strings.Join(s.indices, ","),
		})
	} else {
		path = "/_cluster/health"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.level != "" {
		params.Set("level", s.level)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.waitForActiveShards != nil {
		params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards))
	}
	if s.waitForNodes != "" {
		params.Set("wait_for_nodes", s.waitForNodes)
	}
	if s.waitForNoRelocatingShards != nil {
		params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
	}
	if s.waitForStatus != "" {
		params.Set("wait_for_status", s.waitForStatus)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *ClusterHealthService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ClusterHealthResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// ClusterHealthResponse is the response of ClusterHealthService.Do.
type ClusterHealthResponse struct {
	ClusterName                    string  `json:"cluster_name"`
	Status                         string  `json:"status"`
	TimedOut                       bool    `json:"timed_out"`
	NumberOfNodes                  int     `json:"number_of_nodes"`
	NumberOfDataNodes              int     `json:"number_of_data_nodes"`
	ActivePrimaryShards            int     `json:"active_primary_shards"`
	ActiveShards                   int     `json:"active_shards"`
	RelocatingShards               int     `json:"relocating_shards"`
	InitializingShards             int     `json:"initializing_shards"`
	UnassignedShards               int     `json:"unassigned_shards"`
	DelayedUnassignedShards        int     `json:"delayed_unassigned_shards"`
	NumberOfPendingTasks           int     `json:"number_of_pending_tasks"`
	NumberOfInFlightFetch          int     `json:"number_of_in_flight_fetch"`
	TaskMaxWaitTimeInQueue         string  `json:"task_max_waiting_in_queue"`        // "0s"
	TaskMaxWaitTimeInQueueInMillis int     `json:"task_max_waiting_in_queue_millis"` // 0
	ActiveShardsPercent            string  `json:"active_shards_percent"`            // "100.0%"
	ActiveShardsPercentAsNumber    float64 `json:"active_shards_percent_as_number"`  // 100.0

	// Index name -> index health
	Indices map[string]*ClusterIndexHealth `json:"indices"`
}

// ClusterIndexHealth will be returned as part of ClusterHealthResponse.
type ClusterIndexHealth struct {
	Status              string `json:"status"`
	NumberOfShards      int    `json:"number_of_shards"`
	NumberOfReplicas    int    `json:"number_of_replicas"`
	ActivePrimaryShards int    `json:"active_primary_shards"`
	ActiveShards        int    `json:"active_shards"`
	RelocatingShards    int    `json:"relocating_shards"`
	InitializingShards  int    `json:"initializing_shards"`
	UnassignedShards    int    `json:"unassigned_shards"`
	// Shards by id, e.g. "0" or "1"
	Shards map[string]*ClusterShardHealth `json:"shards"`
}

// ClusterShardHealth will be returned as part of ClusterHealthResponse.
type ClusterShardHealth struct {
	Status             string `json:"status"`
	PrimaryActive      bool   `json:"primary_active"`
	ActiveShards       int    `json:"active_shards"`
	RelocatingShards   int    `json:"relocating_shards"`
	InitializingShards int    `json:"initializing_shards"`
	UnassignedShards   int    `json:"unassigned_shards"`
}
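A typical use of this service is to block until the cluster is ready before indexing, for example right after creating a new index. A minimal sketch, reusing the client from the earlier example; the timeout value is illustrative:

// Sketch: wait until the cluster reaches at least yellow health.
health, err := elastic.NewClusterHealthService(client).
	WaitForYellowStatus().
	Timeout("10s").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("cluster %s is %s\n", health.ClusterName, health.Status)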
vendor/github.com/olivere/elastic/v7/cluster_reroute.go (generated, vendored, new file, 438 lines)
@@ -0,0 +1,438 @@

// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// ClusterRerouteService allows for manual changes to the allocation of
// individual shards in the cluster. For example, a shard can be moved from
// one node to another explicitly, an allocation can be cancelled, and
// an unassigned shard can be explicitly allocated to a specific node.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-reroute.html
// for details.
type ClusterRerouteService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	metrics       []string
	dryRun        *bool
	explain       *bool
	retryFailed   *bool
	masterTimeout string
	timeout       string
	commands      []AllocationCommand
	body          interface{}
}

// NewClusterRerouteService creates a new ClusterRerouteService.
func NewClusterRerouteService(client *Client) *ClusterRerouteService {
	return &ClusterRerouteService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *ClusterRerouteService) Pretty(pretty bool) *ClusterRerouteService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *ClusterRerouteService) Human(human bool) *ClusterRerouteService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *ClusterRerouteService) ErrorTrace(errorTrace bool) *ClusterRerouteService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *ClusterRerouteService) FilterPath(filterPath ...string) *ClusterRerouteService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *ClusterRerouteService) Header(name string, value string) *ClusterRerouteService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *ClusterRerouteService) Headers(headers http.Header) *ClusterRerouteService {
	s.headers = headers
	return s
}

// Metric limits the information returned to the specified metric.
// It can be one of: "_all", "blocks", "metadata", "nodes", "routing_table", "master_node", "version".
// Defaults to all but metadata.
func (s *ClusterRerouteService) Metric(metrics ...string) *ClusterRerouteService {
	s.metrics = append(s.metrics, metrics...)
	return s
}

// DryRun indicates whether to simulate the operation only and return the
// resulting state.
func (s *ClusterRerouteService) DryRun(dryRun bool) *ClusterRerouteService {
	s.dryRun = &dryRun
	return s
}

// Explain, when set to true, returns an explanation of why the commands
// can or cannot be executed.
func (s *ClusterRerouteService) Explain(explain bool) *ClusterRerouteService {
	s.explain = &explain
	return s
}

// RetryFailed indicates whether to retry allocation of shards that are blocked
// due to too many subsequent allocation failures.
func (s *ClusterRerouteService) RetryFailed(retryFailed bool) *ClusterRerouteService {
	s.retryFailed = &retryFailed
	return s
}

// MasterTimeout specifies an explicit timeout for connection to master.
func (s *ClusterRerouteService) MasterTimeout(masterTimeout string) *ClusterRerouteService {
	s.masterTimeout = masterTimeout
	return s
}

// Timeout specifies an explicit operation timeout.
func (s *ClusterRerouteService) Timeout(timeout string) *ClusterRerouteService {
	s.timeout = timeout
	return s
}

// Add adds one or more commands to be executed.
func (s *ClusterRerouteService) Add(commands ...AllocationCommand) *ClusterRerouteService {
	s.commands = append(s.commands, commands...)
	return s
}

// Body specifies the body to be sent.
// If you specify Body, the commands passed via Add are ignored.
// In other words: Body takes precedence over Add.
func (s *ClusterRerouteService) Body(body interface{}) *ClusterRerouteService {
	s.body = body
	return s
}

// buildURL builds the URL for the operation.
func (s *ClusterRerouteService) buildURL() (string, url.Values, error) {
	// Build URL
	path := "/_cluster/reroute"

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if v := s.dryRun; v != nil {
		params.Set("dry_run", fmt.Sprint(*v))
	}
	if v := s.explain; v != nil {
		params.Set("explain", fmt.Sprint(*v))
	}
	if v := s.retryFailed; v != nil {
		params.Set("retry_failed", fmt.Sprint(*v))
	}
	if len(s.metrics) > 0 {
		params.Set("metric", strings.Join(s.metrics, ","))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *ClusterRerouteService) Validate() error {
	if s.body == nil && len(s.commands) == 0 {
		return errors.New("missing allocate commands or raw body")
	}
	return nil
}

// Do executes the operation.
func (s *ClusterRerouteService) Do(ctx context.Context) (*ClusterRerouteResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.body != nil {
		body = s.body
	} else {
		commands := make([]interface{}, len(s.commands))
		for i, cmd := range s.commands {
			src, err := cmd.Source()
			if err != nil {
				return nil, err
			}
			commands[i] = map[string]interface{}{
				cmd.Name(): src,
			}
		}
		query := make(map[string]interface{})
		query["commands"] = commands
		body = query
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ClusterRerouteResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// ClusterRerouteResponse is the response of ClusterRerouteService.Do.
type ClusterRerouteResponse struct {
	State        *ClusterStateResponse `json:"state"`
	Explanations []RerouteExplanation  `json:"explanations,omitempty"`
}

// RerouteExplanation is returned in ClusterRerouteResponse if
// the "explain" parameter is set to "true".
type RerouteExplanation struct {
	Command    string                 `json:"command"`
	Parameters map[string]interface{} `json:"parameters"`
	Decisions  []RerouteDecision      `json:"decisions"`
}

// RerouteDecision is a decision the decider made while rerouting.
type RerouteDecision interface{}

// -- Allocation commands --

// AllocationCommand is a command to be executed in a call
// to Cluster Reroute API.
type AllocationCommand interface {
	Name() string
	Source() (interface{}, error)
}

var _ AllocationCommand = (*MoveAllocationCommand)(nil)

// MoveAllocationCommand moves a shard from a specific node to
// another node.
type MoveAllocationCommand struct {
	index    string
	shardId  int
	fromNode string
	toNode   string
}

// NewMoveAllocationCommand creates a new MoveAllocationCommand.
func NewMoveAllocationCommand(index string, shardId int, fromNode, toNode string) *MoveAllocationCommand {
	return &MoveAllocationCommand{
		index:    index,
		shardId:  shardId,
		fromNode: fromNode,
		toNode:   toNode,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the command in a request to the Cluster Reroute API.
|
||||||
|
func (cmd *MoveAllocationCommand) Name() string { return "move" }
|
||||||
|
|
||||||
|
// Source generates the (inner) JSON to be used when serializing the command.
|
||||||
|
func (cmd *MoveAllocationCommand) Source() (interface{}, error) {
|
||||||
|
source := make(map[string]interface{})
|
||||||
|
source["index"] = cmd.index
|
||||||
|
source["shard"] = cmd.shardId
|
||||||
|
source["from_node"] = cmd.fromNode
|
||||||
|
source["to_node"] = cmd.toNode
|
||||||
|
return source, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ AllocationCommand = (*CancelAllocationCommand)(nil)
|
||||||
|
|
||||||
|
// CancelAllocationCommand cancels relocation, or recovery of a given shard on a node.
|
||||||
|
type CancelAllocationCommand struct {
|
||||||
|
index string
|
||||||
|
shardId int
|
||||||
|
node string
|
||||||
|
allowPrimary bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCancelAllocationCommand creates a new CancelAllocationCommand.
|
||||||
|
func NewCancelAllocationCommand(index string, shardId int, node string, allowPrimary bool) *CancelAllocationCommand {
|
||||||
|
return &CancelAllocationCommand{
|
||||||
|
index: index,
|
||||||
|
shardId: shardId,
|
||||||
|
node: node,
|
||||||
|
allowPrimary: allowPrimary,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the command in a request to the Cluster Reroute API.
|
||||||
|
func (cmd *CancelAllocationCommand) Name() string { return "cancel" }
|
||||||
|
|
||||||
|
// Source generates the (inner) JSON to be used when serializing the command.
|
||||||
|
func (cmd *CancelAllocationCommand) Source() (interface{}, error) {
|
||||||
|
source := make(map[string]interface{})
|
||||||
|
source["index"] = cmd.index
|
||||||
|
source["shard"] = cmd.shardId
|
||||||
|
source["node"] = cmd.node
|
||||||
|
source["allow_primary"] = cmd.allowPrimary
|
||||||
|
return source, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ AllocationCommand = (*AllocateStalePrimaryAllocationCommand)(nil)
|
||||||
|
|
||||||
|
// AllocateStalePrimaryAllocationCommand allocates an unassigned stale
|
||||||
|
// primary shard to a specific node. Use with extreme care as this will
|
||||||
|
// result in data loss. Allocation deciders are ignored.
|
||||||
|
type AllocateStalePrimaryAllocationCommand struct {
|
||||||
|
index string
|
||||||
|
shardId int
|
||||||
|
node string
|
||||||
|
acceptDataLoss bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAllocateStalePrimaryAllocationCommand creates a new
|
||||||
|
// AllocateStalePrimaryAllocationCommand.
|
||||||
|
func NewAllocateStalePrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateStalePrimaryAllocationCommand {
|
||||||
|
return &AllocateStalePrimaryAllocationCommand{
|
||||||
|
index: index,
|
||||||
|
shardId: shardId,
|
||||||
|
node: node,
|
||||||
|
acceptDataLoss: acceptDataLoss,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the command in a request to the Cluster Reroute API.
|
||||||
|
func (cmd *AllocateStalePrimaryAllocationCommand) Name() string { return "allocate_stale_primary" }
|
||||||
|
|
||||||
|
// Source generates the (inner) JSON to be used when serializing the command.
|
||||||
|
func (cmd *AllocateStalePrimaryAllocationCommand) Source() (interface{}, error) {
|
||||||
|
source := make(map[string]interface{})
|
||||||
|
source["index"] = cmd.index
|
||||||
|
source["shard"] = cmd.shardId
|
||||||
|
source["node"] = cmd.node
|
||||||
|
source["accept_data_loss"] = cmd.acceptDataLoss
|
||||||
|
return source, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ AllocationCommand = (*AllocateReplicaAllocationCommand)(nil)
|
||||||
|
|
||||||
|
// AllocateReplicaAllocationCommand allocates an unassigned replica shard
|
||||||
|
// to a specific node. Checks if allocation deciders allow allocation.
|
||||||
|
type AllocateReplicaAllocationCommand struct {
|
||||||
|
index string
|
||||||
|
shardId int
|
||||||
|
node string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAllocateReplicaAllocationCommand creates a new
|
||||||
|
// AllocateReplicaAllocationCommand.
|
||||||
|
func NewAllocateReplicaAllocationCommand(index string, shardId int, node string) *AllocateReplicaAllocationCommand {
|
||||||
|
return &AllocateReplicaAllocationCommand{
|
||||||
|
index: index,
|
||||||
|
shardId: shardId,
|
||||||
|
node: node,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the command in a request to the Cluster Reroute API.
|
||||||
|
func (cmd *AllocateReplicaAllocationCommand) Name() string { return "allocate_replica" }
|
||||||
|
|
||||||
|
// Source generates the (inner) JSON to be used when serializing the command.
|
||||||
|
func (cmd *AllocateReplicaAllocationCommand) Source() (interface{}, error) {
|
||||||
|
source := make(map[string]interface{})
|
||||||
|
source["index"] = cmd.index
|
||||||
|
source["shard"] = cmd.shardId
|
||||||
|
source["node"] = cmd.node
|
||||||
|
return source, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllocateEmptyPrimaryAllocationCommand allocates an unassigned empty
|
||||||
|
// primary shard to a specific node. Use with extreme care as this will
|
||||||
|
// result in data loss. Allocation deciders are ignored.
|
||||||
|
type AllocateEmptyPrimaryAllocationCommand struct {
|
||||||
|
index string
|
||||||
|
shardId int
|
||||||
|
node string
|
||||||
|
acceptDataLoss bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAllocateEmptyPrimaryAllocationCommand creates a new
|
||||||
|
// AllocateEmptyPrimaryAllocationCommand.
|
||||||
|
func NewAllocateEmptyPrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateEmptyPrimaryAllocationCommand {
|
||||||
|
return &AllocateEmptyPrimaryAllocationCommand{
|
||||||
|
index: index,
|
||||||
|
shardId: shardId,
|
||||||
|
node: node,
|
||||||
|
acceptDataLoss: acceptDataLoss,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the command in a request to the Cluster Reroute API.
|
||||||
|
func (cmd *AllocateEmptyPrimaryAllocationCommand) Name() string { return "allocate_empty_primary" }
|
||||||
|
|
||||||
|
// Source generates the (inner) JSON to be used when serializing the command.
|
||||||
|
func (cmd *AllocateEmptyPrimaryAllocationCommand) Source() (interface{}, error) {
|
||||||
|
source := make(map[string]interface{})
|
||||||
|
source["index"] = cmd.index
|
||||||
|
source["shard"] = cmd.shardId
|
||||||
|
source["node"] = cmd.node
|
||||||
|
source["accept_data_loss"] = cmd.acceptDataLoss
|
||||||
|
return source, nil
|
||||||
|
}
|
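For orientation, a minimal usage sketch of the reroute service and allocation commands defined above. It is not part of the vendored file: the client URL, index name and node names are placeholders, and the example only simulates moving one shard (dry run) while asking for an explanation of every decision.

package main

import (
    "context"
    "log"

    elastic "github.com/olivere/elastic/v7"
)

func main() {
    // Assumption: a reachable cluster at this URL; adjust as needed.
    client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
    if err != nil {
        log.Fatal(err)
    }
    // Move shard 0 of the hypothetical index "my-index" from "node-1" to "node-2".
    res, err := elastic.NewClusterRerouteService(client).
        Add(elastic.NewMoveAllocationCommand("my-index", 0, "node-1", "node-2")).
        DryRun(true).
        Explain(true).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("reroute simulated, %d explanation(s) returned", len(res.Explanations))
}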
355 vendor/github.com/olivere/elastic/v7/cluster_state.go generated vendored Normal file
@@ -0,0 +1,355 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterStateService allows to get a comprehensive state information of the whole cluster.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-state.html
|
||||||
|
// for details.
|
||||||
|
type ClusterStateService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
indices []string
|
||||||
|
metrics []string
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
flatSettings *bool
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
local *bool
|
||||||
|
masterTimeout string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClusterStateService creates a new ClusterStateService.
|
||||||
|
func NewClusterStateService(client *Client) *ClusterStateService {
|
||||||
|
return &ClusterStateService{
|
||||||
|
client: client,
|
||||||
|
indices: make([]string, 0),
|
||||||
|
metrics: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *ClusterStateService) Human(human bool) *ClusterStateService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *ClusterStateService) ErrorTrace(errorTrace bool) *ClusterStateService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *ClusterStateService) FilterPath(filterPath ...string) *ClusterStateService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *ClusterStateService) Header(name string, value string) *ClusterStateService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *ClusterStateService) Headers(headers http.Header) *ClusterStateService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is a list of index names. Use _all or an empty string to
|
||||||
|
// perform the operation on all indices.
|
||||||
|
func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
|
||||||
|
s.indices = append(s.indices, indices...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metric limits the information returned to the specified metric.
|
||||||
|
// It can be one of: version, master_node, nodes, routing_table, metadata,
|
||||||
|
// blocks, or customs.
|
||||||
|
func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
|
||||||
|
s.metrics = append(s.metrics, metrics...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||||
|
// expression resolves into no concrete indices.
|
||||||
|
// (This includes `_all` string or when no indices have been specified).
|
||||||
|
func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
|
||||||
|
s.allowNoIndices = &allowNoIndices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both.
|
||||||
|
func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
|
||||||
|
s.expandWildcards = expandWildcards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlatSettings, when set, returns settings in flat format (default: false).
|
||||||
|
func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
|
||||||
|
s.flatSettings = &flatSettings
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||||
|
// ignored when unavailable (missing or closed).
|
||||||
|
func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
|
||||||
|
s.ignoreUnavailable = &ignoreUnavailable
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Local indicates whether to return local information. When set, it does not
|
||||||
|
// retrieve the state from master node (default: false).
|
||||||
|
func (s *ClusterStateService) Local(local bool) *ClusterStateService {
|
||||||
|
s.local = &local
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MasterTimeout specifies timeout for connection to master.
|
||||||
|
func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
|
||||||
|
s.masterTimeout = masterTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *ClusterStateService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
metrics := strings.Join(s.metrics, ",")
|
||||||
|
if metrics == "" {
|
||||||
|
metrics = "_all"
|
||||||
|
}
|
||||||
|
indices := strings.Join(s.indices, ",")
|
||||||
|
if indices == "" {
|
||||||
|
indices = "_all"
|
||||||
|
}
|
||||||
|
path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
|
||||||
|
"metrics": metrics,
|
||||||
|
"indices": indices,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
if s.flatSettings != nil {
|
||||||
|
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.local != nil {
|
||||||
|
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *ClusterStateService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "GET",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(ClusterStateResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterStateResponse is the response of ClusterStateService.Do.
|
||||||
|
type ClusterStateResponse struct {
|
||||||
|
ClusterName string `json:"cluster_name"`
|
||||||
|
ClusterUUID string `json:"cluster_uuid"`
|
||||||
|
Version int64 `json:"version"`
|
||||||
|
StateUUID string `json:"state_uuid"`
|
||||||
|
MasterNode string `json:"master_node"`
|
||||||
|
Blocks map[string]*clusterBlocks `json:"blocks"`
|
||||||
|
Nodes map[string]*discoveryNode `json:"nodes"`
|
||||||
|
Metadata *clusterStateMetadata `json:"metadata"`
|
||||||
|
RoutingTable *clusterStateRoutingTable `json:"routing_table"`
|
||||||
|
RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"`
|
||||||
|
Customs map[string]interface{} `json:"customs"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterBlocks struct {
|
||||||
|
Global map[string]*clusterBlock `json:"global"` // id -> cluster block
|
||||||
|
Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterBlock struct {
|
||||||
|
Description string `json:"description"`
|
||||||
|
Retryable bool `json:"retryable"`
|
||||||
|
DisableStatePersistence bool `json:"disable_state_persistence"`
|
||||||
|
Levels []string `json:"levels"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterStateMetadata struct {
|
||||||
|
ClusterUUID string `json:"cluster_uuid"`
|
||||||
|
ClusterUUIDCommitted string `json:"cluster_uuid_committed"`
|
||||||
|
ClusterCoordination *clusterCoordinationMetaData `json:"cluster_coordination"`
|
||||||
|
Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
|
||||||
|
Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data
|
||||||
|
RoutingTable struct {
|
||||||
|
Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
|
||||||
|
} `json:"routing_table"`
|
||||||
|
RoutingNodes struct {
|
||||||
|
Unassigned []*shardRouting `json:"unassigned"`
|
||||||
|
Nodes []*shardRouting `json:"nodes"`
|
||||||
|
} `json:"routing_nodes"`
|
||||||
|
Customs map[string]interface{} `json:"customs"`
|
||||||
|
Ingest map[string]interface{} `json:"ingest"`
|
||||||
|
StoredScripts map[string]interface{} `json:"stored_scripts"`
|
||||||
|
IndexGraveyard map[string]interface{} `json:"index-graveyard"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterCoordinationMetaData struct {
|
||||||
|
Term int64 `json:"term"`
|
||||||
|
LastCommittedConfig interface{} `json:"last_committed_config,omitempty"`
|
||||||
|
LastAcceptedConfig interface{} `json:"last_accepted_config,omitempty"`
|
||||||
|
VotingConfigExclusions []interface{} `json:"voting_config_exclusions,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type discoveryNode struct {
|
||||||
|
Name string `json:"name"` // server name, e.g. "es1"
|
||||||
|
EphemeralID string `json:"ephemeral_id"` // e.g. "paHSLpn6QyuVy_n-GM1JAQ"
|
||||||
|
TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
|
||||||
|
Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true }
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterStateRoutingTable struct {
|
||||||
|
Indices map[string]interface{} `json:"indices"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterStateRoutingNode struct {
|
||||||
|
Unassigned []*shardRouting `json:"unassigned"`
|
||||||
|
// Node Id -> shardRouting
|
||||||
|
Nodes map[string][]*shardRouting `json:"nodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexTemplateMetaData struct {
|
||||||
|
IndexPatterns []string `json:"index_patterns"` // e.g. ["store-*"]
|
||||||
|
Order int `json:"order"`
|
||||||
|
Settings map[string]interface{} `json:"settings"` // index settings
|
||||||
|
Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexMetaData struct {
|
||||||
|
State string `json:"state"`
|
||||||
|
Settings map[string]interface{} `json:"settings"`
|
||||||
|
Mappings map[string]interface{} `json:"mappings"`
|
||||||
|
Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ]
|
||||||
|
PrimaryTerms map[string]interface{} `json:"primary_terms"`
|
||||||
|
InSyncAllocations map[string]interface{} `json:"in_sync_allocations"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexRoutingTable struct {
|
||||||
|
Shards map[string]*shardRouting `json:"shards"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type shardRouting struct {
|
||||||
|
State string `json:"state"`
|
||||||
|
Primary bool `json:"primary"`
|
||||||
|
Node string `json:"node"`
|
||||||
|
RelocatingNode string `json:"relocating_node"`
|
||||||
|
Shard int `json:"shard"`
|
||||||
|
Index string `json:"index"`
|
||||||
|
Version int64 `json:"version"`
|
||||||
|
RestoreSource *RestoreSource `json:"restore_source"`
|
||||||
|
AllocationId *allocationId `json:"allocation_id"`
|
||||||
|
UnassignedInfo *unassignedInfo `json:"unassigned_info"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RestoreSource struct {
|
||||||
|
Repository string `json:"repository"`
|
||||||
|
Snapshot string `json:"snapshot"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
Index string `json:"index"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type allocationId struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
RelocationId string `json:"relocation_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type unassignedInfo struct {
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
At string `json:"at"`
|
||||||
|
Details string `json:"details"`
|
||||||
|
}
|
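A short sketch of how the state service above is typically called, assuming an existing connected client; the index name is a placeholder and the required imports are noted in the comment.

// Sketch, not part of the patch: assumes imports "context", "log" and
// elastic "github.com/olivere/elastic/v7", plus a connected *elastic.Client.
func printClusterState(ctx context.Context, client *elastic.Client) error {
    state, err := elastic.NewClusterStateService(client).
        Metric("metadata", "routing_table").
        Index("my-index"). // hypothetical index name
        Do(ctx)
    if err != nil {
        return err
    }
    log.Printf("cluster %s, state version %d, master node %s",
        state.ClusterName, state.Version, state.MasterNode)
    return nil
}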
409 vendor/github.com/olivere/elastic/v7/cluster_stats.go generated vendored Normal file
@@ -0,0 +1,409 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterStatsService is documented at
|
||||||
|
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-stats.html.
|
||||||
|
type ClusterStatsService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
nodeId []string
|
||||||
|
flatSettings *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClusterStatsService creates a new ClusterStatsService.
|
||||||
|
func NewClusterStatsService(client *Client) *ClusterStatsService {
|
||||||
|
return &ClusterStatsService{
|
||||||
|
client: client,
|
||||||
|
nodeId: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *ClusterStatsService) ErrorTrace(errorTrace bool) *ClusterStatsService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *ClusterStatsService) FilterPath(filterPath ...string) *ClusterStatsService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *ClusterStatsService) Header(name string, value string) *ClusterStatsService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *ClusterStatsService) Headers(headers http.Header) *ClusterStatsService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
|
||||||
|
func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
|
||||||
|
s.nodeId = nodeId
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlatSettings is documented as: Return settings in flat format (default: false).
|
||||||
|
func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
|
||||||
|
s.flatSettings = &flatSettings
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
|
||||||
|
if len(s.nodeId) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
|
||||||
|
"node_id": strings.Join(s.nodeId, ","),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.flatSettings != nil {
|
||||||
|
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *ClusterStatsService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "GET",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(ClusterStatsResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterStatsResponse is the response of ClusterStatsService.Do.
|
||||||
|
type ClusterStatsResponse struct {
|
||||||
|
NodesStats *ShardsInfo `json:"_nodes,omitempty"`
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
ClusterName string `json:"cluster_name"`
|
||||||
|
ClusterUUID string `json:"cluster_uuid"`
|
||||||
|
Status string `json:"status,omitempty"` // e.g. green
|
||||||
|
Indices *ClusterStatsIndices `json:"indices"`
|
||||||
|
Nodes *ClusterStatsNodes `json:"nodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndices struct {
|
||||||
|
Count int `json:"count"` // number of indices
|
||||||
|
Shards *ClusterStatsIndicesShards `json:"shards"`
|
||||||
|
Docs *ClusterStatsIndicesDocs `json:"docs"`
|
||||||
|
Store *ClusterStatsIndicesStore `json:"store"`
|
||||||
|
FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
|
||||||
|
QueryCache *ClusterStatsIndicesQueryCache `json:"query_cache"`
|
||||||
|
Completion *ClusterStatsIndicesCompletion `json:"completion"`
|
||||||
|
Segments *IndexStatsSegments `json:"segments"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesShards struct {
|
||||||
|
Total int `json:"total"`
|
||||||
|
Primaries int `json:"primaries"`
|
||||||
|
Replication float64 `json:"replication"`
|
||||||
|
Index *ClusterStatsIndicesShardsIndex `json:"index"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesShardsIndex struct {
|
||||||
|
Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"`
|
||||||
|
Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"`
|
||||||
|
Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesShardsIndexIntMinMax struct {
|
||||||
|
Min int `json:"min"`
|
||||||
|
Max int `json:"max"`
|
||||||
|
Avg float64 `json:"avg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
|
||||||
|
Min float64 `json:"min"`
|
||||||
|
Max float64 `json:"max"`
|
||||||
|
Avg float64 `json:"avg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesDocs struct {
|
||||||
|
Count int `json:"count"`
|
||||||
|
Deleted int `json:"deleted"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesStore struct {
|
||||||
|
Size string `json:"size"` // e.g. "5.3gb"
|
||||||
|
SizeInBytes int64 `json:"size_in_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesFieldData struct {
|
||||||
|
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
|
||||||
|
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
|
||||||
|
Evictions int64 `json:"evictions"`
|
||||||
|
Fields map[string]struct {
|
||||||
|
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
|
||||||
|
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
|
||||||
|
} `json:"fields,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesQueryCache struct {
|
||||||
|
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
|
||||||
|
MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
|
||||||
|
TotalCount int64 `json:"total_count"`
|
||||||
|
HitCount int64 `json:"hit_count"`
|
||||||
|
MissCount int64 `json:"miss_count"`
|
||||||
|
CacheSize int64 `json:"cache_size"`
|
||||||
|
CacheCount int64 `json:"cache_count"`
|
||||||
|
Evictions int64 `json:"evictions"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesCompletion struct {
|
||||||
|
Size string `json:"size"` // e.g. "61.3kb"
|
||||||
|
SizeInBytes int64 `json:"size_in_bytes"`
|
||||||
|
Fields map[string]struct {
|
||||||
|
Size string `json:"size"` // e.g. "61.3kb"
|
||||||
|
SizeInBytes int64 `json:"size_in_bytes"`
|
||||||
|
} `json:"fields,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsIndicesSegmentsFile struct {
|
||||||
|
Size string `json:"size"` // e.g. "61.3kb"
|
||||||
|
SizeInBytes int64 `json:"size_in_bytes"`
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---
|
||||||
|
|
||||||
|
type ClusterStatsNodes struct {
|
||||||
|
Count *ClusterStatsNodesCount `json:"count"`
|
||||||
|
Versions []string `json:"versions"`
|
||||||
|
OS *ClusterStatsNodesOsStats `json:"os"`
|
||||||
|
Process *ClusterStatsNodesProcessStats `json:"process"`
|
||||||
|
JVM *ClusterStatsNodesJvmStats `json:"jvm"`
|
||||||
|
FS *ClusterStatsNodesFsStats `json:"fs"`
|
||||||
|
Plugins []*ClusterStatsNodesPlugin `json:"plugins"`
|
||||||
|
|
||||||
|
NetworkTypes *ClusterStatsNodesNetworkTypes `json:"network_types"`
|
||||||
|
DiscoveryTypes *ClusterStatsNodesDiscoveryTypes `json:"discovery_types"`
|
||||||
|
PackagingTypes *ClusterStatsNodesPackagingTypes `json:"packaging_types"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesCount struct {
|
||||||
|
Total int `json:"total"`
|
||||||
|
Data int `json:"data"`
|
||||||
|
CoordinatingOnly int `json:"coordinating_only"`
|
||||||
|
Master int `json:"master"`
|
||||||
|
Ingest int `json:"ingest"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesOsStats struct {
|
||||||
|
AvailableProcessors int `json:"available_processors"`
|
||||||
|
AllocatedProcessors int `json:"allocated_processors"`
|
||||||
|
Names []struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value int `json:"count"`
|
||||||
|
} `json:"names"`
|
||||||
|
PrettyNames []struct {
|
||||||
|
PrettyName string `json:"pretty_name"`
|
||||||
|
Value int `json:"count"`
|
||||||
|
} `json:"pretty_names"`
|
||||||
|
Mem *ClusterStatsNodesOsStatsMem `json:"mem"`
|
||||||
|
// CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesOsStatsMem struct {
|
||||||
|
Total string `json:"total"` // e.g. "16gb"
|
||||||
|
TotalInBytes int64 `json:"total_in_bytes"`
|
||||||
|
Free string `json:"free"` // e.g. "12gb"
|
||||||
|
FreeInBytes int64 `json:"free_in_bytes"`
|
||||||
|
Used string `json:"used"` // e.g. "4gb"
|
||||||
|
UsedInBytes int64 `json:"used_in_bytes"`
|
||||||
|
FreePercent int `json:"free_percent"`
|
||||||
|
UsedPercent int `json:"used_percent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesOsStatsCPU struct {
|
||||||
|
Vendor string `json:"vendor"`
|
||||||
|
Model string `json:"model"`
|
||||||
|
MHz int `json:"mhz"`
|
||||||
|
TotalCores int `json:"total_cores"`
|
||||||
|
TotalSockets int `json:"total_sockets"`
|
||||||
|
CoresPerSocket int `json:"cores_per_socket"`
|
||||||
|
CacheSize string `json:"cache_size"` // e.g. "256b"
|
||||||
|
CacheSizeInBytes int64 `json:"cache_size_in_bytes"`
|
||||||
|
Count int `json:"count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesProcessStats struct {
|
||||||
|
CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"`
|
||||||
|
OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesProcessStatsCPU struct {
|
||||||
|
Percent float64 `json:"percent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
|
||||||
|
Min int64 `json:"min"`
|
||||||
|
Max int64 `json:"max"`
|
||||||
|
Avg int64 `json:"avg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesJvmStats struct {
|
||||||
|
MaxUptime string `json:"max_uptime"` // e.g. "5h"
|
||||||
|
MaxUptimeInMillis int64 `json:"max_uptime_in_millis"`
|
||||||
|
Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
|
||||||
|
Mem *ClusterStatsNodesJvmStatsMem `json:"mem"`
|
||||||
|
Threads int64 `json:"threads"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesJvmStatsVersion struct {
|
||||||
|
Version string `json:"version"` // e.g. "1.8.0_45"
|
||||||
|
VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
|
||||||
|
VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
|
||||||
|
VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
|
||||||
|
BundledJDK bool `json:"bundled_jdk"`
|
||||||
|
UsingBundledJDK bool `json:"using_bundled_jdk"`
|
||||||
|
Count int `json:"count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesJvmStatsMem struct {
|
||||||
|
HeapUsed string `json:"heap_used"`
|
||||||
|
HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
|
||||||
|
HeapMax string `json:"heap_max"`
|
||||||
|
HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesFsStats struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
Mount string `json:"mount"`
|
||||||
|
Dev string `json:"dev"`
|
||||||
|
Total string `json:"total"` // e.g. "930.7gb"`
|
||||||
|
TotalInBytes int64 `json:"total_in_bytes"`
|
||||||
|
Free string `json:"free"` // e.g. "930.7gb"`
|
||||||
|
FreeInBytes int64 `json:"free_in_bytes"`
|
||||||
|
Available string `json:"available"` // e.g. "930.7gb"`
|
||||||
|
AvailableInBytes int64 `json:"available_in_bytes"`
|
||||||
|
DiskReads int64 `json:"disk_reads"`
|
||||||
|
DiskWrites int64 `json:"disk_writes"`
|
||||||
|
DiskIOOp int64 `json:"disk_io_op"`
|
||||||
|
DiskReadSize string `json:"disk_read_size"` // e.g. "0b"`
|
||||||
|
DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"`
|
||||||
|
DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"`
|
||||||
|
DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"`
|
||||||
|
DiskIOSize string `json:"disk_io_size"` // e.g. "0b"`
|
||||||
|
DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"`
|
||||||
|
DiskQueue string `json:"disk_queue"`
|
||||||
|
DiskServiceTime string `json:"disk_service_time"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesPlugin struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
URL string `json:"url"`
|
||||||
|
JVM bool `json:"jvm"`
|
||||||
|
Site bool `json:"site"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesNetworkTypes struct {
|
||||||
|
TransportTypes map[string]interface{} `json:"transport_types"` // e.g. "netty4": 1
|
||||||
|
HTTPTypes map[string]interface{} `json:"http_types"` // e.g. "netty4": 1
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterStatsNodesDiscoveryTypes interface{}
|
||||||
|
|
||||||
|
type ClusterStatsNodesPackagingTypes []*ClusterStatsNodesPackagingType
|
||||||
|
|
||||||
|
type ClusterStatsNodesPackagingType struct {
|
||||||
|
Flavor string `json:"flavor"` // e.g. "oss"
|
||||||
|
Type string `json:"type"` // e.g. "docker"
|
||||||
|
Count int `json:"count"` // e.g. 1
|
||||||
|
}
|
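Likewise, a hedged sketch of calling the stats service above and reading a few of the response fields declared in this file; imports and the connected client are assumed as in the previous sketch.

// Sketch, not part of the patch: assumes imports "context", "log" and
// elastic "github.com/olivere/elastic/v7", plus a connected *elastic.Client.
func printClusterStats(ctx context.Context, client *elastic.Client) error {
    stats, err := elastic.NewClusterStatsService(client).
        Human(true). // sizes such as JVM heap come back as strings like "4gb"
        Do(ctx)
    if err != nil {
        return err
    }
    log.Printf("cluster %s: %d indices, %d data node(s), JVM heap used %s",
        stats.ClusterName, stats.Indices.Count, stats.Nodes.Count.Data,
        stats.Nodes.JVM.Mem.HeapUsed)
    return nil
}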
87 vendor/github.com/olivere/elastic/v7/config/config.go generated vendored Normal file
@@ -0,0 +1,87 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package config

import (
    "fmt"
    "net/url"
    "strconv"
    "strings"
)

// Config represents an Elasticsearch configuration.
type Config struct {
    URL         string
    Index       string
    Username    string
    Password    string
    Shards      int
    Replicas    int
    Sniff       *bool
    Healthcheck *bool
    Infolog     string
    Errorlog    string
    Tracelog    string
}

// Parse returns the Elasticsearch configuration by extracting it
// from the URL, its path, and its query string.
//
// Example:
//   http://127.0.0.1:9200/store-blobs?shards=1&replicas=0&sniff=false&tracelog=elastic.trace.log
//
// The code above will return a URL of http://127.0.0.1:9200, an index name
// of store-blobs, and the related settings from the query string.
func Parse(elasticURL string) (*Config, error) {
    cfg := &Config{
        Shards:   1,
        Replicas: 0,
        Sniff:    nil,
    }

    uri, err := url.Parse(elasticURL)
    if err != nil {
        return nil, fmt.Errorf("error parsing elastic parameter %q: %v", elasticURL, err)
    }
    index := strings.TrimSuffix(strings.TrimPrefix(uri.Path, "/"), "/")
    if uri.User != nil {
        cfg.Username = uri.User.Username()
        cfg.Password, _ = uri.User.Password()
    }
    uri.User = nil

    if i, err := strconv.Atoi(uri.Query().Get("shards")); err == nil {
        cfg.Shards = i
    }
    if i, err := strconv.Atoi(uri.Query().Get("replicas")); err == nil {
        cfg.Replicas = i
    }
    if s := uri.Query().Get("sniff"); s != "" {
        if b, err := strconv.ParseBool(s); err == nil {
            cfg.Sniff = &b
        }
    }
    if s := uri.Query().Get("healthcheck"); s != "" {
        if b, err := strconv.ParseBool(s); err == nil {
            cfg.Healthcheck = &b
        }
    }
    if s := uri.Query().Get("infolog"); s != "" {
        cfg.Infolog = s
    }
    if s := uri.Query().Get("errorlog"); s != "" {
        cfg.Errorlog = s
    }
    if s := uri.Query().Get("tracelog"); s != "" {
        cfg.Tracelog = s
    }

    uri.Path = ""
    uri.RawQuery = ""
    cfg.URL = uri.String()
    cfg.Index = index

    return cfg, nil
}
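To illustrate what Parse above extracts from such a connection string, a small self-contained sketch; the URL, credentials and index name are placeholders, not values from this patch.

package main

import (
    "fmt"

    "github.com/olivere/elastic/v7/config"
)

func main() {
    // Hypothetical connection string with credentials, an index name and settings.
    cfg, err := config.Parse("http://user:secret@127.0.0.1:9200/my-index?shards=2&replicas=1&sniff=false")
    if err != nil {
        panic(err)
    }
    fmt.Println(cfg.URL)                  // http://127.0.0.1:9200 (user info moved to Username/Password)
    fmt.Println(cfg.Index)                // my-index
    fmt.Println(cfg.Shards, cfg.Replicas) // 2 1
    fmt.Println(*cfg.Sniff)               // false
}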
9 vendor/github.com/olivere/elastic/v7/config/doc.go generated vendored Normal file
@@ -0,0 +1,9 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

/*
Package config allows parsing a configuration for Elasticsearch
from a URL.
*/
package config
90 vendor/github.com/olivere/elastic/v7/connection.go generated vendored Normal file
@@ -0,0 +1,90 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "fmt"
    "sync"
    "time"
)

// conn represents a single connection to a node in a cluster.
type conn struct {
    sync.RWMutex
    nodeID    string // node ID
    url       string
    failures  int
    dead      bool
    deadSince *time.Time
}

// newConn creates a new connection to the given URL.
func newConn(nodeID, url string) *conn {
    c := &conn{
        nodeID: nodeID,
        url:    url,
    }
    return c
}

// String returns a representation of the connection status.
func (c *conn) String() string {
    c.RLock()
    defer c.RUnlock()
    return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
}

// NodeID returns the ID of the node of this connection.
func (c *conn) NodeID() string {
    c.RLock()
    defer c.RUnlock()
    return c.nodeID
}

// URL returns the URL of this connection.
func (c *conn) URL() string {
    c.RLock()
    defer c.RUnlock()
    return c.url
}

// IsDead returns true if this connection is marked as dead, i.e. a previous
// request to the URL has been unsuccessful.
func (c *conn) IsDead() bool {
    c.RLock()
    defer c.RUnlock()
    return c.dead
}

// MarkAsDead marks this connection as dead, increments the failures
// counter and stores the current time in dead since.
func (c *conn) MarkAsDead() {
    c.Lock()
    c.dead = true
    if c.deadSince == nil {
        utcNow := time.Now().UTC()
        c.deadSince = &utcNow
    }
    c.failures += 1
    c.Unlock()
}

// MarkAsAlive marks this connection as eligible to be returned from the
// pool of connections by the selector.
func (c *conn) MarkAsAlive() {
    c.Lock()
    c.dead = false
    c.Unlock()
}

// MarkAsHealthy marks this connection as healthy, i.e. a request has been
// successfully performed with it.
func (c *conn) MarkAsHealthy() {
    c.Lock()
    c.dead = false
    c.deadSince = nil
    c.failures = 0
    c.Unlock()
}
381 vendor/github.com/olivere/elastic/v7/count.go generated vendored Normal file
@@ -0,0 +1,381 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CountService is a convenient service for determining the
|
||||||
|
// number of documents in an index. Use SearchService with
|
||||||
|
// a SearchType of count for counting with queries etc.
|
||||||
|
type CountService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
typ []string
|
||||||
|
allowNoIndices *bool
|
||||||
|
analyzeWildcard *bool
|
||||||
|
analyzer string
|
||||||
|
defaultOperator string
|
||||||
|
df string
|
||||||
|
expandWildcards string
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
lenient *bool
|
||||||
|
lowercaseExpandedTerms *bool
|
||||||
|
minScore interface{}
|
||||||
|
preference string
|
||||||
|
q string
|
||||||
|
query Query
|
||||||
|
routing string
|
||||||
|
terminateAfter *int
|
||||||
|
bodyJson interface{}
|
||||||
|
bodyString string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCountService creates a new CountService.
|
||||||
|
func NewCountService(client *Client) *CountService {
|
||||||
|
return &CountService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *CountService) Pretty(pretty bool) *CountService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *CountService) Human(human bool) *CountService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *CountService) ErrorTrace(errorTrace bool) *CountService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *CountService) FilterPath(filterPath ...string) *CountService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *CountService) Header(name string, value string) *CountService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *CountService) Headers(headers http.Header) *CountService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index sets the names of the indices to restrict the results.
|
||||||
|
func (s *CountService) Index(index ...string) *CountService {
|
||||||
|
if s.index == nil {
|
||||||
|
s.index = make([]string, 0)
|
||||||
|
}
|
||||||
|
s.index = append(s.index, index...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type sets the types to use to restrict the results.
|
||||||
|
//
|
||||||
|
// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to
|
||||||
|
// filter on a field on the document.
|
||||||
|
func (s *CountService) Type(typ ...string) *CountService {
|
||||||
|
if s.typ == nil {
|
||||||
|
s.typ = make([]string, 0)
|
||||||
|
}
|
||||||
|
s.typ = append(s.typ, typ...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||||
|
// expression resolves into no concrete indices. (This includes "_all" string
// or when no indices have been specified).
func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService {
    s.allowNoIndices = &allowNoIndices
    return s
}

// AnalyzeWildcard specifies whether wildcard and prefix queries should be
// analyzed (default: false).
func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService {
    s.analyzeWildcard = &analyzeWildcard
    return s
}

// Analyzer specifies the analyzer to use for the query string.
func (s *CountService) Analyzer(analyzer string) *CountService {
    s.analyzer = analyzer
    return s
}

// DefaultOperator specifies the default operator for query string query (AND or OR).
func (s *CountService) DefaultOperator(defaultOperator string) *CountService {
    s.defaultOperator = defaultOperator
    return s
}

// Df specifies the field to use as default where no field prefix is given
// in the query string.
func (s *CountService) Df(df string) *CountService {
    s.df = df
    return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *CountService) ExpandWildcards(expandWildcards string) *CountService {
    s.expandWildcards = expandWildcards
    return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
    s.ignoreUnavailable = &ignoreUnavailable
    return s
}

// Lenient specifies whether format-based query failures (such as
// providing text to a numeric field) should be ignored.
func (s *CountService) Lenient(lenient bool) *CountService {
    s.lenient = &lenient
    return s
}

// LowercaseExpandedTerms specifies whether query terms should be lowercased.
func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService {
    s.lowercaseExpandedTerms = &lowercaseExpandedTerms
    return s
}

// MinScore indicates to include only documents with a specific `_score`
// value in the result.
func (s *CountService) MinScore(minScore interface{}) *CountService {
    s.minScore = minScore
    return s
}

// Preference specifies the node or shard the operation should be
// performed on (default: random).
func (s *CountService) Preference(preference string) *CountService {
    s.preference = preference
    return s
}

// Q in the Lucene query string syntax. You can also use Query to pass
// a Query struct.
func (s *CountService) Q(q string) *CountService {
    s.q = q
    return s
}

// Query specifies the query to pass. You can also pass a query string with Q.
func (s *CountService) Query(query Query) *CountService {
    s.query = query
    return s
}

// Routing specifies the routing value.
func (s *CountService) Routing(routing string) *CountService {
    s.routing = routing
    return s
}

// TerminateAfter indicates the maximum count for each shard, upon reaching
// which the query execution will terminate early.
func (s *CountService) TerminateAfter(terminateAfter int) *CountService {
    s.terminateAfter = &terminateAfter
    return s
}

// BodyJson specifies the query to restrict the results specified with the
// Query DSL (optional). The interface{} will be serialized to a JSON document,
// so use a map[string]interface{}.
func (s *CountService) BodyJson(body interface{}) *CountService {
    s.bodyJson = body
    return s
}

// Body specifies a query to restrict the results specified with
// the Query DSL (optional).
func (s *CountService) BodyString(body string) *CountService {
    s.bodyString = body
    return s
}

// buildURL builds the URL for the operation.
func (s *CountService) buildURL() (string, url.Values, error) {
    var err error
    var path string

    if len(s.index) > 0 && len(s.typ) > 0 {
        path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{
            "index": strings.Join(s.index, ","),
            "type":  strings.Join(s.typ, ","),
        })
    } else if len(s.index) > 0 {
        path, err = uritemplates.Expand("/{index}/_count", map[string]string{
            "index": strings.Join(s.index, ","),
        })
    } else if len(s.typ) > 0 {
        path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{
            "type": strings.Join(s.typ, ","),
        })
    } else {
        path = "/_all/_count"
    }
    if err != nil {
        return "", url.Values{}, err
    }

    // Add query string parameters
    params := url.Values{}
    if v := s.pretty; v != nil {
        params.Set("pretty", fmt.Sprint(*v))
    }
    if v := s.human; v != nil {
        params.Set("human", fmt.Sprint(*v))
    }
    if v := s.errorTrace; v != nil {
        params.Set("error_trace", fmt.Sprint(*v))
    }
    if len(s.filterPath) > 0 {
        params.Set("filter_path", strings.Join(s.filterPath, ","))
    }
    if s.allowNoIndices != nil {
        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
    }
    if s.analyzeWildcard != nil {
        params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
    }
    if s.analyzer != "" {
        params.Set("analyzer", s.analyzer)
    }
    if s.defaultOperator != "" {
        params.Set("default_operator", s.defaultOperator)
    }
    if s.df != "" {
        params.Set("df", s.df)
    }
    if s.expandWildcards != "" {
        params.Set("expand_wildcards", s.expandWildcards)
    }
    if s.ignoreUnavailable != nil {
        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
    }
    if s.lenient != nil {
        params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
    }
    if s.lowercaseExpandedTerms != nil {
        params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
    }
    if s.minScore != nil {
        params.Set("min_score", fmt.Sprintf("%v", s.minScore))
    }
    if s.preference != "" {
        params.Set("preference", s.preference)
    }
    if s.q != "" {
        params.Set("q", s.q)
    }
    if s.routing != "" {
        params.Set("routing", s.routing)
    }
    if s.terminateAfter != nil {
        params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
    }
    return path, params, nil
}

// Validate checks if the operation is valid.
func (s *CountService) Validate() error {
    return nil
}

// Do executes the operation.
func (s *CountService) Do(ctx context.Context) (int64, error) {
    // Check pre-conditions
    if err := s.Validate(); err != nil {
        return 0, err
    }

    // Get URL for request
    path, params, err := s.buildURL()
    if err != nil {
        return 0, err
    }

    // Setup HTTP request body
    var body interface{}
    if s.query != nil {
        src, err := s.query.Source()
        if err != nil {
            return 0, err
        }
        query := make(map[string]interface{})
        query["query"] = src
        body = query
    } else if s.bodyJson != nil {
        body = s.bodyJson
    } else if s.bodyString != "" {
        body = s.bodyString
    }

    // Get HTTP response
    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
        Method:  "POST",
        Path:    path,
        Params:  params,
        Body:    body,
        Headers: s.headers,
    })
    if err != nil {
        return 0, err
    }

    // Return result
    ret := new(CountResponse)
    if err := s.client.decoder.Decode(res.Body, ret); err != nil {
        return 0, err
    }
    if ret != nil {
        return ret.Count, nil
    }

    return int64(0), nil
}

// CountResponse is the response of using the Count API.
type CountResponse struct {
    Count           int64       `json:"count"`
    TerminatedEarly bool        `json:"terminated_early,omitempty"`
    Shards          *ShardsInfo `json:"_shards,omitempty"`
}
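For orientation, a minimal sketch of how a caller could use this count service; it assumes an already-connected *elastic.Client named client plus the context and fmt imports, and the index and field names are purely illustrative:

    // Count documents matching a term query (illustrative names throughout).
    count, err := client.Count("gitea_issues").
        Query(elastic.NewTermQuery("repo_id", 5)).
        Do(context.Background())
    if err != nil {
        // Handle error
    }
    fmt.Printf("%d matching documents\n", count)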
38  vendor/github.com/olivere/elastic/v7/decoder.go  generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
    "bytes"
    "encoding/json"
)

// Decoder is used to decode responses from Elasticsearch.
// Users of elastic can implement their own marshaler for advanced purposes
// and set them per Client (see SetDecoder). If none is specified,
// DefaultDecoder is used.
type Decoder interface {
    Decode(data []byte, v interface{}) error
}

// DefaultDecoder uses json.Unmarshal from the Go standard library
// to decode JSON data.
type DefaultDecoder struct{}

// Decode decodes with json.Unmarshal from the Go standard library.
func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
    return json.Unmarshal(data, v)
}

// NumberDecoder uses json.NewDecoder, with UseNumber() enabled, from
// the Go standard library to decode JSON data.
type NumberDecoder struct{}

// Decode decodes with json.Unmarshal from the Go standard library.
func (u *NumberDecoder) Decode(data []byte, v interface{}) error {
    dec := json.NewDecoder(bytes.NewReader(data))
    dec.UseNumber()
    return dec.Decode(v)
}
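A hedged sketch of swapping in the NumberDecoder via the SetDecoder client option mentioned above, so numeric JSON values are decoded as json.Number rather than float64; the URL is illustrative:

    // Build a client that keeps full numeric precision when decoding responses.
    client, err := elastic.NewClient(
        elastic.SetURL("http://127.0.0.1:9200"),
        elastic.SetDecoder(&elastic.NumberDecoder{}),
    )
    if err != nil {
        // Handle error
    }
    _ = client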
305  vendor/github.com/olivere/elastic/v7/delete.go  generated vendored Normal file
@@ -0,0 +1,305 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeleteService allows to delete a typed JSON document from a specified
|
||||||
|
// index based on its id.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete.html
|
||||||
|
// for details.
|
||||||
|
type DeleteService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
id string
|
||||||
|
index string
|
||||||
|
typ string
|
||||||
|
routing string
|
||||||
|
timeout string
|
||||||
|
version interface{}
|
||||||
|
versionType string
|
||||||
|
waitForActiveShards string
|
||||||
|
parent string
|
||||||
|
refresh string
|
||||||
|
ifSeqNo *int64
|
||||||
|
ifPrimaryTerm *int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDeleteService creates a new DeleteService.
|
||||||
|
func NewDeleteService(client *Client) *DeleteService {
|
||||||
|
return &DeleteService{
|
||||||
|
client: client,
|
||||||
|
typ: "_doc",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *DeleteService) Pretty(pretty bool) *DeleteService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *DeleteService) Human(human bool) *DeleteService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *DeleteService) ErrorTrace(errorTrace bool) *DeleteService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *DeleteService) FilterPath(filterPath ...string) *DeleteService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *DeleteService) Header(name string, value string) *DeleteService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *DeleteService) Headers(headers http.Header) *DeleteService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type is the type of the document.
|
||||||
|
//
|
||||||
|
// Deprecated: Types are in the process of being removed.
|
||||||
|
func (s *DeleteService) Type(typ string) *DeleteService {
|
||||||
|
s.typ = typ
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Id is the document ID.
|
||||||
|
func (s *DeleteService) Id(id string) *DeleteService {
|
||||||
|
s.id = id
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is the name of the index.
|
||||||
|
func (s *DeleteService) Index(index string) *DeleteService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing is a specific routing value.
|
||||||
|
func (s *DeleteService) Routing(routing string) *DeleteService {
|
||||||
|
s.routing = routing
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout is an explicit operation timeout.
|
||||||
|
func (s *DeleteService) Timeout(timeout string) *DeleteService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version is an explicit version number for concurrency control.
|
||||||
|
func (s *DeleteService) Version(version interface{}) *DeleteService {
|
||||||
|
s.version = version
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// VersionType is a specific version type.
|
||||||
|
func (s *DeleteService) VersionType(versionType string) *DeleteService {
|
||||||
|
s.versionType = versionType
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForActiveShards sets the number of shard copies that must be active
|
||||||
|
// before proceeding with the delete operation. Defaults to 1, meaning the
|
||||||
|
// primary shard only. Set to `all` for all shard copies, otherwise set to
|
||||||
|
// any non-negative value less than or equal to the total number of copies
|
||||||
|
// for the shard (number of replicas + 1).
|
||||||
|
func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService {
|
||||||
|
s.waitForActiveShards = waitForActiveShards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent is the ID of parent document.
|
||||||
|
func (s *DeleteService) Parent(parent string) *DeleteService {
|
||||||
|
s.parent = parent
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh the index after performing the operation.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
|
||||||
|
// for details.
|
||||||
|
func (s *DeleteService) Refresh(refresh string) *DeleteService {
|
||||||
|
s.refresh = refresh
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfSeqNo indicates to only perform the delete operation if the last
|
||||||
|
// operation that has changed the document has the specified sequence number.
|
||||||
|
func (s *DeleteService) IfSeqNo(seqNo int64) *DeleteService {
|
||||||
|
s.ifSeqNo = &seqNo
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IfPrimaryTerm indicates to only perform the delete operation if the
|
||||||
|
// last operation that has changed the document has the specified primary term.
|
||||||
|
func (s *DeleteService) IfPrimaryTerm(primaryTerm int64) *DeleteService {
|
||||||
|
s.ifPrimaryTerm = &primaryTerm
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *DeleteService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
|
||||||
|
"index": s.index,
|
||||||
|
"type": s.typ,
|
||||||
|
"id": s.id,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.refresh != "" {
|
||||||
|
params.Set("refresh", s.refresh)
|
||||||
|
}
|
||||||
|
if s.routing != "" {
|
||||||
|
params.Set("routing", s.routing)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if v := s.version; v != nil {
|
||||||
|
params.Set("version", fmt.Sprint(v))
|
||||||
|
}
|
||||||
|
if s.versionType != "" {
|
||||||
|
params.Set("version_type", s.versionType)
|
||||||
|
}
|
||||||
|
if s.waitForActiveShards != "" {
|
||||||
|
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||||
|
}
|
||||||
|
if s.parent != "" {
|
||||||
|
params.Set("parent", s.parent)
|
||||||
|
}
|
||||||
|
if v := s.ifSeqNo; v != nil {
|
||||||
|
params.Set("if_seq_no", fmt.Sprintf("%d", *v))
|
||||||
|
}
|
||||||
|
if v := s.ifPrimaryTerm; v != nil {
|
||||||
|
params.Set("if_primary_term", fmt.Sprintf("%d", *v))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *DeleteService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if s.typ == "" {
|
||||||
|
invalid = append(invalid, "Type")
|
||||||
|
}
|
||||||
|
if s.id == "" {
|
||||||
|
invalid = append(invalid, "Id")
|
||||||
|
}
|
||||||
|
if s.index == "" {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation. If the document is not found (404), Elasticsearch will
|
||||||
|
// still return a response. This response is serialized and returned as well. In other
|
||||||
|
// words, for HTTP status code 404, both an error and a response might be returned.
|
||||||
|
func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "DELETE",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
IgnoreErrors: []int{http.StatusNotFound},
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(DeleteResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have a 404, we return both a result and an error, just like ES does
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return ret, &Error{Status: http.StatusNotFound}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of a delete request.
|
||||||
|
|
||||||
|
// DeleteResponse is the outcome of running DeleteService.Do.
|
||||||
|
type DeleteResponse struct {
|
||||||
|
Index string `json:"_index,omitempty"`
|
||||||
|
Type string `json:"_type,omitempty"`
|
||||||
|
Id string `json:"_id,omitempty"`
|
||||||
|
Version int64 `json:"_version,omitempty"`
|
||||||
|
Result string `json:"result,omitempty"`
|
||||||
|
Shards *ShardsInfo `json:"_shards,omitempty"`
|
||||||
|
SeqNo int64 `json:"_seq_no,omitempty"`
|
||||||
|
PrimaryTerm int64 `json:"_primary_term,omitempty"`
|
||||||
|
Status int `json:"status,omitempty"`
|
||||||
|
ForcedRefresh bool `json:"forced_refresh,omitempty"`
|
||||||
|
}
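A short sketch of the delete service in use, relying on the documented behaviour that a 404 returns both a response and an error; client, index and id are illustrative, and IsNotFound is a helper from this library:

    // Delete a single document and treat "not found" as a no-op.
    res, err := client.Delete().
        Index("gitea_issues").
        Id("42").
        Do(context.Background())
    if err != nil && !elastic.IsNotFound(err) {
        // Handle real errors; on a 404 a response is still returned
    }
    if res != nil {
        fmt.Println("result:", res.Result) // e.g. "deleted" or "not_found"
    }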
781  vendor/github.com/olivere/elastic/v7/delete_by_query.go  generated vendored Normal file
@@ -0,0 +1,781 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeleteByQueryService deletes documents that match a query.
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete-by-query.html.
|
||||||
|
type DeleteByQueryService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
typ []string
|
||||||
|
query Query
|
||||||
|
body interface{}
|
||||||
|
xSource []string
|
||||||
|
xSourceExclude []string
|
||||||
|
xSourceInclude []string
|
||||||
|
analyzer string
|
||||||
|
analyzeWildcard *bool
|
||||||
|
allowNoIndices *bool
|
||||||
|
conflicts string
|
||||||
|
defaultOperator string
|
||||||
|
df string
|
||||||
|
docvalueFields []string
|
||||||
|
expandWildcards string
|
||||||
|
explain *bool
|
||||||
|
from *int
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
lenient *bool
|
||||||
|
lowercaseExpandedTerms *bool
|
||||||
|
preference string
|
||||||
|
q string
|
||||||
|
refresh string
|
||||||
|
requestCache *bool
|
||||||
|
requestsPerSecond *int
|
||||||
|
routing []string
|
||||||
|
scroll string
|
||||||
|
scrollSize *int
|
||||||
|
searchTimeout string
|
||||||
|
searchType string
|
||||||
|
size *int
|
||||||
|
slices interface{}
|
||||||
|
sort []string
|
||||||
|
stats []string
|
||||||
|
storedFields []string
|
||||||
|
suggestField string
|
||||||
|
suggestMode string
|
||||||
|
suggestSize *int
|
||||||
|
suggestText string
|
||||||
|
terminateAfter *int
|
||||||
|
timeout string
|
||||||
|
trackScores *bool
|
||||||
|
version *bool
|
||||||
|
waitForActiveShards string
|
||||||
|
waitForCompletion *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDeleteByQueryService creates a new DeleteByQueryService.
|
||||||
|
// You typically use the client's DeleteByQuery to get a reference to
|
||||||
|
// the service.
|
||||||
|
func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
|
||||||
|
builder := &DeleteByQueryService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
return builder
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *DeleteByQueryService) Human(human bool) *DeleteByQueryService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *DeleteByQueryService) ErrorTrace(errorTrace bool) *DeleteByQueryService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *DeleteByQueryService) FilterPath(filterPath ...string) *DeleteByQueryService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *DeleteByQueryService) Header(name string, value string) *DeleteByQueryService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *DeleteByQueryService) Headers(headers http.Header) *DeleteByQueryService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index sets the indices on which to perform the delete operation.
|
||||||
|
func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService {
|
||||||
|
s.index = append(s.index, index...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type limits the delete operation to the given types.
|
||||||
|
//
|
||||||
|
// Deprecated: Types are in the process of being removed. Instead of
|
||||||
|
// using a type, prefer to filter on a field of the document.
|
||||||
|
func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService {
|
||||||
|
s.typ = append(s.typ, typ...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// XSource is true or false to return the _source field or not,
|
||||||
|
// or a list of fields to return.
|
||||||
|
func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService {
|
||||||
|
s.xSource = append(s.xSource, xSource...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// XSourceExclude represents a list of fields to exclude from the returned _source field.
|
||||||
|
func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService {
|
||||||
|
s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// XSourceInclude represents a list of fields to extract and return from the _source field.
|
||||||
|
func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService {
|
||||||
|
s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Analyzer to use for the query string.
|
||||||
|
func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
|
||||||
|
s.analyzer = analyzer
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnalyzeWildcard specifies whether wildcard and prefix queries should be
|
||||||
|
// analyzed (default: false).
|
||||||
|
func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService {
|
||||||
|
s.analyzeWildcard = &analyzeWildcard
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||||
|
// expression resolves into no concrete indices (including the _all string
|
||||||
|
// or when no indices have been specified).
|
||||||
|
func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
|
||||||
|
s.allowNoIndices = &allow
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conflicts indicates what to do when the process detects version conflicts.
|
||||||
|
// Possible values are "proceed" and "abort".
|
||||||
|
func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService {
|
||||||
|
s.conflicts = conflicts
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortOnVersionConflict aborts the request on version conflicts.
|
||||||
|
// It is an alias to setting Conflicts("abort").
|
||||||
|
func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService {
|
||||||
|
s.conflicts = "abort"
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProceedOnVersionConflict aborts the request on version conflicts.
|
||||||
|
// It is an alias to setting Conflicts("proceed").
|
||||||
|
func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService {
|
||||||
|
s.conflicts = "proceed"
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultOperator for query string query (AND or OR).
|
||||||
|
func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
|
||||||
|
s.defaultOperator = defaultOperator
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// DF is the field to use as default where no field prefix is given in the query string.
|
||||||
|
func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
|
||||||
|
s.df = defaultField
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultField is the field to use as default where no field prefix is given in the query string.
|
||||||
|
// It is an alias to the DF func.
|
||||||
|
func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
|
||||||
|
s.df = defaultField
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
|
||||||
|
func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService {
|
||||||
|
s.docvalueFields = docvalueFields
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both. It can be "open" or "closed".
|
||||||
|
func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
|
||||||
|
s.expandWildcards = expand
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Explain specifies whether to return detailed information about score
|
||||||
|
// computation as part of a hit.
|
||||||
|
func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService {
|
||||||
|
s.explain = &explain
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// From is the starting offset (default: 0).
|
||||||
|
func (s *DeleteByQueryService) From(from int) *DeleteByQueryService {
|
||||||
|
s.from = &from
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||||
|
// ignored when unavailable (missing or closed).
|
||||||
|
func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
|
||||||
|
s.ignoreUnavailable = &ignore
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lenient specifies whether format-based query failures
|
||||||
|
// (such as providing text to a numeric field) should be ignored.
|
||||||
|
func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService {
|
||||||
|
s.lenient = &lenient
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// LowercaseExpandedTerms specifies whether query terms should be lowercased.
|
||||||
|
func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService {
|
||||||
|
s.lowercaseExpandedTerms = &lowercaseExpandedTerms
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Preference specifies the node or shard the operation should be performed on
|
||||||
|
// (default: random).
|
||||||
|
func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService {
|
||||||
|
s.preference = preference
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Q specifies the query in Lucene query string syntax. You can also use
|
||||||
|
// Query to programmatically specify the query.
|
||||||
|
func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
|
||||||
|
s.q = query
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryString is an alias to Q. Notice that you can also use Query to
|
||||||
|
// programmatically set the query.
|
||||||
|
func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
|
||||||
|
s.q = query
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query sets the query programmatically.
|
||||||
|
func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
|
||||||
|
s.query = query
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh indicates whether the effected indexes should be refreshed.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
|
||||||
|
// for details.
|
||||||
|
func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService {
|
||||||
|
s.refresh = refresh
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestCache specifies if request cache should be used for this request
|
||||||
|
// or not, defaults to index level setting.
|
||||||
|
func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService {
|
||||||
|
s.requestCache = &requestCache
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestsPerSecond sets the throttle on this request in sub-requests per second.
|
||||||
|
// -1 means set no throttle as does "unlimited" which is the only non-float this accepts.
|
||||||
|
func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService {
|
||||||
|
s.requestsPerSecond = &requestsPerSecond
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing is a list of specific routing values.
|
||||||
|
func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService {
|
||||||
|
s.routing = append(s.routing, routing...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scroll specifies how long a consistent view of the index should be maintained
|
||||||
|
// for scrolled search.
|
||||||
|
func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService {
|
||||||
|
s.scroll = scroll
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrollSize is the size on the scroll request powering the update_by_query.
|
||||||
|
func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService {
|
||||||
|
s.scrollSize = &scrollSize
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchTimeout defines an explicit timeout for each search request.
|
||||||
|
// Defaults to no timeout.
|
||||||
|
func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService {
|
||||||
|
s.searchTimeout = searchTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchType is the search operation type. Possible values are
|
||||||
|
// "query_then_fetch" and "dfs_query_then_fetch".
|
||||||
|
func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService {
|
||||||
|
s.searchType = searchType
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size represents the number of hits to return (default: 10).
|
||||||
|
func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService {
|
||||||
|
s.size = &size
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slices represents the number of slices (default: 1).
|
||||||
|
// It used to be a number, but can be set to "auto" as of 6.7.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete-by-query.html#docs-delete-by-query-automatic-slice
|
||||||
|
// for details.
|
||||||
|
func (s *DeleteByQueryService) Slices(slices interface{}) *DeleteByQueryService {
|
||||||
|
s.slices = slices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort is a list of <field>:<direction> pairs.
|
||||||
|
func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService {
|
||||||
|
s.sort = append(s.sort, sort...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortByField adds a sort order.
|
||||||
|
func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService {
|
||||||
|
if ascending {
|
||||||
|
s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
|
||||||
|
} else {
|
||||||
|
s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats specifies specific tag(s) of the request for logging and statistical purposes.
|
||||||
|
func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService {
|
||||||
|
s.stats = append(s.stats, stats...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoredFields specifies the list of stored fields to return as part of a hit.
|
||||||
|
func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService {
|
||||||
|
s.storedFields = storedFields
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SuggestField specifies which field to use for suggestions.
|
||||||
|
func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService {
|
||||||
|
s.suggestField = suggestField
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SuggestMode specifies the suggest mode. Possible values are
|
||||||
|
// "missing", "popular", and "always".
|
||||||
|
func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService {
|
||||||
|
s.suggestMode = suggestMode
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SuggestSize specifies how many suggestions to return in response.
|
||||||
|
func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService {
|
||||||
|
s.suggestSize = &suggestSize
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SuggestText specifies the source text for which the suggestions should be returned.
|
||||||
|
func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService {
|
||||||
|
s.suggestText = suggestText
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// TerminateAfter indicates the maximum number of documents to collect
|
||||||
|
// for each shard, upon reaching which the query execution will terminate early.
|
||||||
|
func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService {
|
||||||
|
s.terminateAfter = &terminateAfter
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout is the time each individual bulk request should wait for shards
|
||||||
|
// that are unavailable.
|
||||||
|
func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeoutInMillis sets the timeout in milliseconds.
|
||||||
|
func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService {
|
||||||
|
s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrackScores indicates whether to calculate and return scores even if
|
||||||
|
// they are not used for sorting.
|
||||||
|
func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService {
|
||||||
|
s.trackScores = &trackScores
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version specifies whether to return document version as part of a hit.
|
||||||
|
func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService {
|
||||||
|
s.version = &version
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForActiveShards sets the number of shard copies that must be active before proceeding
|
||||||
|
// with the update by query operation. Defaults to 1, meaning the primary shard only.
|
||||||
|
// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
|
||||||
|
// to the total number of copies for the shard (number of replicas + 1).
|
||||||
|
func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService {
|
||||||
|
s.waitForActiveShards = waitForActiveShards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForCompletion indicates if the request should block until the reindex is complete.
|
||||||
|
func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService {
|
||||||
|
s.waitForCompletion = &waitForCompletion
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Body specifies the body of the request. It overrides data being specified via SearchService.
|
||||||
|
func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService {
|
||||||
|
s.body = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *DeleteByQueryService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
if len(s.typ) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
"type": strings.Join(s.typ, ","),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if len(s.xSource) > 0 {
|
||||||
|
params.Set("_source", strings.Join(s.xSource, ","))
|
||||||
|
}
|
||||||
|
if len(s.xSourceExclude) > 0 {
|
||||||
|
params.Set("_source_excludes", strings.Join(s.xSourceExclude, ","))
|
||||||
|
}
|
||||||
|
if len(s.xSourceInclude) > 0 {
|
||||||
|
params.Set("_source_includes", strings.Join(s.xSourceInclude, ","))
|
||||||
|
}
|
||||||
|
if s.analyzer != "" {
|
||||||
|
params.Set("analyzer", s.analyzer)
|
||||||
|
}
|
||||||
|
if s.analyzeWildcard != nil {
|
||||||
|
params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
|
||||||
|
}
|
||||||
|
if s.defaultOperator != "" {
|
||||||
|
params.Set("default_operator", s.defaultOperator)
|
||||||
|
}
|
||||||
|
if s.df != "" {
|
||||||
|
params.Set("df", s.df)
|
||||||
|
}
|
||||||
|
if s.explain != nil {
|
||||||
|
params.Set("explain", fmt.Sprintf("%v", *s.explain))
|
||||||
|
}
|
||||||
|
if len(s.storedFields) > 0 {
|
||||||
|
params.Set("stored_fields", strings.Join(s.storedFields, ","))
|
||||||
|
}
|
||||||
|
if len(s.docvalueFields) > 0 {
|
||||||
|
params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
|
||||||
|
}
|
||||||
|
if s.from != nil {
|
||||||
|
params.Set("from", fmt.Sprintf("%d", *s.from))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.conflicts != "" {
|
||||||
|
params.Set("conflicts", s.conflicts)
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
if s.lenient != nil {
|
||||||
|
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
|
||||||
|
}
|
||||||
|
if s.lowercaseExpandedTerms != nil {
|
||||||
|
params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
|
||||||
|
}
|
||||||
|
if s.preference != "" {
|
||||||
|
params.Set("preference", s.preference)
|
||||||
|
}
|
||||||
|
if s.q != "" {
|
||||||
|
params.Set("q", s.q)
|
||||||
|
}
|
||||||
|
if len(s.routing) > 0 {
|
||||||
|
params.Set("routing", strings.Join(s.routing, ","))
|
||||||
|
}
|
||||||
|
if s.scroll != "" {
|
||||||
|
params.Set("scroll", s.scroll)
|
||||||
|
}
|
||||||
|
if s.searchType != "" {
|
||||||
|
params.Set("search_type", s.searchType)
|
||||||
|
}
|
||||||
|
if s.searchTimeout != "" {
|
||||||
|
params.Set("search_timeout", s.searchTimeout)
|
||||||
|
}
|
||||||
|
if s.size != nil {
|
||||||
|
params.Set("size", fmt.Sprintf("%d", *s.size))
|
||||||
|
}
|
||||||
|
if s.slices != nil {
|
||||||
|
params.Set("slices", fmt.Sprintf("%v", s.slices))
|
||||||
|
}
|
||||||
|
if len(s.sort) > 0 {
|
||||||
|
params.Set("sort", strings.Join(s.sort, ","))
|
||||||
|
}
|
||||||
|
if s.terminateAfter != nil {
|
||||||
|
params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
|
||||||
|
}
|
||||||
|
if len(s.stats) > 0 {
|
||||||
|
params.Set("stats", strings.Join(s.stats, ","))
|
||||||
|
}
|
||||||
|
if s.suggestField != "" {
|
||||||
|
params.Set("suggest_field", s.suggestField)
|
||||||
|
}
|
||||||
|
if s.suggestMode != "" {
|
||||||
|
params.Set("suggest_mode", s.suggestMode)
|
||||||
|
}
|
||||||
|
if s.suggestSize != nil {
|
||||||
|
params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
|
||||||
|
}
|
||||||
|
if s.suggestText != "" {
|
||||||
|
params.Set("suggest_text", s.suggestText)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.trackScores != nil {
|
||||||
|
params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
|
||||||
|
}
|
||||||
|
if s.version != nil {
|
||||||
|
params.Set("version", fmt.Sprintf("%v", *s.version))
|
||||||
|
}
|
||||||
|
if s.requestCache != nil {
|
||||||
|
params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
|
||||||
|
}
|
||||||
|
if s.refresh != "" {
|
||||||
|
params.Set("refresh", s.refresh)
|
||||||
|
}
|
||||||
|
if s.waitForActiveShards != "" {
|
||||||
|
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||||
|
}
|
||||||
|
if s.scrollSize != nil {
|
||||||
|
params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
|
||||||
|
}
|
||||||
|
if s.waitForCompletion != nil {
|
||||||
|
params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
|
||||||
|
}
|
||||||
|
if s.requestsPerSecond != nil {
|
||||||
|
params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *DeleteByQueryService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if len(s.index) == 0 {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the delete-by-query operation.
|
||||||
|
func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set body if there is a query set
|
||||||
|
var body interface{}
|
||||||
|
if s.body != nil {
|
||||||
|
body = s.body
|
||||||
|
} else if s.query != nil {
|
||||||
|
src, err := s.query.Source()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
body = map[string]interface{}{
|
||||||
|
"query": src,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Body: body,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return result
|
||||||
|
ret := new(BulkIndexByScrollResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoAsync executes the delete-by-query operation asynchronously by starting a new task.
|
||||||
|
// Callers need to use the Task Management API to watch the outcome of the reindexing
|
||||||
|
// operation.
|
||||||
|
func (s *DeleteByQueryService) DoAsync(ctx context.Context) (*StartTaskResult, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoAsync only makes sense with WaitForCompletion set to true
|
||||||
|
if s.waitForCompletion != nil && *s.waitForCompletion {
|
||||||
|
return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true")
|
||||||
|
}
|
||||||
|
f := false
|
||||||
|
s.waitForCompletion = &f
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set body if there is a query set
|
||||||
|
var body interface{}
|
||||||
|
if s.body != nil {
|
||||||
|
body = s.body
|
||||||
|
} else if s.query != nil {
|
||||||
|
src, err := s.query.Source()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
body = map[string]interface{}{
|
||||||
|
"query": src,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Body: body,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(StartTaskResult)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BulkIndexByScrollResponse is the outcome of executing Do with
|
||||||
|
// DeleteByQueryService and UpdateByQueryService.
|
||||||
|
type BulkIndexByScrollResponse struct {
|
||||||
|
Header http.Header `json:"-"`
|
||||||
|
Took int64 `json:"took"`
|
||||||
|
SliceId *int64 `json:"slice_id,omitempty"`
|
||||||
|
TimedOut bool `json:"timed_out"`
|
||||||
|
Total int64 `json:"total"`
|
||||||
|
Updated int64 `json:"updated,omitempty"`
|
||||||
|
Created int64 `json:"created,omitempty"`
|
||||||
|
Deleted int64 `json:"deleted"`
|
||||||
|
Batches int64 `json:"batches"`
|
||||||
|
VersionConflicts int64 `json:"version_conflicts"`
|
||||||
|
Noops int64 `json:"noops"`
|
||||||
|
Retries struct {
|
||||||
|
Bulk int64 `json:"bulk"`
|
||||||
|
Search int64 `json:"search"`
|
||||||
|
} `json:"retries,omitempty"`
|
||||||
|
Throttled string `json:"throttled"`
|
||||||
|
ThrottledMillis int64 `json:"throttled_millis"`
|
||||||
|
RequestsPerSecond float64 `json:"requests_per_second"`
|
||||||
|
Canceled string `json:"canceled,omitempty"`
|
||||||
|
ThrottledUntil string `json:"throttled_until"`
|
||||||
|
ThrottledUntilMillis int64 `json:"throttled_until_millis"`
|
||||||
|
Failures []bulkIndexByScrollResponseFailure `json:"failures"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type bulkIndexByScrollResponseFailure struct {
|
||||||
|
Index string `json:"index,omitempty"`
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
Id string `json:"id,omitempty"`
|
||||||
|
Status int `json:"status,omitempty"`
|
||||||
|
Shard int `json:"shard,omitempty"`
|
||||||
|
Node int `json:"node,omitempty"`
|
||||||
|
// TOOD "cause" contains exception details
|
||||||
|
// TOOD "reason" contains exception details
|
||||||
|
}
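A hedged sketch of a delete-by-query call that proceeds on version conflicts; client, index and field names are illustrative:

    // Remove all documents of one "repository" and report the outcome.
    resp, err := client.DeleteByQuery("gitea_issues").
        Query(elastic.NewTermQuery("repo_id", 5)).
        ProceedOnVersionConflict().
        Do(context.Background())
    if err != nil {
        // Handle error
    }
    fmt.Printf("deleted %d documents in %d batches\n", resp.Deleted, resp.Batches)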
51  vendor/github.com/olivere/elastic/v7/doc.go  generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

/*
Package elastic provides an interface to the Elasticsearch server
(https://www.elastic.co/products/elasticsearch).

The first thing you do is to create a Client. If you have Elasticsearch
installed and running with its default settings
(i.e. available at http://127.0.0.1:9200), all you need to do is:

    client, err := elastic.NewClient()
    if err != nil {
        // Handle error
    }

If your Elasticsearch server is running on a different IP and/or port,
just provide a URL to NewClient:

    // Create a client and connect to http://192.168.2.10:9201
    client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
    if err != nil {
        // Handle error
    }

You can pass many more configuration parameters to NewClient. Review the
documentation of NewClient for more information.

If no Elasticsearch server is available, services will fail when creating
a new request and will return ErrNoClient.

A Client provides services. The services usually come with a variety of
methods to prepare the query and a Do function to execute it against the
Elasticsearch REST interface and return a response. Here is an example
of the IndexExists service that checks if a given index already exists.

    exists, err := client.IndexExists("twitter").Do(context.Background())
    if err != nil {
        // Handle error
    }
    if !exists {
        // Index does not exist yet.
    }

Look up the documentation for Client to get an idea of the services provided
and what kinds of responses you get when executing the Do function of a service.
Also see the wiki on Github for more details.

*/
package elastic
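As a hedged illustration of the "many more configuration parameters" mentioned above, here is a client built with a fixed URL and with sniffing and health checks disabled; option names are from this library, the URL is illustrative:

    client, err := elastic.NewClient(
        elastic.SetURL("http://127.0.0.1:9200"),
        elastic.SetSniff(false),       // do not discover other cluster nodes
        elastic.SetHealthcheck(false), // skip the periodic health check
    )
    if err != nil {
        // Handle error
    }
    _ = client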
59  vendor/github.com/olivere/elastic/v7/docker-compose.yml  generated vendored Normal file
@@ -0,0 +1,59 @@
version: '3'

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.4.2
    hostname: elasticsearch
    environment:
      - cluster.name=elasticsearch
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      # - http.host=0.0.0.0
      # - transport.host=127.0.0.1
      # - network.host=_local_
      - network.publish_host=127.0.0.1
      - logger.org.elasticsearch=warn
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      nproc: 65536
      nofile:
        soft: 65536
        hard: 65536
      memlock:
        soft: -1
        hard: -1
    # volumes:
    #   - ./data/elasticsearch:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
  platinum:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2
    hostname: elasticsearch-platinum
    environment:
      - cluster.name=platinum
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      - xpack.ilm.enabled=true
      - xpack.license.self_generated.type=trial
      - xpack.security.enabled=true
      - xpack.watcher.enabled=true
      # - http.host=0.0.0.0
      # - transport.host=127.0.0.1
      # - network.host=_local_
      - http.port=9210
      - network.publish_host=127.0.0.1
      - logger.org.elasticsearch=warn
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
      - ELASTIC_PASSWORD=elastic
    ulimits:
      nproc: 65536
      nofile:
        soft: 65536
        hard: 65536
      memlock:
        soft: -1
        hard: -1
    # volumes:
    #   - ./data/elasticsearch-platinum:/usr/share/elasticsearch/data
    ports:
      - 9210:9210
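A hedged sketch of connecting to the secured "platinum" node defined above (HTTP on port 9210, password elastic); the option names come from this library, everything else is illustrative:

    client, err := elastic.NewClient(
        elastic.SetURL("http://127.0.0.1:9210"),
        elastic.SetBasicAuth("elastic", "elastic"), // username/password assumed from the compose file
        elastic.SetSniff(false),
    )
    if err != nil {
        // Handle error
    }
    _ = client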
42  vendor/github.com/olivere/elastic/v7/docvalue_field.go  generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// DocvalueField represents a docvalue field, its name and
// its format (optional).
type DocvalueField struct {
    Field  string
    Format string
}

// Source serializes the DocvalueField into JSON.
func (d DocvalueField) Source() (interface{}, error) {
    if d.Format == "" {
        return d.Field, nil
    }
    return map[string]interface{}{
        "field":  d.Field,
        "format": d.Format,
    }, nil
}

// DocvalueFields is a slice of DocvalueField instances.
type DocvalueFields []DocvalueField

// Source serializes the DocvalueFields into JSON.
func (d DocvalueFields) Source() (interface{}, error) {
    if d == nil {
        return nil, nil
    }
    v := make([]interface{}, 0)
    for _, f := range d {
        src, err := f.Source()
        if err != nil {
            return nil, err
        }
        v = append(v, src)
    }
    return v, nil
}
|
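A short illustration of the two Source methods above: a DocvalueField without a Format serializes to a bare field name, while one with a Format becomes a small object. This is a sketch that only uses the types defined in this file plus encoding/json:

// Assumes `elastic "github.com/olivere/elastic/v7"` and "encoding/json" are imported.
func docvalueFieldsJSON() ([]byte, error) {
	fields := elastic.DocvalueFields{
		{Field: "user"},                            // -> "user"
		{Field: "created", Format: "epoch_millis"}, // -> {"field":"created","format":"epoch_millis"}
	}
	src, err := fields.Source()
	if err != nil {
		return nil, err
	}
	return json.Marshal(src)
}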
184 vendor/github.com/olivere/elastic/v7/errors.go generated vendored Normal file
@@ -0,0 +1,184 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/pkg/errors"
)

// checkResponse will return an error if the request/response indicates
// an error returned from Elasticsearch.
//
// HTTP status codes between in the range [200..299] are considered successful.
// All other errors are considered errors except they are specified in
// ignoreErrors. This is necessary because for some services, HTTP status 404
// is a valid response from Elasticsearch (e.g. the Exists service).
//
// The func tries to parse error details as returned from Elasticsearch
// and encapsulates them in type elastic.Error.
func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
	// 200-299 are valid status codes
	if res.StatusCode >= 200 && res.StatusCode <= 299 {
		return nil
	}
	// Ignore certain errors?
	for _, code := range ignoreErrors {
		if code == res.StatusCode {
			return nil
		}
	}
	return createResponseError(res)
}

// createResponseError creates an Error structure from the HTTP response,
// its status code and the error information sent by Elasticsearch.
func createResponseError(res *http.Response) error {
	if res.Body == nil {
		return &Error{Status: res.StatusCode}
	}
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return &Error{Status: res.StatusCode}
	}
	errReply := new(Error)
	err = json.Unmarshal(data, errReply)
	if err != nil {
		return &Error{Status: res.StatusCode}
	}
	if errReply != nil {
		if errReply.Status == 0 {
			errReply.Status = res.StatusCode
		}
		return errReply
	}
	return &Error{Status: res.StatusCode}
}

// Error encapsulates error details as returned from Elasticsearch.
type Error struct {
	Status  int           `json:"status"`
	Details *ErrorDetails `json:"error,omitempty"`
}

// ErrorDetails encapsulate error details from Elasticsearch.
// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
type ErrorDetails struct {
	Type         string                   `json:"type"`
	Reason       string                   `json:"reason"`
	ResourceType string                   `json:"resource.type,omitempty"`
	ResourceId   string                   `json:"resource.id,omitempty"`
	Index        string                   `json:"index,omitempty"`
	Phase        string                   `json:"phase,omitempty"`
	Grouped      bool                     `json:"grouped,omitempty"`
	CausedBy     map[string]interface{}   `json:"caused_by,omitempty"`
	RootCause    []*ErrorDetails          `json:"root_cause,omitempty"`
	FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
}

// Error returns a string representation of the error.
func (e *Error) Error() string {
	if e.Details != nil && e.Details.Reason != "" {
		return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
	}
	return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
}

// IsContextErr returns true if the error is from a context that was canceled or deadline exceeded
func IsContextErr(err error) bool {
	if err == context.Canceled || err == context.DeadlineExceeded {
		return true
	}
	// This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329
	if ue, ok := err.(*url.Error); ok {
		if ue.Temporary() {
			return true
		}
		// Use of an AWS Signing Transport can result in a wrapped url.Error
		return IsContextErr(ue.Err)
	}
	return false
}

// IsConnErr returns true if the error indicates that Elastic could not
// find an Elasticsearch host to connect to.
func IsConnErr(err error) bool {
	return err == ErrNoClient || errors.Cause(err) == ErrNoClient
}

// IsNotFound returns true if the given error indicates that Elasticsearch
// returned HTTP status 404. The err parameter can be of type *elastic.Error,
// elastic.Error, *http.Response or int (indicating the HTTP status code).
func IsNotFound(err interface{}) bool {
	return IsStatusCode(err, http.StatusNotFound)
}

// IsTimeout returns true if the given error indicates that Elasticsearch
// returned HTTP status 408. The err parameter can be of type *elastic.Error,
// elastic.Error, *http.Response or int (indicating the HTTP status code).
func IsTimeout(err interface{}) bool {
	return IsStatusCode(err, http.StatusRequestTimeout)
}

// IsConflict returns true if the given error indicates that the Elasticsearch
// operation resulted in a version conflict. This can occur in operations like
// `update` or `index` with `op_type=create`. The err parameter can be of
// type *elastic.Error, elastic.Error, *http.Response or int (indicating the
// HTTP status code).
func IsConflict(err interface{}) bool {
	return IsStatusCode(err, http.StatusConflict)
}

// IsForbidden returns true if the given error indicates that Elasticsearch
// returned HTTP status 403. This happens e.g. due to a missing license.
// The err parameter can be of type *elastic.Error, elastic.Error,
// *http.Response or int (indicating the HTTP status code).
func IsForbidden(err interface{}) bool {
	return IsStatusCode(err, http.StatusForbidden)
}

// IsStatusCode returns true if the given error indicates that the Elasticsearch
// operation returned the specified HTTP status code. The err parameter can be of
// type *http.Response, *Error, Error, or int (indicating the HTTP status code).
func IsStatusCode(err interface{}, code int) bool {
	switch e := err.(type) {
	case *http.Response:
		return e.StatusCode == code
	case *Error:
		return e.Status == code
	case Error:
		return e.Status == code
	case int:
		return e == code
	}
	return false
}

// -- General errors --

// ShardsInfo represents information from a shard.
type ShardsInfo struct {
	Total      int             `json:"total"`
	Successful int             `json:"successful"`
	Failed     int             `json:"failed"`
	Failures   []*ShardFailure `json:"failures,omitempty"`
	Skipped    int             `json:"skipped,omitempty"`
}

// ShardFailure represents details about a failure.
type ShardFailure struct {
	Index   string                 `json:"_index,omitempty"`
	Shard   int                    `json:"_shard,omitempty"`
	Node    string                 `json:"_node,omitempty"`
	Reason  map[string]interface{} `json:"reason,omitempty"`
	Status  string                 `json:"status,omitempty"`
	Primary bool                   `json:"primary,omitempty"`
}
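In practice the helpers above let callers branch on the HTTP status of a failed call without string matching. A hedged sketch (err is whatever some service's Do function returned):

// Assumes `elastic "github.com/olivere/elastic/v7"` and "log" are imported.
func describeFailure(err error) {
	if err == nil {
		return
	}
	if elastic.IsNotFound(err) {
		log.Println("document or index is missing (HTTP 404)")
		return
	}
	if elastic.IsConflict(err) {
		log.Println("version conflict (HTTP 409)")
		return
	}
	// Full details, if Elasticsearch returned a structured error body.
	if e, ok := err.(*elastic.Error); ok && e.Details != nil {
		log.Printf("status=%d type=%s reason=%s", e.Status, e.Details.Type, e.Details.Reason)
		return
	}
	log.Println(err)
}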
236 vendor/github.com/olivere/elastic/v7/exists.go generated vendored Normal file
@@ -0,0 +1,236 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// ExistsService checks for the existence of a document using HEAD.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-get.html
// for details.
type ExistsService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	id         string
	index      string
	typ        string
	preference string
	realtime   *bool
	refresh    string
	routing    string
	parent     string
}

// NewExistsService creates a new ExistsService.
func NewExistsService(client *Client) *ExistsService {
	return &ExistsService{
		client: client,
		typ:    "_doc",
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *ExistsService) Pretty(pretty bool) *ExistsService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *ExistsService) Human(human bool) *ExistsService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *ExistsService) ErrorTrace(errorTrace bool) *ExistsService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *ExistsService) FilterPath(filterPath ...string) *ExistsService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *ExistsService) Header(name string, value string) *ExistsService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *ExistsService) Headers(headers http.Header) *ExistsService {
	s.headers = headers
	return s
}

// Id is the document ID.
func (s *ExistsService) Id(id string) *ExistsService {
	s.id = id
	return s
}

// Index is the name of the index.
func (s *ExistsService) Index(index string) *ExistsService {
	s.index = index
	return s
}

// Type is the type of the document (use `_all` to fetch the first document
// matching the ID across all types).
func (s *ExistsService) Type(typ string) *ExistsService {
	s.typ = typ
	return s
}

// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *ExistsService) Preference(preference string) *ExistsService {
	s.preference = preference
	return s
}

// Realtime specifies whether to perform the operation in realtime or search mode.
func (s *ExistsService) Realtime(realtime bool) *ExistsService {
	s.realtime = &realtime
	return s
}

// Refresh the shard containing the document before performing the operation.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
// for details.
func (s *ExistsService) Refresh(refresh string) *ExistsService {
	s.refresh = refresh
	return s
}

// Routing is a specific routing value.
func (s *ExistsService) Routing(routing string) *ExistsService {
	s.routing = routing
	return s
}

// Parent is the ID of the parent document.
func (s *ExistsService) Parent(parent string) *ExistsService {
	s.parent = parent
	return s
}

// buildURL builds the URL for the operation.
func (s *ExistsService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
		"id":    s.id,
		"index": s.index,
		"type":  s.typ,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.realtime != nil {
		params.Set("realtime", fmt.Sprint(*s.realtime))
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *ExistsService) Validate() error {
	var invalid []string
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *ExistsService) Do(ctx context.Context) (bool, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return false, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return false, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:       "HEAD",
		Path:         path,
		Params:       params,
		IgnoreErrors: []int{404},
		Headers:      s.headers,
	})
	if err != nil {
		return false, err
	}

	// Return operation response
	switch res.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
	}
}
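Putting ExistsService together: the HEAD request either finds the document (200 gives true) or not (404 gives false). A minimal sketch using only the constructor and setters defined above; the index and id values are placeholders:

// Assumes `elastic "github.com/olivere/elastic/v7"` and "context" are imported.
func documentExists(client *elastic.Client, index, id string) (bool, error) {
	return elastic.NewExistsService(client).
		Index(index).
		Id(id).
		Do(context.Background())
}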
390 vendor/github.com/olivere/elastic/v7/explain.go generated vendored Normal file
@@ -0,0 +1,390 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// ExplainService computes a score explanation for a query and
// a specific document.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-explain.html.
type ExplainService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	id                     string
	index                  string
	typ                    string
	q                      string
	routing                string
	lenient                *bool
	analyzer               string
	df                     string
	fields                 []string
	lowercaseExpandedTerms *bool
	xSourceInclude         []string
	analyzeWildcard        *bool
	parent                 string
	preference             string
	xSource                []string
	defaultOperator        string
	xSourceExclude         []string
	source                 string
	bodyJson               interface{}
	bodyString             string
}

// NewExplainService creates a new ExplainService.
func NewExplainService(client *Client) *ExplainService {
	return &ExplainService{
		client:         client,
		typ:            "_doc",
		xSource:        make([]string, 0),
		xSourceExclude: make([]string, 0),
		fields:         make([]string, 0),
		xSourceInclude: make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *ExplainService) Pretty(pretty bool) *ExplainService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *ExplainService) Human(human bool) *ExplainService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *ExplainService) ErrorTrace(errorTrace bool) *ExplainService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *ExplainService) FilterPath(filterPath ...string) *ExplainService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *ExplainService) Header(name string, value string) *ExplainService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *ExplainService) Headers(headers http.Header) *ExplainService {
	s.headers = headers
	return s
}

// Id is the document ID.
func (s *ExplainService) Id(id string) *ExplainService {
	s.id = id
	return s
}

// Index is the name of the index.
func (s *ExplainService) Index(index string) *ExplainService {
	s.index = index
	return s
}

// Type is the type of the document.
//
// Deprecated: Types are in the process of being removed.
func (s *ExplainService) Type(typ string) *ExplainService {
	s.typ = typ
	return s
}

// Source is the URL-encoded query definition (instead of using the request body).
func (s *ExplainService) Source(source string) *ExplainService {
	s.source = source
	return s
}

// XSourceExclude is a list of fields to exclude from the returned _source field.
func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
	s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
	return s
}

// Lenient specifies whether format-based query failures
// (such as providing text to a numeric field) should be ignored.
func (s *ExplainService) Lenient(lenient bool) *ExplainService {
	s.lenient = &lenient
	return s
}

// Query in the Lucene query string syntax.
func (s *ExplainService) Q(q string) *ExplainService {
	s.q = q
	return s
}

// Routing sets a specific routing value.
func (s *ExplainService) Routing(routing string) *ExplainService {
	s.routing = routing
	return s
}

// AnalyzeWildcard specifies whether wildcards and prefix queries
// in the query string query should be analyzed (default: false).
func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
	s.analyzeWildcard = &analyzeWildcard
	return s
}

// Analyzer is the analyzer for the query string query.
func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
	s.analyzer = analyzer
	return s
}

// Df is the default field for query string query (default: _all).
func (s *ExplainService) Df(df string) *ExplainService {
	s.df = df
	return s
}

// Fields is a list of fields to return in the response.
func (s *ExplainService) Fields(fields ...string) *ExplainService {
	s.fields = append(s.fields, fields...)
	return s
}

// LowercaseExpandedTerms specifies whether query terms should be lowercased.
func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
	return s
}

// XSourceInclude is a list of fields to extract and return from the _source field.
func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
	s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
	return s
}

// DefaultOperator is the default operator for query string query (AND or OR).
func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
	s.defaultOperator = defaultOperator
	return s
}

// Parent is the ID of the parent document.
func (s *ExplainService) Parent(parent string) *ExplainService {
	s.parent = parent
	return s
}

// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *ExplainService) Preference(preference string) *ExplainService {
	s.preference = preference
	return s
}

// XSource is true or false to return the _source field or not, or a list of fields to return.
func (s *ExplainService) XSource(xSource ...string) *ExplainService {
	s.xSource = append(s.xSource, xSource...)
	return s
}

// Query sets a query definition using the Query DSL.
func (s *ExplainService) Query(query Query) *ExplainService {
	src, err := query.Source()
	if err != nil {
		// Do nothing in case of an error
		return s
	}
	body := make(map[string]interface{})
	body["query"] = src
	s.bodyJson = body
	return s
}

// BodyJson sets the query definition using the Query DSL.
func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
	s.bodyJson = body
	return s
}

// BodyString sets the query definition using the Query DSL as a string.
func (s *ExplainService) BodyString(body string) *ExplainService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *ExplainService) buildURL() (string, url.Values, error) {
	// Build URL
	var path string
	var err error

	if s.typ == "" || s.typ == "_doc" {
		path, err = uritemplates.Expand("/{index}/_explain/{id}", map[string]string{
			"id":    s.id,
			"index": s.index,
		})
	} else {
		path, err = uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
			"id":    s.id,
			"index": s.index,
			"type":  s.typ,
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if len(s.xSource) > 0 {
		params.Set("_source", strings.Join(s.xSource, ","))
	}
	if s.defaultOperator != "" {
		params.Set("default_operator", s.defaultOperator)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	if s.source != "" {
		params.Set("source", s.source)
	}
	if len(s.xSourceExclude) > 0 {
		params.Set("_source_excludes", strings.Join(s.xSourceExclude, ","))
	}
	if s.lenient != nil {
		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
	}
	if s.q != "" {
		params.Set("q", s.q)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if len(s.fields) > 0 {
		params.Set("fields", strings.Join(s.fields, ","))
	}
	if s.lowercaseExpandedTerms != nil {
		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
	}
	if len(s.xSourceInclude) > 0 {
		params.Set("_source_includes", strings.Join(s.xSourceInclude, ","))
	}
	if s.analyzeWildcard != nil {
		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
	}
	if s.analyzer != "" {
		params.Set("analyzer", s.analyzer)
	}
	if s.df != "" {
		params.Set("df", s.df)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *ExplainService) Validate() error {
	var invalid []string
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ExplainResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// ExplainResponse is the response of ExplainService.Do.
type ExplainResponse struct {
	Index       string                 `json:"_index"`
	Type        string                 `json:"_type"`
	Id          string                 `json:"_id"`
	Matched     bool                   `json:"matched"`
	Explanation map[string]interface{} `json:"explanation"`
}
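ExplainService is typically fed a Query DSL query and returns whether the document matched plus a scoring explanation. A sketch of that call follows; NewTermQuery is a query constructor from the same package that does not appear in this diff, so treat it as an assumption:

// Assumes `elastic "github.com/olivere/elastic/v7"`, "context" and "fmt" are imported.
func explainMatch(client *elastic.Client, index, id string) error {
	resp, err := elastic.NewExplainService(client).
		Index(index).
		Id(id).
		Query(elastic.NewTermQuery("user", "olivere")). // assumed helper, not part of this diff
		Do(context.Background())
	if err != nil {
		return err
	}
	fmt.Printf("matched=%v explanation=%v\n", resp.Matched, resp.Explanation)
	return nil
}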
90 vendor/github.com/olivere/elastic/v7/fetch_source_context.go generated vendored Normal file
@@ -0,0 +1,90 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"net/url"
	"strings"
)

// FetchSourceContext enables source filtering, i.e. it allows control
// over how the _source field is returned with every hit. It is used
// with various endpoints, e.g. when searching for documents, retrieving
// individual documents, or even updating documents.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-source-filtering.html
// for details.
type FetchSourceContext struct {
	fetchSource bool
	includes    []string
	excludes    []string
}

// NewFetchSourceContext returns a new FetchSourceContext.
func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
	return &FetchSourceContext{
		fetchSource: fetchSource,
		includes:    make([]string, 0),
		excludes:    make([]string, 0),
	}
}

// FetchSource indicates whether to return the _source.
func (fsc *FetchSourceContext) FetchSource() bool {
	return fsc.fetchSource
}

// SetFetchSource specifies whether to return the _source.
func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
	fsc.fetchSource = fetchSource
}

// Include indicates to return specific parts of the _source.
// Wildcards are allowed here.
func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
	fsc.includes = append(fsc.includes, includes...)
	return fsc
}

// Exclude indicates to exclude specific parts of the _source.
// Wildcards are allowed here.
func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
	fsc.excludes = append(fsc.excludes, excludes...)
	return fsc
}

// Source returns the JSON-serializable data to be used in a body.
func (fsc *FetchSourceContext) Source() (interface{}, error) {
	if !fsc.fetchSource {
		return false, nil
	}
	if len(fsc.includes) == 0 && len(fsc.excludes) == 0 {
		return true, nil
	}
	src := make(map[string]interface{})
	if len(fsc.includes) > 0 {
		src["includes"] = fsc.includes
	}
	if len(fsc.excludes) > 0 {
		src["excludes"] = fsc.excludes
	}
	return src, nil
}

// Query returns the parameters in a form suitable for a URL query string.
func (fsc *FetchSourceContext) Query() url.Values {
	params := url.Values{}
	if fsc.fetchSource {
		if len(fsc.includes) > 0 {
			params.Add("_source_includes", strings.Join(fsc.includes, ","))
		}
		if len(fsc.excludes) > 0 {
			params.Add("_source_excludes", strings.Join(fsc.excludes, ","))
		}
	} else {
		params.Add("_source", "false")
	}
	return params
}
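FetchSourceContext has two outputs: Source() for a request body and Query() for URL parameters, which is why the same object can back both search bodies and the GetService shown later in this diff. A small sketch of both forms:

// Assumes `elastic "github.com/olivere/elastic/v7"` and "fmt" are imported.
func showSourceFiltering() error {
	fsc := elastic.NewFetchSourceContext(true).
		Include("title", "comments.*").
		Exclude("comments.raw")

	body, err := fsc.Source() // -> map with "includes"/"excludes" for a request body
	if err != nil {
		return err
	}
	params := fsc.Query() // -> _source_includes / _source_excludes query parameters
	fmt.Println(body, params)
	return nil
}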
257 vendor/github.com/olivere/elastic/v7/field_caps.go generated vendored Normal file
@@ -0,0 +1,257 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// FieldCapsService allows retrieving the capabilities of fields among multiple indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-field-caps.html
// for details
type FieldCapsService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	allowNoIndices    *bool
	expandWildcards   string
	fields            []string
	ignoreUnavailable *bool
	bodyJson          interface{}
	bodyString        string
}

// NewFieldCapsService creates a new FieldCapsService
func NewFieldCapsService(client *Client) *FieldCapsService {
	return &FieldCapsService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *FieldCapsService) Pretty(pretty bool) *FieldCapsService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *FieldCapsService) Human(human bool) *FieldCapsService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *FieldCapsService) ErrorTrace(errorTrace bool) *FieldCapsService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *FieldCapsService) FilterPath(filterPath ...string) *FieldCapsService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *FieldCapsService) Header(name string, value string) *FieldCapsService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *FieldCapsService) Headers(headers http.Header) *FieldCapsService {
	s.headers = headers
	return s
}

// Index is a list of index names; use `_all` or empty string to perform
// the operation on all indices.
func (s *FieldCapsService) Index(index ...string) *FieldCapsService {
	s.index = append(s.index, index...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *FieldCapsService) AllowNoIndices(allowNoIndices bool) *FieldCapsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *FieldCapsService) ExpandWildcards(expandWildcards string) *FieldCapsService {
	s.expandWildcards = expandWildcards
	return s
}

// Fields is a list of fields for to get field capabilities.
func (s *FieldCapsService) Fields(fields ...string) *FieldCapsService {
	s.fields = append(s.fields, fields...)
	return s
}

// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed).
func (s *FieldCapsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldCapsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
func (s *FieldCapsService) BodyJson(body interface{}) *FieldCapsService {
	s.bodyJson = body
	return s
}

// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
func (s *FieldCapsService) BodyString(body string) *FieldCapsService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *FieldCapsService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_field_caps", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_field_caps"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if len(s.fields) > 0 {
		params.Set("fields", strings.Join(s.fields, ","))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *FieldCapsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *FieldCapsService) Do(ctx context.Context) (*FieldCapsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:       "POST",
		Path:         path,
		Params:       params,
		Body:         body,
		IgnoreErrors: []int{http.StatusNotFound},
		Headers:      s.headers,
	})
	if err != nil {
		return nil, err
	}

	// TODO(oe): Is 404 really a valid response here?
	if res.StatusCode == http.StatusNotFound {
		return &FieldCapsResponse{}, nil
	}

	// Return operation response
	ret := new(FieldCapsResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Request --

// FieldCapsRequest can be used to set up the body to be used in the
// Field Capabilities API.
type FieldCapsRequest struct {
	Fields []string `json:"fields"`
}

// -- Response --

// FieldCapsResponse contains field capabilities.
type FieldCapsResponse struct {
	Indices []string                 `json:"indices,omitempty"` // list of index names
	Fields  map[string]FieldCapsType `json:"fields,omitempty"`  // Name -> type -> caps
}

// FieldCapsType represents a mapping from type (e.g. keyword)
// to capabilities.
type FieldCapsType map[string]FieldCaps // type -> caps

// FieldCaps contains capabilities of an individual field.
type FieldCaps struct {
	Type                   string   `json:"type"`
	Searchable             bool     `json:"searchable"`
	Aggregatable           bool     `json:"aggregatable"`
	Indices                []string `json:"indices,omitempty"`
	NonSearchableIndices   []string `json:"non_searchable_indices,omitempty"`
	NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"`
}
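Field capabilities answer the question of which fields are searchable or aggregatable, and with what type, across a set of indices. A sketch built only from the constructor and setters above; the index name is a placeholder:

// Assumes `elastic "github.com/olivere/elastic/v7"`, "context" and "fmt" are imported.
func printFieldCaps(client *elastic.Client) error {
	resp, err := elastic.NewFieldCapsService(client).
		Index("my-index"). // placeholder index name
		Fields("title", "created").
		Do(context.Background())
	if err != nil {
		return err
	}
	for field, byType := range resp.Fields {
		for typ, caps := range byType {
			fmt.Printf("%s (%s): searchable=%v aggregatable=%v\n", field, typ, caps.Searchable, caps.Aggregatable)
		}
	}
	return nil
}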
54 vendor/github.com/olivere/elastic/v7/geo_point.go generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// GeoPoint is a geographic position described via latitude and longitude.
type GeoPoint struct {
	Lat float64 `json:"lat"`
	Lon float64 `json:"lon"`
}

// Source returns the object to be serialized in Elasticsearch DSL.
func (pt *GeoPoint) Source() map[string]float64 {
	return map[string]float64{
		"lat": pt.Lat,
		"lon": pt.Lon,
	}
}

// MarshalJSON encodes the GeoPoint to JSON.
func (pt *GeoPoint) MarshalJSON() ([]byte, error) {
	return json.Marshal(pt.Source())
}

// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
	return &GeoPoint{Lat: lat, Lon: lon}
}

// GeoPointFromString initializes a new GeoPoint by a string that is
// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
func GeoPointFromString(latLon string) (*GeoPoint, error) {
	latlon := strings.SplitN(latLon, ",", 2)
	if len(latlon) != 2 {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	lat, err := strconv.ParseFloat(latlon[0], 64)
	if err != nil {
		return nil, err
	}
	lon, err := strconv.ParseFloat(latlon[1], 64)
	if err != nil {
		return nil, err
	}
	return &GeoPoint{Lat: lat, Lon: lon}, nil
}
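The "{latitude},{longitude}" format mentioned above round-trips as follows (a sketch that uses only functions from this file plus encoding/json):

// Assumes `elastic "github.com/olivere/elastic/v7"`, "encoding/json" and "fmt" are imported.
func geoPointDemo() error {
	pt, err := elastic.GeoPointFromString("40.10210,-70.12091")
	if err != nil {
		return err
	}
	b, err := json.Marshal(pt) // {"lat":40.1021,"lon":-70.12091}
	if err != nil {
		return err
	}
	fmt.Println(pt.Lat, pt.Lon, string(b))
	return nil
}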
317
vendor/github.com/olivere/elastic/v7/get.go
generated
vendored
Normal file
317
vendor/github.com/olivere/elastic/v7/get.go
generated
vendored
Normal file
|
@ -0,0 +1,317 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetService allows to get a typed JSON document from the index based
|
||||||
|
// on its id.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-get.html
|
||||||
|
// for details.
|
||||||
|
type GetService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index string
|
||||||
|
typ string
|
||||||
|
id string
|
||||||
|
routing string
|
||||||
|
preference string
|
||||||
|
storedFields []string
|
||||||
|
refresh string
|
||||||
|
realtime *bool
|
||||||
|
fsc *FetchSourceContext
|
||||||
|
version interface{}
|
||||||
|
versionType string
|
||||||
|
parent string
|
||||||
|
ignoreErrorsOnGeneratedFields *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetService creates a new GetService.
|
||||||
|
func NewGetService(client *Client) *GetService {
|
||||||
|
return &GetService{
|
||||||
|
client: client,
|
||||||
|
typ: "_doc",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *GetService) Pretty(pretty bool) *GetService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *GetService) Human(human bool) *GetService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *GetService) ErrorTrace(errorTrace bool) *GetService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *GetService) FilterPath(filterPath ...string) *GetService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *GetService) Header(name string, value string) *GetService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *GetService) Headers(headers http.Header) *GetService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is the name of the index.
|
||||||
|
func (s *GetService) Index(index string) *GetService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type is the type of the document
|
||||||
|
//
|
||||||
|
// Deprecated: Types are in the process of being removed.
|
||||||
|
func (s *GetService) Type(typ string) *GetService {
|
||||||
|
s.typ = typ
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Id is the document ID.
|
||||||
|
func (s *GetService) Id(id string) *GetService {
|
||||||
|
s.id = id
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent is the ID of the parent document.
|
||||||
|
func (s *GetService) Parent(parent string) *GetService {
|
||||||
|
s.parent = parent
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing is the specific routing value.
|
||||||
|
func (s *GetService) Routing(routing string) *GetService {
|
||||||
|
s.routing = routing
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Preference specifies the node or shard the operation should be performed on (default: random).
|
||||||
|
func (s *GetService) Preference(preference string) *GetService {
|
||||||
|
s.preference = preference
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoredFields is a list of fields to return in the response.
|
||||||
|
func (s *GetService) StoredFields(storedFields ...string) *GetService {
|
||||||
|
s.storedFields = append(s.storedFields, storedFields...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GetService) FetchSource(fetchSource bool) *GetService {
|
||||||
|
if s.fsc == nil {
|
||||||
|
s.fsc = NewFetchSourceContext(fetchSource)
|
||||||
|
} else {
|
||||||
|
s.fsc.SetFetchSource(fetchSource)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
|
||||||
|
s.fsc = fetchSourceContext
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh the shard containing the document before performing the operation.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
// for details.
func (s *GetService) Refresh(refresh string) *GetService {
	s.refresh = refresh
	return s
}

// Realtime specifies whether to perform the operation in realtime or search mode.
func (s *GetService) Realtime(realtime bool) *GetService {
	s.realtime = &realtime
	return s
}

// VersionType is the specific version type.
func (s *GetService) VersionType(versionType string) *GetService {
	s.versionType = versionType
	return s
}

// Version is an explicit version number for concurrency control.
func (s *GetService) Version(version interface{}) *GetService {
	s.version = version
	return s
}

// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that
// are generated if the transaction log is accessed.
func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
	s.ignoreErrorsOnGeneratedFields = &ignore
	return s
}

// Validate checks if the operation is valid.
func (s *GetService) Validate() error {
	var invalid []string
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// buildURL builds the URL for the operation.
func (s *GetService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
		"id":    s.id,
		"index": s.index,
		"type":  s.typ,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	if len(s.storedFields) > 0 {
		params.Set("stored_fields", strings.Join(s.storedFields, ","))
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.version != nil {
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}
	if s.realtime != nil {
		params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
	}
	if s.ignoreErrorsOnGeneratedFields != nil {
		params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields))
	}
	if s.fsc != nil {
		for k, values := range s.fsc.Query() {
			params.Add(k, strings.Join(values, ","))
		}
	}
	return path, params, nil
}

// Do executes the operation.
func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(GetResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of a get request.

// GetResult is the outcome of GetService.Do.
type GetResult struct {
	Index       string                 `json:"_index"`   // index meta field
	Type        string                 `json:"_type"`    // type meta field
	Id          string                 `json:"_id"`      // id meta field
	Uid         string                 `json:"_uid"`     // uid meta field (see MapperService.java for all meta fields)
	Routing     string                 `json:"_routing"` // routing meta field
	Parent      string                 `json:"_parent"`  // parent meta field
	Version     *int64                 `json:"_version"` // version number, when Version is set to true in SearchService
	SeqNo       *int64                 `json:"_seq_no"`
	PrimaryTerm *int64                 `json:"_primary_term"`
	Source      json.RawMessage        `json:"_source,omitempty"`
	Found       bool                   `json:"found,omitempty"`
	Fields      map[string]interface{} `json:"fields,omitempty"`
	//Error string `json:"error,omitempty"` // used only in MultiGet
	// TODO double-check that MultiGet now returns details error information
	Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
}
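The GetService methods above are the low-level building blocks for fetching a single document by id. As an illustrative sketch only (not part of this change), assuming a reachable Elasticsearch at a placeholder URL, a hypothetical "gitea_issues" index, and the client.Get() helper defined elsewhere in this library, a lookup could look like:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// The URL and sniffing choice are assumptions for a local single-node instance.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"), elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}
	// Fetch document "1" from a hypothetical index.
	res, err := client.Get().Index("gitea_issues").Id("1").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if res.Found {
		var doc map[string]interface{}
		// GetResult.Source is a json.RawMessage holding the original document.
		if err := json.Unmarshal(res.Source, &doc); err != nil {
			log.Fatal(err)
		}
		fmt.Println(doc)
	}
}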
18 vendor/github.com/olivere/elastic/v7/go.mod generated vendored Normal file
@@ -0,0 +1,18 @@
module github.com/olivere/elastic/v7

go 1.12

require (
	github.com/aws/aws-sdk-go v1.25.25
	github.com/fortytw2/leaktest v1.3.0
	github.com/golang/mock v1.2.0 // indirect
	github.com/google/go-cmp v0.3.1
	github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e
	github.com/opentracing/opentracing-go v1.1.0
	github.com/pkg/errors v0.8.1
	github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9
	go.opencensus.io v0.22.1
	google.golang.org/api v0.3.1 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	gopkg.in/yaml.v2 v2.2.2 // indirect
)

469 vendor/github.com/olivere/elastic/v7/highlight.go generated vendored Normal file
@@ -0,0 +1,469 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

// Highlight allows highlighting search results on one or more fields.
// For details, see:
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-highlighting.html
type Highlight struct {
	fields                []*HighlighterField
	tagsSchema            *string
	highlightFilter       *bool
	fragmentSize          *int
	numOfFragments        *int
	preTags               []string
	postTags              []string
	order                 *string
	encoder               *string
	requireFieldMatch     *bool
	boundaryMaxScan       *int
	boundaryChars         *string
	boundaryScannerType   *string
	boundaryScannerLocale *string
	highlighterType       *string
	fragmenter            *string
	highlightQuery        Query
	noMatchSize           *int
	phraseLimit           *int
	options               map[string]interface{}
	forceSource           *bool
	useExplicitFieldOrder bool
}

func NewHighlight() *Highlight {
	hl := &Highlight{
		options: make(map[string]interface{}),
	}
	return hl
}

func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
	hl.fields = append(hl.fields, fields...)
	return hl
}

func (hl *Highlight) Field(name string) *Highlight {
	field := NewHighlighterField(name)
	hl.fields = append(hl.fields, field)
	return hl
}

func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
	hl.tagsSchema = &schemaName
	return hl
}

func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
	hl.highlightFilter = &highlightFilter
	return hl
}

func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
	hl.fragmentSize = &fragmentSize
	return hl
}

func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
	hl.numOfFragments = &numOfFragments
	return hl
}

func (hl *Highlight) Encoder(encoder string) *Highlight {
	hl.encoder = &encoder
	return hl
}

func (hl *Highlight) PreTags(preTags ...string) *Highlight {
	hl.preTags = append(hl.preTags, preTags...)
	return hl
}

func (hl *Highlight) PostTags(postTags ...string) *Highlight {
	hl.postTags = append(hl.postTags, postTags...)
	return hl
}

func (hl *Highlight) Order(order string) *Highlight {
	hl.order = &order
	return hl
}

func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
	hl.requireFieldMatch = &requireFieldMatch
	return hl
}

func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
	hl.boundaryMaxScan = &boundaryMaxScan
	return hl
}

func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
	hl.boundaryChars = &boundaryChars
	return hl
}

func (hl *Highlight) BoundaryScannerType(boundaryScannerType string) *Highlight {
	hl.boundaryScannerType = &boundaryScannerType
	return hl
}

func (hl *Highlight) BoundaryScannerLocale(boundaryScannerLocale string) *Highlight {
	hl.boundaryScannerLocale = &boundaryScannerLocale
	return hl
}

func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
	hl.highlighterType = &highlighterType
	return hl
}

func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
	hl.fragmenter = &fragmenter
	return hl
}

func (hl *Highlight) HighlightQuery(highlightQuery Query) *Highlight {
	hl.highlightQuery = highlightQuery
	return hl
}

func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
	hl.noMatchSize = &noMatchSize
	return hl
}

func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
	hl.options = options
	return hl
}

func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
	hl.forceSource = &forceSource
	return hl
}

func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
	hl.useExplicitFieldOrder = useExplicitFieldOrder
	return hl
}

// Creates the query source for the bool query.
func (hl *Highlight) Source() (interface{}, error) {
	// Returns the map inside of "highlight":
	// "highlight":{
	//   ... this ...
	// }
	source := make(map[string]interface{})
	if hl.tagsSchema != nil {
		source["tags_schema"] = *hl.tagsSchema
	}
	if hl.preTags != nil && len(hl.preTags) > 0 {
		source["pre_tags"] = hl.preTags
	}
	if hl.postTags != nil && len(hl.postTags) > 0 {
		source["post_tags"] = hl.postTags
	}
	if hl.order != nil {
		source["order"] = *hl.order
	}
	if hl.highlightFilter != nil {
		source["highlight_filter"] = *hl.highlightFilter
	}
	if hl.fragmentSize != nil {
		source["fragment_size"] = *hl.fragmentSize
	}
	if hl.numOfFragments != nil {
		source["number_of_fragments"] = *hl.numOfFragments
	}
	if hl.encoder != nil {
		source["encoder"] = *hl.encoder
	}
	if hl.requireFieldMatch != nil {
		source["require_field_match"] = *hl.requireFieldMatch
	}
	if hl.boundaryMaxScan != nil {
		source["boundary_max_scan"] = *hl.boundaryMaxScan
	}
	if hl.boundaryChars != nil {
		source["boundary_chars"] = *hl.boundaryChars
	}
	if hl.boundaryScannerType != nil {
		source["boundary_scanner"] = *hl.boundaryScannerType
	}
	if hl.boundaryScannerLocale != nil {
		source["boundary_scanner_locale"] = *hl.boundaryScannerLocale
	}
	if hl.highlighterType != nil {
		source["type"] = *hl.highlighterType
	}
	if hl.fragmenter != nil {
		source["fragmenter"] = *hl.fragmenter
	}
	if hl.highlightQuery != nil {
		src, err := hl.highlightQuery.Source()
		if err != nil {
			return nil, err
		}
		source["highlight_query"] = src
	}
	if hl.noMatchSize != nil {
		source["no_match_size"] = *hl.noMatchSize
	}
	if hl.phraseLimit != nil {
		source["phrase_limit"] = *hl.phraseLimit
	}
	if hl.options != nil && len(hl.options) > 0 {
		source["options"] = hl.options
	}
	if hl.forceSource != nil {
		source["force_source"] = *hl.forceSource
	}

	if hl.fields != nil && len(hl.fields) > 0 {
		if hl.useExplicitFieldOrder {
			// Use a slice for the fields
			var fields []map[string]interface{}
			for _, field := range hl.fields {
				src, err := field.Source()
				if err != nil {
					return nil, err
				}
				fmap := make(map[string]interface{})
				fmap[field.Name] = src
				fields = append(fields, fmap)
			}
			source["fields"] = fields
		} else {
			// Use a map for the fields
			fields := make(map[string]interface{})
			for _, field := range hl.fields {
				src, err := field.Source()
				if err != nil {
					return nil, err
				}
				fields[field.Name] = src
			}
			source["fields"] = fields
		}
	}

	return source, nil
}

// HighlighterField specifies a highlighted field.
type HighlighterField struct {
	Name string

	preTags           []string
	postTags          []string
	fragmentSize      int
	fragmentOffset    int
	numOfFragments    int
	highlightFilter   *bool
	order             *string
	requireFieldMatch *bool
	boundaryMaxScan   int
	boundaryChars     []rune
	highlighterType   *string
	fragmenter        *string
	highlightQuery    Query
	noMatchSize       *int
	matchedFields     []string
	phraseLimit       *int
	options           map[string]interface{}
	forceSource       *bool

	/*
		Name string
		preTags []string
		postTags []string
		fragmentSize int
		numOfFragments int
		fragmentOffset int
		highlightFilter *bool
		order string
		requireFieldMatch *bool
		boundaryMaxScan int
		boundaryChars []rune
		highlighterType string
		fragmenter string
		highlightQuery Query
		noMatchSize *int
		matchedFields []string
		options map[string]interface{}
		forceSource *bool
	*/
}

func NewHighlighterField(name string) *HighlighterField {
	return &HighlighterField{
		Name:            name,
		preTags:         make([]string, 0),
		postTags:        make([]string, 0),
		fragmentSize:    -1,
		fragmentOffset:  -1,
		numOfFragments:  -1,
		boundaryMaxScan: -1,
		boundaryChars:   make([]rune, 0),
		matchedFields:   make([]string, 0),
		options:         make(map[string]interface{}),
	}
}

func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
	f.preTags = append(f.preTags, preTags...)
	return f
}

func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
	f.postTags = append(f.postTags, postTags...)
	return f
}

func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
	f.fragmentSize = fragmentSize
	return f
}

func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
	f.fragmentOffset = fragmentOffset
	return f
}

func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
	f.numOfFragments = numOfFragments
	return f
}

func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
	f.highlightFilter = &highlightFilter
	return f
}

func (f *HighlighterField) Order(order string) *HighlighterField {
	f.order = &order
	return f
}

func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
	f.requireFieldMatch = &requireFieldMatch
	return f
}

func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
	f.boundaryMaxScan = boundaryMaxScan
	return f
}

func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
	return f
}

func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
	f.highlighterType = &highlighterType
	return f
}

func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
	f.fragmenter = &fragmenter
	return f
}

func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
	f.highlightQuery = highlightQuery
	return f
}

func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
	f.noMatchSize = &noMatchSize
	return f
}

func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
	f.options = options
	return f
}

func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
	f.matchedFields = append(f.matchedFields, matchedFields...)
	return f
}

func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
	f.phraseLimit = &phraseLimit
	return f
}

func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
	f.forceSource = &forceSource
	return f
}

func (f *HighlighterField) Source() (interface{}, error) {
	source := make(map[string]interface{})

	if f.preTags != nil && len(f.preTags) > 0 {
		source["pre_tags"] = f.preTags
	}
	if f.postTags != nil && len(f.postTags) > 0 {
		source["post_tags"] = f.postTags
	}
	if f.fragmentSize != -1 {
		source["fragment_size"] = f.fragmentSize
	}
	if f.numOfFragments != -1 {
		source["number_of_fragments"] = f.numOfFragments
	}
	if f.fragmentOffset != -1 {
		source["fragment_offset"] = f.fragmentOffset
	}
	if f.highlightFilter != nil {
		source["highlight_filter"] = *f.highlightFilter
	}
	if f.order != nil {
		source["order"] = *f.order
	}
	if f.requireFieldMatch != nil {
		source["require_field_match"] = *f.requireFieldMatch
	}
	if f.boundaryMaxScan != -1 {
		source["boundary_max_scan"] = f.boundaryMaxScan
	}
	if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
		source["boundary_chars"] = f.boundaryChars
	}
	if f.highlighterType != nil {
		source["type"] = *f.highlighterType
	}
	if f.fragmenter != nil {
		source["fragmenter"] = *f.fragmenter
	}
	if f.highlightQuery != nil {
		src, err := f.highlightQuery.Source()
		if err != nil {
			return nil, err
		}
		source["highlight_query"] = src
	}
	if f.noMatchSize != nil {
		source["no_match_size"] = *f.noMatchSize
	}
	if f.matchedFields != nil && len(f.matchedFields) > 0 {
		source["matched_fields"] = f.matchedFields
	}
	if f.phraseLimit != nil {
		source["phrase_limit"] = *f.phraseLimit
	}
	if f.options != nil && len(f.options) > 0 {
		source["options"] = f.options
	}
	if f.forceSource != nil {
		source["force_source"] = *f.forceSource
	}

	return source, nil
}
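Highlight and HighlighterField are plain request builders; Source() produces the map that is serialized under "highlight" in a search body. A minimal, illustrative sketch (field names are made up, not part of this commit) of building one and inspecting the JSON it generates:

package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func printHighlightSource() {
	hl := elastic.NewHighlight().
		Field("content").         // hypothetical field name
		PreTags("<mark>").
		PostTags("</mark>").
		NumOfFragments(3).
		FragmentSize(150)
	src, err := hl.Source()
	if err != nil {
		log.Fatal(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out)) // the object placed under "highlight" in a search request
}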
377 vendor/github.com/olivere/elastic/v7/index.go generated vendored Normal file
@@ -0,0 +1,377 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndexService adds or updates a typed JSON document in a specified index,
// making it searchable.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html
// for details.
type IndexService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	id                  string
	index               string
	typ                 string
	parent              string
	routing             string
	timeout             string
	timestamp           string
	ttl                 string
	version             interface{}
	opType              string
	versionType         string
	refresh             string
	waitForActiveShards string
	pipeline            string
	ifSeqNo             *int64
	ifPrimaryTerm       *int64
	bodyJson            interface{}
	bodyString          string
}

// NewIndexService creates a new IndexService.
func NewIndexService(client *Client) *IndexService {
	return &IndexService{
		client: client,
		typ:    "_doc",
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndexService) Pretty(pretty bool) *IndexService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndexService) Human(human bool) *IndexService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndexService) ErrorTrace(errorTrace bool) *IndexService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndexService) FilterPath(filterPath ...string) *IndexService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndexService) Header(name string, value string) *IndexService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndexService) Headers(headers http.Header) *IndexService {
	s.headers = headers
	return s
}

// Id is the document ID.
func (s *IndexService) Id(id string) *IndexService {
	s.id = id
	return s
}

// Index is the name of the index.
func (s *IndexService) Index(index string) *IndexService {
	s.index = index
	return s
}

// Type is the type of the document.
//
// Deprecated: Types are in the process of being removed.
func (s *IndexService) Type(typ string) *IndexService {
	s.typ = typ
	return s
}

// WaitForActiveShards sets the number of shard copies that must be active
// before proceeding with the index operation. Defaults to 1, meaning the
// primary shard only. Set to `all` for all shard copies, otherwise set to
// any non-negative value less than or equal to the total number of copies
// for the shard (number of replicas + 1).
func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService {
	s.waitForActiveShards = waitForActiveShards
	return s
}

// Pipeline specifies the pipeline id to preprocess incoming documents with.
func (s *IndexService) Pipeline(pipeline string) *IndexService {
	s.pipeline = pipeline
	return s
}

// Refresh the index after performing the operation.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html
// for details.
func (s *IndexService) Refresh(refresh string) *IndexService {
	s.refresh = refresh
	return s
}

// Ttl is an expiration time for the document.
func (s *IndexService) Ttl(ttl string) *IndexService {
	s.ttl = ttl
	return s
}

// TTL is an expiration time for the document (alias for Ttl).
func (s *IndexService) TTL(ttl string) *IndexService {
	s.ttl = ttl
	return s
}

// Version is an explicit version number for concurrency control.
func (s *IndexService) Version(version interface{}) *IndexService {
	s.version = version
	return s
}

// OpType is an explicit operation type, i.e. "create" or "index" (default).
func (s *IndexService) OpType(opType string) *IndexService {
	s.opType = opType
	return s
}

// Parent is the ID of the parent document.
func (s *IndexService) Parent(parent string) *IndexService {
	s.parent = parent
	return s
}

// Routing is a specific routing value.
func (s *IndexService) Routing(routing string) *IndexService {
	s.routing = routing
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndexService) Timeout(timeout string) *IndexService {
	s.timeout = timeout
	return s
}

// Timestamp is an explicit timestamp for the document.
func (s *IndexService) Timestamp(timestamp string) *IndexService {
	s.timestamp = timestamp
	return s
}

// VersionType is a specific version type.
func (s *IndexService) VersionType(versionType string) *IndexService {
	s.versionType = versionType
	return s
}

// IfSeqNo indicates to only perform the index operation if the last
// operation that has changed the document has the specified sequence number.
func (s *IndexService) IfSeqNo(seqNo int64) *IndexService {
	s.ifSeqNo = &seqNo
	return s
}

// IfPrimaryTerm indicates to only perform the index operation if the
// last operation that has changed the document has the specified primary term.
func (s *IndexService) IfPrimaryTerm(primaryTerm int64) *IndexService {
	s.ifPrimaryTerm = &primaryTerm
	return s
}

// BodyJson is the document as a serializable JSON interface.
func (s *IndexService) BodyJson(body interface{}) *IndexService {
	s.bodyJson = body
	return s
}

// BodyString is the document encoded as a string.
func (s *IndexService) BodyString(body string) *IndexService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *IndexService) buildURL() (string, string, url.Values, error) {
	var err error
	var method, path string

	if s.id != "" {
		// Create document with manual id
		method = "PUT"
		path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
			"id":    s.id,
			"index": s.index,
			"type":  s.typ,
		})
	} else {
		// Automatic ID generation
		// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#index-creation
		method = "POST"
		path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
			"index": s.index,
			"type":  s.typ,
		})
	}
	if err != nil {
		return "", "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.opType != "" {
		params.Set("op_type", s.opType)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.pipeline != "" {
		params.Set("pipeline", s.pipeline)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.timestamp != "" {
		params.Set("timestamp", s.timestamp)
	}
	if s.ttl != "" {
		params.Set("ttl", s.ttl)
	}
	if s.version != nil {
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}
	if v := s.ifSeqNo; v != nil {
		params.Set("if_seq_no", fmt.Sprintf("%d", *v))
	}
	if v := s.ifPrimaryTerm; v != nil {
		params.Set("if_primary_term", fmt.Sprintf("%d", *v))
	}
	return method, path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndexService) Validate() error {
	var invalid []string
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	if s.bodyString == "" && s.bodyJson == nil {
		invalid = append(invalid, "BodyJson")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	method, path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  method,
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndexResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndexResponse is the result of indexing a document in Elasticsearch.
type IndexResponse struct {
	Index         string      `json:"_index,omitempty"`
	Type          string      `json:"_type,omitempty"`
	Id            string      `json:"_id,omitempty"`
	Version       int64       `json:"_version,omitempty"`
	Result        string      `json:"result,omitempty"`
	Shards        *ShardsInfo `json:"_shards,omitempty"`
	SeqNo         int64       `json:"_seq_no,omitempty"`
	PrimaryTerm   int64       `json:"_primary_term,omitempty"`
	Status        int         `json:"status,omitempty"`
	ForcedRefresh bool        `json:"forced_refresh,omitempty"`
}
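IndexService builds a single-document index request: a manual id turns the call into a PUT, otherwise the id is auto-generated via POST. A hedged usage sketch only, with "gitea_issues" as a purely hypothetical index name and a caller-supplied client:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// indexIssue stores one issue document and forces a refresh so it is
// immediately searchable; the document shape is an illustration, not
// the structure used by this commit.
func indexIssue(ctx context.Context, client *elastic.Client) (*elastic.IndexResponse, error) {
	return elastic.NewIndexService(client).
		Index("gitea_issues").
		Id("42").
		BodyJson(map[string]interface{}{
			"title":   "crash on start",
			"content": "stack trace ...",
		}).
		Refresh("true").
		Do(ctx)
}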
320 vendor/github.com/olivere/elastic/v7/indices_analyze.go generated vendored Normal file
@@ -0,0 +1,320 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesAnalyzeService performs the analysis process on a text and returns
// the tokens breakdown of the text.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-analyze.html
// for detail.
type IndicesAnalyzeService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index       string
	request     *IndicesAnalyzeRequest
	format      string
	preferLocal *bool
	bodyJson    interface{}
	bodyString  string
}

// NewIndicesAnalyzeService creates a new IndicesAnalyzeService.
func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService {
	return &IndicesAnalyzeService{
		client:  client,
		request: new(IndicesAnalyzeRequest),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesAnalyzeService) Human(human bool) *IndicesAnalyzeService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesAnalyzeService) ErrorTrace(errorTrace bool) *IndicesAnalyzeService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesAnalyzeService) FilterPath(filterPath ...string) *IndicesAnalyzeService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesAnalyzeService) Header(name string, value string) *IndicesAnalyzeService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesAnalyzeService) Headers(headers http.Header) *IndicesAnalyzeService {
	s.headers = headers
	return s
}

// Index is the name of the index to scope the operation.
func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService {
	s.index = index
	return s
}

// Format of the output.
func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService {
	s.format = format
	return s
}

// PreferLocal, when true, specifies that a local shard should be used
// if available. When false, a random shard is used (default: true).
func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService {
	s.preferLocal = &preferLocal
	return s
}

// Request passes the analyze request to use.
func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService {
	if request == nil {
		s.request = new(IndicesAnalyzeRequest)
	} else {
		s.request = request
	}
	return s
}

// Analyzer is the name of the analyzer to use.
func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService {
	s.request.Analyzer = analyzer
	return s
}

// Attributes is a list of token attributes to output; this parameter works
// only with explain=true.
func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService {
	s.request.Attributes = attributes
	return s
}

// CharFilter is a list of character filters to use for the analysis.
func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService {
	s.request.CharFilter = charFilter
	return s
}

// Explain, when true, outputs more advanced details (default: false).
func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService {
	s.request.Explain = explain
	return s
}

// Field specifies to use a specific analyzer configured for this field (instead of passing the analyzer name).
func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService {
	s.request.Field = field
	return s
}

// Filter is a list of filters to use for the analysis.
func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService {
	s.request.Filter = filter
	return s
}

// Text is the text on which the analysis should be performed (when request body is not used).
func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService {
	s.request.Text = text
	return s
}

// Tokenizer is the name of the tokenizer to use for the analysis.
func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService {
	s.request.Tokenizer = tokenizer
	return s
}

// BodyJson is the text on which the analysis should be performed.
func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService {
	s.bodyJson = body
	return s
}

// BodyString is the text on which the analysis should be performed.
func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string

	if s.index == "" {
		path = "/_analyze"
	} else {
		path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{
			"index": s.index,
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.format != "" {
		params.Set("format", s.format)
	}
	if s.preferLocal != nil {
		params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal))
	}

	return path, params, nil
}

// Do will execute the request with the given context.
func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else if s.bodyString != "" {
		body = s.bodyString
	} else {
		// Request parameters are deprecated in 5.1.1, and we must use a JSON
		// structure in the body to pass the parameters.
		// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-analyze.html
		body = s.request
	}

	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	ret := new(IndicesAnalyzeResponse)
	if err = s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}

	return ret, nil
}

func (s *IndicesAnalyzeService) Validate() error {
	var invalid []string
	if s.bodyJson == nil && s.bodyString == "" {
		if len(s.request.Text) == 0 {
			invalid = append(invalid, "Text")
		}
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// IndicesAnalyzeRequest specifies the parameters of the analyze request.
type IndicesAnalyzeRequest struct {
	Text       []string `json:"text,omitempty"`
	Analyzer   string   `json:"analyzer,omitempty"`
	Tokenizer  string   `json:"tokenizer,omitempty"`
	Filter     []string `json:"filter,omitempty"`
	CharFilter []string `json:"char_filter,omitempty"`
	Field      string   `json:"field,omitempty"`
	Explain    bool     `json:"explain,omitempty"`
	Attributes []string `json:"attributes,omitempty"`
}

type IndicesAnalyzeResponse struct {
	Tokens []AnalyzeToken               `json:"tokens"` // json part for normal message
	Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request
}

type AnalyzeTokenList struct {
	Name   string         `json:"name"`
	Tokens []AnalyzeToken `json:"tokens,omitempty"`
}

type AnalyzeToken struct {
	Token          string `json:"token"`
	Type           string `json:"type"` // e.g. "<ALPHANUM>"
	StartOffset    int    `json:"start_offset"`
	EndOffset      int    `json:"end_offset"`
	Bytes          string `json:"bytes"` // e.g. "[67 75 79]"
	Position       int    `json:"position"`
	PositionLength int    `json:"positionLength"` // seems to be wrong in 7.2+ (no snake_case), see https://github.com/elastic/elasticsearch/blob/7.2/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
	TermFrequency  int    `json:"termFrequency"`
	Keyword        bool   `json:"keyword"`
}

type CharFilteredText struct {
	Name         string   `json:"name"`
	FilteredText []string `json:"filtered_text"`
}

type IndicesAnalyzeResponseDetail struct {
	CustomAnalyzer bool                `json:"custom_analyzer"`
	Analyzer       *AnalyzeTokenList   `json:"analyzer,omitempty"`
	Charfilters    []*CharFilteredText `json:"charfilters,omitempty"`
	Tokenizer      *AnalyzeTokenList   `json:"tokenizer,omitempty"`
	TokenFilters   []*AnalyzeTokenList `json:"tokenfilters,omitempty"`
}
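IndicesAnalyzeService wraps the _analyze API and always ships its parameters as a JSON body. A small illustrative sketch of tokenizing a string with the built-in standard analyzer (the sample text is arbitrary):

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// analyzeSample prints each token produced by the standard analyzer.
func analyzeSample(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesAnalyzeService(client).
		Analyzer("standard").
		Text("Issue search support elasticsearch").
		Do(ctx)
	if err != nil {
		return err
	}
	for _, tok := range res.Tokens {
		fmt.Printf("%s [%d-%d]\n", tok.Token, tok.StartOffset, tok.EndOffset)
	}
	return nil
}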
214 vendor/github.com/olivere/elastic/v7/indices_close.go generated vendored Normal file
@@ -0,0 +1,214 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesCloseService closes an index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-open-close.html
// for details.
type IndicesCloseService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             string
	timeout           string
	masterTimeout     string
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewIndicesCloseService creates and initializes a new IndicesCloseService.
func NewIndicesCloseService(client *Client) *IndicesCloseService {
	return &IndicesCloseService{client: client}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesCloseService) Human(human bool) *IndicesCloseService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesCloseService) ErrorTrace(errorTrace bool) *IndicesCloseService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesCloseService) FilterPath(filterPath ...string) *IndicesCloseService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesCloseService) Header(name string, value string) *IndicesCloseService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesCloseService) Headers(headers http.Header) *IndicesCloseService {
	s.headers = headers
	return s
}

// Index is the name of the index to close.
func (s *IndicesCloseService) Index(index string) *IndicesCloseService {
	s.index = index
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService {
	s.expandWildcards = expandWildcards
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesCloseService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/_close", map[string]string{
		"index": s.index,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}

	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesCloseService) Validate() error {
	var invalid []string
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesCloseResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesCloseResponse is the response of IndicesCloseService.Do.
type IndicesCloseResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
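IndicesCloseService only requires the index name; all other options are optional query parameters. A short illustrative sketch, again with a hypothetical index name:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// closeIssuesIndex closes the hypothetical "gitea_issues" index and reports
// whether the cluster acknowledged the request.
func closeIssuesIndex(ctx context.Context, client *elastic.Client) (bool, error) {
	res, err := elastic.NewIndicesCloseService(client).Index("gitea_issues").Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}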
189
vendor/github.com/olivere/elastic/v7/indices_create.go
generated
vendored
Normal file
189
vendor/github.com/olivere/elastic/v7/indices_create.go
generated
vendored
Normal file
|
@ -0,0 +1,189 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesCreateService creates a new index.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-create-index.html
|
||||||
|
// for details.
|
||||||
|
type IndicesCreateService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index string
|
||||||
|
timeout string
|
||||||
|
masterTimeout string
|
||||||
|
bodyJson interface{}
|
||||||
|
bodyString string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesCreateService returns a new IndicesCreateService.
|
||||||
|
func NewIndicesCreateService(client *Client) *IndicesCreateService {
|
||||||
|
return &IndicesCreateService{client: client}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesCreateService) Human(human bool) *IndicesCreateService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesCreateService) ErrorTrace(errorTrace bool) *IndicesCreateService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesCreateService) FilterPath(filterPath ...string) *IndicesCreateService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesCreateService) Header(name string, value string) *IndicesCreateService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesCreateService) Headers(headers http.Header) *IndicesCreateService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is the name of the index to create.
|
||||||
|
func (s *IndicesCreateService) Index(index string) *IndicesCreateService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout the explicit operation timeout, e.g. "5s".
|
||||||
|
func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MasterTimeout specifies the timeout for connection to master.
|
||||||
|
func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
|
||||||
|
s.masterTimeout = masterTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Body specifies the configuration of the index as a string.
|
||||||
|
// It is an alias for BodyString.
|
||||||
|
func (s *IndicesCreateService) Body(body string) *IndicesCreateService {
|
||||||
|
s.bodyString = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyString specifies the configuration of the index as a string.
|
||||||
|
func (s *IndicesCreateService) BodyString(body string) *IndicesCreateService {
|
||||||
|
s.bodyString = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyJson specifies the configuration of the index. The interface{} will
|
||||||
|
// be serialized as a JSON document, so use a map[string]interface{}.
|
||||||
|
func (s *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
|
||||||
|
s.bodyJson = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
|
||||||
|
if s.index == "" {
|
||||||
|
return nil, errors.New("missing index name")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build url
|
||||||
|
path, err := uritemplates.Expand("/{index}", map[string]string{
|
||||||
|
"index": s.index,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup HTTP request body
|
||||||
|
var body interface{}
|
||||||
|
if s.bodyJson != nil {
|
||||||
|
body = s.bodyJson
|
||||||
|
} else {
|
||||||
|
body = s.bodyString
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "PUT",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Body: body,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := new(IndicesCreateResult)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of a create index request.
|
||||||
|
|
||||||
|
// IndicesCreateResult is the outcome of creating a new index.
|
||||||
|
type IndicesCreateResult struct {
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
ShardsAcknowledged bool `json:"shards_acknowledged"`
|
||||||
|
Index string `json:"index,omitempty"`
|
||||||
|
}
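A hedged sketch of how the create service above is driven (illustrative only, not part of the diff): the index name and settings body are placeholders, and every call used here, NewIndicesCreateService, Index, BodyString and Do, appears in the file above.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// createIndex creates a placeholder index with a one-shard settings body.
func createIndex(ctx context.Context, client *elastic.Client) error {
	const body = `{"settings":{"number_of_shards":1,"number_of_replicas":0}}` // placeholder settings
	res, err := elastic.NewIndicesCreateService(client).
		Index("gitea_issues").
		BodyString(body).
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("index creation was not acknowledged")
	}
	return nil
}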
184  vendor/github.com/olivere/elastic/v7/indices_delete.go  generated  vendored  Normal file
@@ -0,0 +1,184 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesDeleteService allows to delete existing indices.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-delete-index.html
|
||||||
|
// for details.
|
||||||
|
type IndicesDeleteService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
timeout string
|
||||||
|
masterTimeout string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
|
||||||
|
func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
|
||||||
|
return &IndicesDeleteService{
|
||||||
|
client: client,
|
||||||
|
index: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesDeleteService) Human(human bool) *IndicesDeleteService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesDeleteService) ErrorTrace(errorTrace bool) *IndicesDeleteService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesDeleteService) FilterPath(filterPath ...string) *IndicesDeleteService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesDeleteService) Header(name string, value string) *IndicesDeleteService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesDeleteService) Headers(headers http.Header) *IndicesDeleteService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index adds the list of indices to delete.
|
||||||
|
// Use `_all` or `*` string to delete all indices.
|
||||||
|
func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout is an explicit operation timeout.
|
||||||
|
func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MasterTimeout specifies the timeout for connection to master.
|
||||||
|
func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
|
||||||
|
s.masterTimeout = masterTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/{index}", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesDeleteService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if len(s.index) == 0 {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "DELETE",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesDeleteResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of a delete index request.
|
||||||
|
|
||||||
|
// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
|
||||||
|
type IndicesDeleteResponse struct {
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
}
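For completeness, a small illustrative sketch of the delete service (not part of the vendored file): the index name is a placeholder, and the calls mirror the setters defined above (Index takes a slice; `_all` or `*` would delete every index).

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// deleteIndex removes a placeholder index and logs the acknowledgement.
func deleteIndex(ctx context.Context, client *elastic.Client) {
	res, err := elastic.NewIndicesDeleteService(client).
		Index([]string{"gitea_issues"}).
		Timeout("10s").
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete acknowledged: %v", res.Acknowledged)
}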
180  vendor/github.com/olivere/elastic/v7/indices_delete_template.go  generated  vendored  Normal file
@@ -0,0 +1,180 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesDeleteTemplateService deletes index templates.
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html.
|
||||||
|
type IndicesDeleteTemplateService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
name string
|
||||||
|
timeout string
|
||||||
|
masterTimeout string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
|
||||||
|
func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
|
||||||
|
return &IndicesDeleteTemplateService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesDeleteTemplateService) Human(human bool) *IndicesDeleteTemplateService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesDeleteTemplateService) ErrorTrace(errorTrace bool) *IndicesDeleteTemplateService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesDeleteTemplateService) FilterPath(filterPath ...string) *IndicesDeleteTemplateService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesDeleteTemplateService) Header(name string, value string) *IndicesDeleteTemplateService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesDeleteTemplateService) Headers(headers http.Header) *IndicesDeleteTemplateService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name is the name of the template.
|
||||||
|
func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
|
||||||
|
s.name = name
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout is an explicit operation timeout.
|
||||||
|
func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
|
||||||
|
s.timeout = timeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MasterTimeout specifies the timeout for connection to master.
|
||||||
|
func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
|
||||||
|
s.masterTimeout = masterTimeout
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/_template/{name}", map[string]string{
|
||||||
|
"name": s.name,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesDeleteTemplateService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if s.name == "" {
|
||||||
|
invalid = append(invalid, "Name")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "DELETE",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesDeleteTemplateResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
|
||||||
|
type IndicesDeleteTemplateResponse struct {
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
ShardsAcknowledged bool `json:"shards_acknowledged"`
|
||||||
|
Index string `json:"index,omitempty"`
|
||||||
|
}
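An illustrative sketch for the template deletion service (the template name below is a placeholder; the calls are the ones defined in this file).

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// deleteTemplate removes a placeholder index template and reports the acknowledgement.
func deleteTemplate(ctx context.Context, client *elastic.Client) (bool, error) {
	res, err := elastic.NewIndicesDeleteTemplateService(client).
		Name("gitea_template").
		Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}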
204  vendor/github.com/olivere/elastic/v7/indices_exists.go  generated  vendored  Normal file
@@ -0,0 +1,204 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesExistsService checks if an index or indices exist or not.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-exists.html
|
||||||
|
// for details.
|
||||||
|
type IndicesExistsService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
local *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesExistsService creates and initializes a new IndicesExistsService.
|
||||||
|
func NewIndicesExistsService(client *Client) *IndicesExistsService {
|
||||||
|
return &IndicesExistsService{
|
||||||
|
client: client,
|
||||||
|
index: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesExistsService) Human(human bool) *IndicesExistsService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesExistsService) ErrorTrace(errorTrace bool) *IndicesExistsService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesExistsService) FilterPath(filterPath ...string) *IndicesExistsService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesExistsService) Header(name string, value string) *IndicesExistsService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesExistsService) Headers(headers http.Header) *IndicesExistsService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is a list of one or more indices to check.
|
||||||
|
func (s *IndicesExistsService) Index(index []string) *IndicesExistsService {
|
||||||
|
s.index = index
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
|
||||||
|
// resolves into no concrete indices. (This includes `_all` string or
|
||||||
|
// when no indices have been specified).
|
||||||
|
func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService {
|
||||||
|
s.allowNoIndices = &allowNoIndices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both.
|
||||||
|
func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService {
|
||||||
|
s.expandWildcards = expandWildcards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Local, when set, returns local information and does not retrieve the state
|
||||||
|
// from master node (default: false).
|
||||||
|
func (s *IndicesExistsService) Local(local bool) *IndicesExistsService {
|
||||||
|
s.local = &local
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||||
|
// ignored when unavailable (missing or closed).
|
||||||
|
func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService {
|
||||||
|
s.ignoreUnavailable = &ignoreUnavailable
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/{index}", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.local != nil {
|
||||||
|
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesExistsService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if len(s.index) == 0 {
|
||||||
|
invalid = append(invalid, "Index")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "HEAD",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
IgnoreErrors: []int{404},
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
switch res.StatusCode {
|
||||||
|
case http.StatusOK:
|
||||||
|
return true, nil
|
||||||
|
case http.StatusNotFound:
|
||||||
|
return false, nil
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
|
||||||
|
}
|
||||||
|
}
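Because Do returns a plain bool here (an HTTP HEAD request, mapping 200 to true and 404 to false), a short hedged sketch of the existence check follows; the index name is a placeholder.

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// indexExists reports whether a placeholder index exists.
func indexExists(ctx context.Context, client *elastic.Client) (bool, error) {
	return elastic.NewIndicesExistsService(client).
		Index([]string{"gitea_issues"}).
		Do(ctx)
}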
169  vendor/github.com/olivere/elastic/v7/indices_exists_template.go  generated  vendored  Normal file
@@ -0,0 +1,169 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesExistsTemplateService checks if a given template exists.
|
||||||
|
// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html#indices-templates-exists
|
||||||
|
// for documentation.
|
||||||
|
type IndicesExistsTemplateService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
name string
|
||||||
|
local *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
|
||||||
|
func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
|
||||||
|
return &IndicesExistsTemplateService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesExistsTemplateService) Human(human bool) *IndicesExistsTemplateService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesExistsTemplateService) ErrorTrace(errorTrace bool) *IndicesExistsTemplateService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesExistsTemplateService) FilterPath(filterPath ...string) *IndicesExistsTemplateService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesExistsTemplateService) Header(name string, value string) *IndicesExistsTemplateService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesExistsTemplateService) Headers(headers http.Header) *IndicesExistsTemplateService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name is the name of the template.
|
||||||
|
func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
|
||||||
|
s.name = name
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Local indicates whether to return local information, i.e. do not retrieve
|
||||||
|
// the state from master node (default: false).
|
||||||
|
func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
|
||||||
|
s.local = &local
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/_template/{name}", map[string]string{
|
||||||
|
"name": s.name,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.local != nil {
|
||||||
|
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesExistsTemplateService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if s.name == "" {
|
||||||
|
invalid = append(invalid, "Name")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "HEAD",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
IgnoreErrors: []int{404},
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
switch res.StatusCode {
|
||||||
|
case http.StatusOK:
|
||||||
|
return true, nil
|
||||||
|
case http.StatusNotFound:
|
||||||
|
return false, nil
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
|
||||||
|
}
|
||||||
|
}
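The same pattern applies to template existence. A minimal sketch, with a placeholder template name and only calls defined above:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// templateExists reports whether a placeholder index template exists.
func templateExists(ctx context.Context, client *elastic.Client) (bool, error) {
	return elastic.NewIndicesExistsTemplateService(client).
		Name("gitea_template").
		Do(ctx)
}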
224  vendor/github.com/olivere/elastic/v7/indices_flush.go  generated  vendored  Normal file
@@ -0,0 +1,224 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Flush allows to flush one or more indices. The flush process of an index
|
||||||
|
// basically frees memory from the index by flushing data to the index
|
||||||
|
// storage and clearing the internal transaction log.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-flush.html
|
||||||
|
// for details.
|
||||||
|
type IndicesFlushService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
force *bool
|
||||||
|
waitIfOngoing *bool
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesFlushService creates a new IndicesFlushService.
|
||||||
|
func NewIndicesFlushService(client *Client) *IndicesFlushService {
|
||||||
|
return &IndicesFlushService{
|
||||||
|
client: client,
|
||||||
|
index: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesFlushService) Human(human bool) *IndicesFlushService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesFlushService) ErrorTrace(errorTrace bool) *IndicesFlushService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesFlushService) FilterPath(filterPath ...string) *IndicesFlushService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesFlushService) Header(name string, value string) *IndicesFlushService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesFlushService) Headers(headers http.Header) *IndicesFlushService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is a list of index names; use `_all` or empty string for all indices.
|
||||||
|
func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
|
||||||
|
s.index = append(s.index, indices...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force indicates whether a flush should be forced even if it is not
|
||||||
|
// necessarily needed, i.e. if no changes will be committed to the index.
|
||||||
|
// This is useful if transaction log IDs should be incremented even if
|
||||||
|
// no uncommitted changes are present. (This setting can be considered as internal).
|
||||||
|
func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
|
||||||
|
s.force = &force
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitIfOngoing, if set to true, indicates that the flush operation will
|
||||||
|
// block until the flush can be executed if another flush operation is
|
||||||
|
// already executing. The default is false and will cause an exception
|
||||||
|
// to be thrown on the shard level if another flush operation is already running.
|
||||||
|
func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
|
||||||
|
s.waitIfOngoing = &waitIfOngoing
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||||
|
// ignored when unavailable (missing or closed).
|
||||||
|
func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
|
||||||
|
s.ignoreUnavailable = &ignoreUnavailable
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
|
||||||
|
// resolves into no concrete indices. (This includes `_all` string or when
|
||||||
|
// no indices have been specified).
|
||||||
|
func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
|
||||||
|
s.allowNoIndices = &allowNoIndices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards specifies whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both.
|
||||||
|
func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
|
||||||
|
s.expandWildcards = expandWildcards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
|
||||||
|
if len(s.index) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
path = "/_flush"
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.force != nil {
|
||||||
|
params.Set("force", fmt.Sprintf("%v", *s.force))
|
||||||
|
}
|
||||||
|
if s.waitIfOngoing != nil {
|
||||||
|
params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesFlushService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the service.
|
||||||
|
func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesFlushResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of a flush request.
|
||||||
|
|
||||||
|
type IndicesFlushResponse struct {
|
||||||
|
Shards *ShardsInfo `json:"_shards"`
|
||||||
|
}
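A hedged usage sketch of the flush service (not part of the diff): the index name is a placeholder, WaitIfOngoing avoids the shard-level exception described above, and the _shards summary is read from the response type just defined.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// flushIndex flushes a placeholder index and reports shard failures.
func flushIndex(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesFlushService(client).
		Index("gitea_issues").
		WaitIfOngoing(true).
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Shards != nil && res.Shards.Failed > 0 {
		return fmt.Errorf("flush failed on %d shard(s)", res.Shards.Failed)
	}
	return nil
}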
281  vendor/github.com/olivere/elastic/v7/indices_flush_synced.go  generated  vendored  Normal file
@@ -0,0 +1,281 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesSyncedFlushService performs a normal flush, then adds a generated
|
||||||
|
// unique marker (sync_id) to all shards.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-synced-flush.html
|
||||||
|
// for details.
|
||||||
|
type IndicesSyncedFlushService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesSyncedFlushService creates a new IndicesSyncedFlushService.
|
||||||
|
func NewIndicesSyncedFlushService(client *Client) *IndicesSyncedFlushService {
|
||||||
|
return &IndicesSyncedFlushService{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesSyncedFlushService) Pretty(pretty bool) *IndicesSyncedFlushService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesSyncedFlushService) Human(human bool) *IndicesSyncedFlushService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesSyncedFlushService) ErrorTrace(errorTrace bool) *IndicesSyncedFlushService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesSyncedFlushService) FilterPath(filterPath ...string) *IndicesSyncedFlushService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesSyncedFlushService) Header(name string, value string) *IndicesSyncedFlushService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesSyncedFlushService) Headers(headers http.Header) *IndicesSyncedFlushService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is a list of index names; use `_all` or empty string for all indices.
|
||||||
|
func (s *IndicesSyncedFlushService) Index(indices ...string) *IndicesSyncedFlushService {
|
||||||
|
s.index = append(s.index, indices...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||||
|
// ignored when unavailable (missing or closed).
|
||||||
|
func (s *IndicesSyncedFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSyncedFlushService {
|
||||||
|
s.ignoreUnavailable = &ignoreUnavailable
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
|
||||||
|
// resolves into no concrete indices. (This includes `_all` string or when
|
||||||
|
// no indices have been specified).
|
||||||
|
func (s *IndicesSyncedFlushService) AllowNoIndices(allowNoIndices bool) *IndicesSyncedFlushService {
|
||||||
|
s.allowNoIndices = &allowNoIndices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards specifies whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both..
|
||||||
|
func (s *IndicesSyncedFlushService) ExpandWildcards(expandWildcards string) *IndicesSyncedFlushService {
|
||||||
|
s.expandWildcards = expandWildcards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesSyncedFlushService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
|
||||||
|
if len(s.index) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/{index}/_flush/synced", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
path = "/_flush/synced"
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesSyncedFlushService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the service.
|
||||||
|
func (s *IndicesSyncedFlushService) Do(ctx context.Context) (*IndicesSyncedFlushResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesSyncedFlushResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- Result of a flush request.
|
||||||
|
|
||||||
|
// IndicesSyncedFlushResponse is the outcome of a synced flush call.
|
||||||
|
type IndicesSyncedFlushResponse struct {
|
||||||
|
Shards *ShardsInfo `json:"_shards"`
|
||||||
|
Index map[string]*IndicesShardsSyncedFlushResult `json:"-"`
|
||||||
|
|
||||||
|
// TODO Add information about the indices here from the root level
|
||||||
|
// It looks like this:
|
||||||
|
// {
|
||||||
|
// "_shards" : {
|
||||||
|
// "total" : 4,
|
||||||
|
// "successful" : 4,
|
||||||
|
// "failed" : 0
|
||||||
|
// },
|
||||||
|
// "elastic-test" : {
|
||||||
|
// "total" : 1,
|
||||||
|
// "successful" : 1,
|
||||||
|
// "failed" : 0
|
||||||
|
// },
|
||||||
|
// "elastic-test2" : {
|
||||||
|
// "total" : 1,
|
||||||
|
// "successful" : 1,
|
||||||
|
// "failed" : 0
|
||||||
|
// },
|
||||||
|
// "elastic-orders" : {
|
||||||
|
// "total" : 1,
|
||||||
|
// "successful" : 1,
|
||||||
|
// "failed" : 0
|
||||||
|
// },
|
||||||
|
// "elastic-nosource-test" : {
|
||||||
|
// "total" : 1,
|
||||||
|
// "successful" : 1,
|
||||||
|
// "failed" : 0
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesShardsSyncedFlushResult represents synced flush information about
|
||||||
|
// a specific index.
|
||||||
|
type IndicesShardsSyncedFlushResult struct {
|
||||||
|
Total int `json:"total"`
|
||||||
|
Successful int `json:"successful"`
|
||||||
|
Failed int `json:"failed"`
|
||||||
|
Failures []IndicesShardsSyncedFlushResultFailure `json:"failures,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesShardsSyncedFlushResultFailure represents a failure of a synced
|
||||||
|
// flush operation.
|
||||||
|
type IndicesShardsSyncedFlushResultFailure struct {
|
||||||
|
Shard int `json:"shard"`
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Routing struct {
|
||||||
|
State string `json:"state"`
|
||||||
|
Primary bool `json:"primary"`
|
||||||
|
Node string `json:"node"`
|
||||||
|
RelocatingNode *string `json:"relocating_node"`
|
||||||
|
Shard int `json:"shard"`
|
||||||
|
Index string `json:"index"`
|
||||||
|
ExpectedShardSizeInBytes int64 `json:"expected_shard_size_in_bytes,omitempty"`
|
||||||
|
// recoverySource
|
||||||
|
// allocationId
|
||||||
|
// unassignedInfo
|
||||||
|
} `json:"routing"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON parses the output from Synced Flush API.
|
||||||
|
func (resp *IndicesSyncedFlushResponse) UnmarshalJSON(data []byte) error {
|
||||||
|
m := make(map[string]json.RawMessage)
|
||||||
|
err := json.Unmarshal(data, &m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Index = make(map[string]*IndicesShardsSyncedFlushResult)
|
||||||
|
for k, v := range m {
|
||||||
|
if k == "_shards" {
|
||||||
|
if err := json.Unmarshal(v, &resp.Shards); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ix := new(IndicesShardsSyncedFlushResult)
|
||||||
|
if err := json.Unmarshal(v, &ix); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Index[k] = ix
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
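Given the custom UnmarshalJSON above, which splits the top-level keys into the _shards summary and per-index results, a short illustrative sketch shows how the per-index map is consumed; the index name is a placeholder.

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// syncedFlush performs a synced flush and logs the per-index shard counts.
func syncedFlush(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesSyncedFlushService(client).
		Index("gitea_issues").
		Do(ctx)
	if err != nil {
		return err
	}
	for name, r := range res.Index {
		log.Printf("%s: %d/%d shards synced, %d failed", name, r.Successful, r.Total, r.Failed)
	}
	return nil
}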
235  vendor/github.com/olivere/elastic/v7/indices_forcemerge.go  generated  vendored  Normal file
@@ -0,0 +1,235 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesForcemergeService allows to force merging of one or more indices.
|
||||||
|
// The merge relates to the number of segments a Lucene index holds
|
||||||
|
// within each shard. The force merge operation allows to reduce the number
|
||||||
|
// of segments by merging them.
|
||||||
|
//
|
||||||
|
// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-forcemerge.html
|
||||||
|
// for more information.
|
||||||
|
type IndicesForcemergeService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
flush *bool
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
maxNumSegments interface{}
|
||||||
|
onlyExpungeDeletes *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesForcemergeService creates a new IndicesForcemergeService.
|
||||||
|
func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService {
|
||||||
|
return &IndicesForcemergeService{
|
||||||
|
client: client,
|
||||||
|
index: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesForcemergeService) Human(human bool) *IndicesForcemergeService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
|
||||||
|
func (s *IndicesForcemergeService) ErrorTrace(errorTrace bool) *IndicesForcemergeService {
|
||||||
|
s.errorTrace = &errorTrace
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterPath specifies a list of filters used to reduce the response.
|
||||||
|
func (s *IndicesForcemergeService) FilterPath(filterPath ...string) *IndicesForcemergeService {
|
||||||
|
s.filterPath = filterPath
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header adds a header to the request.
|
||||||
|
func (s *IndicesForcemergeService) Header(name string, value string) *IndicesForcemergeService {
|
||||||
|
if s.headers == nil {
|
||||||
|
s.headers = http.Header{}
|
||||||
|
}
|
||||||
|
s.headers.Add(name, value)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers specifies the headers of the request.
|
||||||
|
func (s *IndicesForcemergeService) Headers(headers http.Header) *IndicesForcemergeService {
|
||||||
|
s.headers = headers
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is a list of index names; use `_all` or empty string to perform
|
||||||
|
// the operation on all indices.
|
||||||
|
func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService {
|
||||||
|
if s.index == nil {
|
||||||
|
s.index = make([]string, 0)
|
||||||
|
}
|
||||||
|
s.index = append(s.index, index...)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||||
|
// expression resolves into no concrete indices.
|
||||||
|
// (This includes `_all` string or when no indices have been specified).
|
||||||
|
func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
|
||||||
|
s.allowNoIndices = &allowNoIndices
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||||
|
// concrete indices that are open, closed or both..
|
||||||
|
func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
|
||||||
|
s.expandWildcards = expandWildcards
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush specifies whether the index should be flushed after performing
|
||||||
|
// the operation (default: true).
|
||||||
|
func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
|
||||||
|
s.flush = &flush
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreUnavailable indicates whether specified concrete indices should
|
||||||
|
// be ignored when unavailable (missing or closed).
|
||||||
|
func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
|
||||||
|
s.ignoreUnavailable = &ignoreUnavailable
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxNumSegments specifies the number of segments the index should be
|
||||||
|
// merged into (default: dynamic).
|
||||||
|
func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
|
||||||
|
s.maxNumSegments = maxNumSegments
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyExpungeDeletes specifies whether the operation should only expunge
|
||||||
|
// deleted documents.
|
||||||
|
func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
|
||||||
|
s.onlyExpungeDeletes = &onlyExpungeDeletes
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
|
||||||
|
var err error
|
||||||
|
var path string
|
||||||
|
|
||||||
|
// Build URL
|
||||||
|
if len(s.index) > 0 {
|
||||||
|
path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
|
||||||
|
"index": strings.Join(s.index, ","),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
path = "/_forcemerge"
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.allowNoIndices != nil {
|
||||||
|
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||||
|
}
|
||||||
|
if s.expandWildcards != "" {
|
||||||
|
params.Set("expand_wildcards", s.expandWildcards)
|
||||||
|
}
|
||||||
|
if s.flush != nil {
|
||||||
|
params.Set("flush", fmt.Sprintf("%v", *s.flush))
|
||||||
|
}
|
||||||
|
if s.ignoreUnavailable != nil {
|
||||||
|
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||||
|
}
|
||||||
|
if s.maxNumSegments != nil {
|
||||||
|
params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
|
||||||
|
}
|
||||||
|
if s.onlyExpungeDeletes != nil {
|
||||||
|
params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesForcemergeService) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesForcemergeResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do.
|
||||||
|
type IndicesForcemergeResponse struct {
|
||||||
|
Shards *ShardsInfo `json:"_shards"`
|
||||||
|
}
|
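The vendored service above uses the library's usual builder pattern. As a minimal sketch (not part of this commit; the function and index names are illustrative, and an already-configured *elastic.Client is assumed), a force merge could be triggered from application code like this:

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// forcemergeIndex merges the given index down to a single Lucene segment.
// Purely illustrative; retries and timeouts are left to the caller.
func forcemergeIndex(ctx context.Context, client *elastic.Client, index string) error {
	_, err := elastic.NewIndicesForcemergeService(client).
		Index(index).
		MaxNumSegments(1).
		Do(ctx)
	return err
}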
229 vendor/github.com/olivere/elastic/v7/indices_freeze.go generated vendored Normal file
@@ -0,0 +1,229 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesFreezeService freezes an index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/freeze-index-api.html
// and https://www.elastic.co/blog/creating-frozen-indices-with-the-elasticsearch-freeze-index-api
// for details.
type IndicesFreezeService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index               string
	timeout             string
	masterTimeout       string
	ignoreUnavailable   *bool
	allowNoIndices      *bool
	expandWildcards     string
	waitForActiveShards string
}

// NewIndicesFreezeService creates a new IndicesFreezeService.
func NewIndicesFreezeService(client *Client) *IndicesFreezeService {
	return &IndicesFreezeService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesFreezeService) Pretty(pretty bool) *IndicesFreezeService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesFreezeService) Human(human bool) *IndicesFreezeService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesFreezeService) ErrorTrace(errorTrace bool) *IndicesFreezeService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesFreezeService) FilterPath(filterPath ...string) *IndicesFreezeService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesFreezeService) Header(name string, value string) *IndicesFreezeService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesFreezeService) Headers(headers http.Header) *IndicesFreezeService {
	s.headers = headers
	return s
}

// Index is the name of the index to freeze.
func (s *IndicesFreezeService) Index(index string) *IndicesFreezeService {
	s.index = index
	return s
}

// Timeout allows to specify an explicit timeout.
func (s *IndicesFreezeService) Timeout(timeout string) *IndicesFreezeService {
	s.timeout = timeout
	return s
}

// MasterTimeout allows to specify a timeout for connection to master.
func (s *IndicesFreezeService) MasterTimeout(masterTimeout string) *IndicesFreezeService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesFreezeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFreezeService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices. (This includes `_all` string or when
// no indices have been specified).
func (s *IndicesFreezeService) AllowNoIndices(allowNoIndices bool) *IndicesFreezeService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards specifies whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *IndicesFreezeService) ExpandWildcards(expandWildcards string) *IndicesFreezeService {
	s.expandWildcards = expandWildcards
	return s
}

// WaitForActiveShards sets the number of active shards to wait for
// before the operation returns.
func (s *IndicesFreezeService) WaitForActiveShards(numShards string) *IndicesFreezeService {
	s.waitForActiveShards = numShards
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesFreezeService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/_freeze", map[string]string{
		"index": s.index,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesFreezeService) Validate() error {
	var invalid []string
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the service.
func (s *IndicesFreezeService) Do(ctx context.Context) (*IndicesFreezeResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesFreezeResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesFreezeResponse is the outcome of freezing an index.
type IndicesFreezeResponse struct {
	Shards *ShardsInfo `json:"_shards"`
}
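As a hedged sketch only (nothing in this commit freezes indices, and the freeze API is only available on Elasticsearch distributions that ship it), calling the service above would look like:

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// freezeIndex makes an index read-only and minimizes its memory footprint.
func freezeIndex(ctx context.Context, client *elastic.Client, index string) error {
	_, err := elastic.NewIndicesFreezeService(client).Index(index).Do(ctx)
	return err
}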
237 vendor/github.com/olivere/elastic/v7/indices_get.go generated vendored Normal file
@@ -0,0 +1,237 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesGetService retrieves information about one or more indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-index.html
// for more details.
type IndicesGetService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	feature           []string
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	flatSettings      *bool
}

// NewIndicesGetService creates a new IndicesGetService.
func NewIndicesGetService(client *Client) *IndicesGetService {
	return &IndicesGetService{
		client:  client,
		index:   make([]string, 0),
		feature: make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesGetService) Human(human bool) *IndicesGetService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesGetService) ErrorTrace(errorTrace bool) *IndicesGetService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesGetService) FilterPath(filterPath ...string) *IndicesGetService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesGetService) Header(name string, value string) *IndicesGetService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesGetService) Headers(headers http.Header) *IndicesGetService {
	s.headers = headers
	return s
}

// Index is a list of index names.
func (s *IndicesGetService) Index(indices ...string) *IndicesGetService {
	s.index = append(s.index, indices...)
	return s
}

// Feature is a list of features.
func (s *IndicesGetService) Feature(features ...string) *IndicesGetService {
	s.feature = append(s.feature, features...)
	return s
}

// Local indicates whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesGetService) Local(local bool) *IndicesGetService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard expression
// resolves to no concrete indices (default: false).
func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether wildcard expressions should get
// expanded to open or closed indices (default: open).
func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
	s.expandWildcards = expandWildcards
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetService) buildURL() (string, url.Values, error) {
	var err error
	var path string
	var index []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.feature) > 0 {
		// Build URL
		path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
			"index":   strings.Join(index, ","),
			"feature": strings.Join(s.feature, ","),
		})
	} else {
		// Build URL
		path, err = uritemplates.Expand("/{index}", map[string]string{
			"index": strings.Join(index, ","),
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetService) Validate() error {
	var invalid []string
	if len(s.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]*IndicesGetResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetResponse is part of the response of IndicesGetService.Do.
type IndicesGetResponse struct {
	Aliases  map[string]interface{} `json:"aliases"`
	Mappings map[string]interface{} `json:"mappings"`
	Settings map[string]interface{} `json:"settings"`
	Warmers  map[string]interface{} `json:"warmers"`
}
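For orientation, a minimal sketch of reading index metadata through the get-index service above (illustrative only, assuming a configured client; the real client also exposes dedicated exists helpers):

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// getIndexInfo returns aliases, mappings and settings keyed by index name.
func getIndexInfo(ctx context.Context, client *elastic.Client, index string) (map[string]*elastic.IndicesGetResponse, error) {
	return elastic.NewIndicesGetService(client).Index(index).Do(ctx)
}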
230 vendor/github.com/olivere/elastic/v7/indices_get_aliases.go generated vendored Normal file
@@ -0,0 +1,230 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// AliasesService returns the aliases associated with one or more indices, or the
// indices associated with one or more aliases, or a combination of those filters.
// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html.
type AliasesService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index []string
	alias []string
}

// NewAliasesService instantiates a new AliasesService.
func NewAliasesService(client *Client) *AliasesService {
	builder := &AliasesService{
		client: client,
	}
	return builder
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *AliasesService) Pretty(pretty bool) *AliasesService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *AliasesService) Human(human bool) *AliasesService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *AliasesService) ErrorTrace(errorTrace bool) *AliasesService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *AliasesService) FilterPath(filterPath ...string) *AliasesService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *AliasesService) Header(name string, value string) *AliasesService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *AliasesService) Headers(headers http.Header) *AliasesService {
	s.headers = headers
	return s
}

// Index adds one or more indices.
func (s *AliasesService) Index(index ...string) *AliasesService {
	s.index = append(s.index, index...)
	return s
}

// Alias adds one or more aliases.
func (s *AliasesService) Alias(alias ...string) *AliasesService {
	s.alias = append(s.alias, alias...)
	return s
}

// buildURL builds the URL for the operation.
func (s *AliasesService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_alias/{alias}", map[string]string{
			"index": strings.Join(s.index, ","),
			"alias": strings.Join(s.alias, ","),
		})
	} else {
		path, err = uritemplates.Expand("/_alias/{alias}", map[string]string{
			"alias": strings.Join(s.alias, ","),
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}
	path = strings.TrimSuffix(path, "/")

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	return path, params, nil
}

func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// {
	//   "indexName" : {
	//     "aliases" : {
	//       "alias1" : { },
	//       "alias2" : { }
	//     }
	//   },
	//   "indexName2" : {
	//     ...
	//   },
	// }
	indexMap := make(map[string]struct {
		Aliases map[string]struct {
			IsWriteIndex bool `json:"is_write_index"`
		} `json:"aliases"`
	})
	if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil {
		return nil, err
	}

	// Each (indexName, _)
	ret := &AliasesResult{
		Indices: make(map[string]indexResult),
	}
	for indexName, indexData := range indexMap {
		if indexData.Aliases == nil {
			continue
		}

		indexOut, found := ret.Indices[indexName]
		if !found {
			indexOut = indexResult{Aliases: make([]aliasResult, 0)}
		}

		// { "aliases" : { ... } }
		for aliasName, aliasData := range indexData.Aliases {
			aliasRes := aliasResult{AliasName: aliasName, IsWriteIndex: aliasData.IsWriteIndex}
			indexOut.Aliases = append(indexOut.Aliases, aliasRes)
		}

		ret.Indices[indexName] = indexOut
	}

	return ret, nil
}

// -- Result of an alias request.

// AliasesResult is the outcome of calling AliasesService.Do.
type AliasesResult struct {
	Indices map[string]indexResult
}

type indexResult struct {
	Aliases []aliasResult
}

type aliasResult struct {
	AliasName    string
	IsWriteIndex bool
}

// IndicesByAlias returns all indices given a specific alias name.
func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
	var indices []string
	for indexName, indexInfo := range ar.Indices {
		for _, aliasInfo := range indexInfo.Aliases {
			if aliasInfo.AliasName == aliasName {
				indices = append(indices, indexName)
			}
		}
	}
	return indices
}

// HasAlias returns true if the index has a specific alias.
func (ir indexResult) HasAlias(aliasName string) bool {
	for _, alias := range ir.Aliases {
		if alias.AliasName == aliasName {
			return true
		}
	}
	return false
}
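The AliasesResult helpers above (IndicesByAlias, HasAlias) make alias lookups straightforward. A minimal, illustrative sketch (not from this commit; client and alias name are assumptions) of resolving which concrete indices currently back an alias:

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// indicesBehindAlias resolves the concrete index names behind the given alias.
func indicesBehindAlias(ctx context.Context, client *elastic.Client, alias string) ([]string, error) {
	res, err := elastic.NewAliasesService(client).Alias(alias).Do(ctx)
	if err != nil {
		return nil, err
	}
	return res.IndicesByAlias(alias), nil
}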
238 vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go generated vendored Normal file
@@ -0,0 +1,238 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
// or index/type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-field-mapping.html
// for details.
type IndicesGetFieldMappingService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	typ               []string
	field             []string
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
// Use NewIndicesGetFieldMappingService.
func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
	return NewIndicesGetFieldMappingService(client)
}

// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
	return &IndicesGetFieldMappingService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesGetFieldMappingService) Human(human bool) *IndicesGetFieldMappingService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesGetFieldMappingService) ErrorTrace(errorTrace bool) *IndicesGetFieldMappingService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesGetFieldMappingService) FilterPath(filterPath ...string) *IndicesGetFieldMappingService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesGetFieldMappingService) Header(name string, value string) *IndicesGetFieldMappingService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesGetFieldMappingService) Headers(headers http.Header) *IndicesGetFieldMappingService {
	s.headers = headers
	return s
}

// Index is a list of index names.
func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
	s.index = append(s.index, indices...)
	return s
}

// Type is a list of document types.
func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
	s.typ = append(s.typ, types...)
	return s
}

// Field is a list of fields.
func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
	s.field = append(s.field, fields...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
	var index, typ, field []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.typ) > 0 {
		typ = s.typ
	} else {
		typ = []string{"_all"}
	}

	if len(s.field) > 0 {
		field = s.field
	} else {
		field = []string{"*"}
	}

	// Build URL
	path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
		"index": strings.Join(index, ","),
		"type":  strings.Join(typ, ","),
		"field": strings.Join(field, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetFieldMappingService) Validate() error {
	return nil
}

// Do executes the operation. It returns mapping definitions for an index
// or index/type.
func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
	var ret map[string]interface{}

	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}
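As a small, hedged illustration (not part of the vendored file or of this commit), fetching the mapping of a single field through the service above and receiving the raw JSON as a generic map:

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// fieldMapping returns the mapping definition of one field of an index,
// decoded into a generic map exactly as the service above returns it.
func fieldMapping(ctx context.Context, client *elastic.Client, index, field string) (map[string]interface{}, error) {
	return elastic.NewIndicesGetFieldMappingService(client).Index(index).Field(field).Do(ctx)
}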
225 vendor/github.com/olivere/elastic/v7/indices_get_mapping.go generated vendored Normal file
@@ -0,0 +1,225 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesGetMappingService retrieves the mapping definitions for an index or
// index/type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-mapping.html
// for details.
type IndicesGetMappingService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	typ               []string
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewGetMappingService is an alias for NewIndicesGetMappingService.
// Use NewIndicesGetMappingService.
func NewGetMappingService(client *Client) *IndicesGetMappingService {
	return NewIndicesGetMappingService(client)
}

// NewIndicesGetMappingService creates a new IndicesGetMappingService.
func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
	return &IndicesGetMappingService{
		client: client,
		index:  make([]string, 0),
		typ:    make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesGetMappingService) Human(human bool) *IndicesGetMappingService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesGetMappingService) ErrorTrace(errorTrace bool) *IndicesGetMappingService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesGetMappingService) FilterPath(filterPath ...string) *IndicesGetMappingService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesGetMappingService) Header(name string, value string) *IndicesGetMappingService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesGetMappingService) Headers(headers http.Header) *IndicesGetMappingService {
	s.headers = headers
	return s
}

// Index is a list of index names.
func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
	s.index = append(s.index, indices...)
	return s
}

// Type is a list of document types.
func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
	s.typ = append(s.typ, types...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
	var index, typ []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.typ) > 0 {
		typ = s.typ
	} else {
		typ = []string{"_all"}
	}

	// Build URL
	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
		"index": strings.Join(index, ","),
		"type":  strings.Join(typ, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetMappingService) Validate() error {
	return nil
}

// Do executes the operation. It returns mapping definitions for an index
// or index/type.
func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]interface{}
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}
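A corresponding hedged sketch for the whole-index mapping service above (illustrative, not from this commit; client and index are assumed to exist):

package sketches

import (
	"context"

	elastic "github.com/olivere/elastic/v7"
)

// indexMapping returns the full mapping of an index as a generic map.
func indexMapping(ctx context.Context, client *elastic.Client, index string) (map[string]interface{}, error) {
	return elastic.NewIndicesGetMappingService(client).Index(index).Do(ctx)
}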
238
vendor/github.com/olivere/elastic/v7/indices_get_settings.go
generated
vendored
Normal file
238
vendor/github.com/olivere/elastic/v7/indices_get_settings.go
generated
vendored
Normal file
|
@ -0,0 +1,238 @@
|
||||||
|
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-license.
|
||||||
|
// See http://olivere.mit-license.org/license.txt for details.
|
||||||
|
|
||||||
|
package elastic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/olivere/elastic/v7/uritemplates"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IndicesGetSettingsService allows to retrieve settings of one
|
||||||
|
// or more indices.
|
||||||
|
//
|
||||||
|
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-settings.html
|
||||||
|
// for more details.
|
||||||
|
type IndicesGetSettingsService struct {
|
||||||
|
client *Client
|
||||||
|
|
||||||
|
pretty *bool // pretty format the returned JSON response
|
||||||
|
human *bool // return human readable values for statistics
|
||||||
|
errorTrace *bool // include the stack trace of returned errors
|
||||||
|
filterPath []string // list of filters used to reduce the response
|
||||||
|
headers http.Header // custom request-level HTTP headers
|
||||||
|
|
||||||
|
index []string
|
||||||
|
name []string
|
||||||
|
ignoreUnavailable *bool
|
||||||
|
allowNoIndices *bool
|
||||||
|
expandWildcards string
|
||||||
|
flatSettings *bool
|
||||||
|
local *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
|
||||||
|
func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
|
||||||
|
return &IndicesGetSettingsService{
|
||||||
|
client: client,
|
||||||
|
index: make([]string, 0),
|
||||||
|
name: make([]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||||
|
func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
|
||||||
|
s.pretty = &pretty
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human specifies whether human readable values should be returned in
|
||||||
|
// the JSON response, e.g. "7.5mb".
|
||||||
|
func (s *IndicesGetSettingsService) Human(human bool) *IndicesGetSettingsService {
|
||||||
|
s.human = &human
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesGetSettingsService) ErrorTrace(errorTrace bool) *IndicesGetSettingsService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesGetSettingsService) FilterPath(filterPath ...string) *IndicesGetSettingsService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesGetSettingsService) Header(name string, value string) *IndicesGetSettingsService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesGetSettingsService) Headers(headers http.Header) *IndicesGetSettingsService {
	s.headers = headers
	return s
}

// Index is a list of index names; use `_all` or empty string to perform
// the operation on all indices.
func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
	s.index = append(s.index, indices...)
	return s
}

// Name are the names of the settings that should be included.
func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
	s.name = append(s.name, name...)
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should
// be ignored when unavailable (missing or closed).
func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression
// to concrete indices that are open, closed or both.
// Options: open, closed, none, all. Default: open,closed.
func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
	s.expandWildcards = expandWildcards
	return s
}

// FlatSettings indicates whether to return settings in flat format (default: false).
func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
	s.flatSettings = &flatSettings
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
	s.local = &local
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
	var err error
	var path string
	var index []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.name) > 0 {
		// Build URL
		path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
			"index": strings.Join(index, ","),
			"name":  strings.Join(s.name, ","),
		})
	} else {
		// Build URL
		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
			"index": strings.Join(index, ","),
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetSettingsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]*IndicesGetSettingsResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
type IndicesGetSettingsResponse struct {
	Settings map[string]interface{} `json:"settings"`
}
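Usage note (not part of the vendored file): the service above follows the library's fluent builder pattern — configure the request with the setters, then call Do. A minimal sketch, assuming the usual imports (context, fmt, github.com/olivere/elastic/v7), an initialized *elastic.Client, and a hypothetical index name "gitea_issues":

// printIssueIndexSettings is a usage sketch; the client, context and
// index name are assumptions for illustration, not taken from this diff.
func printIssueIndexSettings(ctx context.Context, client *elastic.Client) error {
	resp, err := elastic.NewIndicesGetSettingsService(client).
		Index("gitea_issues").
		Name("index.number_of_replicas").
		Do(ctx)
	if err != nil {
		return err
	}
	for index, details := range resp {
		fmt.Printf("%s: %v\n", index, details.Settings)
	}
	return nil
}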
184 vendor/github.com/olivere/elastic/v7/indices_get_template.go generated vendored Normal file
@@ -0,0 +1,184 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesGetTemplateService returns an index template.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html.
type IndicesGetTemplateService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	name         []string
	flatSettings *bool
	local        *bool
}

// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
	return &IndicesGetTemplateService{
		client: client,
		name:   make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesGetTemplateService) Human(human bool) *IndicesGetTemplateService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesGetTemplateService) ErrorTrace(errorTrace bool) *IndicesGetTemplateService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesGetTemplateService) FilterPath(filterPath ...string) *IndicesGetTemplateService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesGetTemplateService) Header(name string, value string) *IndicesGetTemplateService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesGetTemplateService) Headers(headers http.Header) *IndicesGetTemplateService {
	s.headers = headers
	return s
}

// Name is the name of the index template.
func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
	s.name = append(s.name, name...)
	return s
}

// FlatSettings is returns settings in flat format (default: false).
func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
	s.flatSettings = &flatSettings
	return s
}

// Local indicates whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
	s.local = &local
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if len(s.name) > 0 {
		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
			"name": strings.Join(s.name, ","),
		})
	} else {
		path = "/_template"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesGetTemplateService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]*IndicesGetTemplateResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
type IndicesGetTemplateResponse struct {
	Order         int                    `json:"order,omitempty"`
	Version       int                    `json:"version,omitempty"`
	IndexPatterns []string               `json:"index_patterns,omitempty"`
	Settings      map[string]interface{} `json:"settings,omitempty"`
	Mappings      map[string]interface{} `json:"mappings,omitempty"`
	Aliases       map[string]interface{} `json:"aliases,omitempty"`
}
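Usage note (not part of the vendored file): fetching a template by name returns a map keyed by template name. A minimal sketch under the same assumptions as above (hypothetical client, context and template name):

// getIssueTemplate is a usage sketch; the template name "gitea_issues"
// is an illustrative assumption.
func getIssueTemplate(ctx context.Context, client *elastic.Client) error {
	templates, err := elastic.NewIndicesGetTemplateService(client).
		Name("gitea_issues").
		FlatSettings(true).
		Do(ctx)
	if err != nil {
		return err
	}
	for name, tmpl := range templates {
		fmt.Printf("%s matches %v\n", name, tmpl.IndexPatterns)
	}
	return nil
}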
227 vendor/github.com/olivere/elastic/v7/indices_open.go generated vendored Normal file
@@ -0,0 +1,227 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesOpenService opens an index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-open-close.html
// for details.
type IndicesOpenService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index               string
	timeout             string
	masterTimeout       string
	ignoreUnavailable   *bool
	allowNoIndices      *bool
	expandWildcards     string
	waitForActiveShards string
}

// NewIndicesOpenService creates and initializes a new IndicesOpenService.
func NewIndicesOpenService(client *Client) *IndicesOpenService {
	return &IndicesOpenService{client: client}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesOpenService) Human(human bool) *IndicesOpenService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesOpenService) ErrorTrace(errorTrace bool) *IndicesOpenService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesOpenService) FilterPath(filterPath ...string) *IndicesOpenService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesOpenService) Header(name string, value string) *IndicesOpenService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesOpenService) Headers(headers http.Header) *IndicesOpenService {
	s.headers = headers
	return s
}

// Index is the name of the index to open.
func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
	s.index = index
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should
// be ignored when unavailable (missing or closed).
func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
	s.expandWildcards = expandWildcards
	return s
}

// WaitForActiveShards specifies the number of shards that must be allocated
// before the Open operation returns. Valid values are "all" or an integer
// between 0 and number_of_replicas+1 (default: 0)
func (s *IndicesOpenService) WaitForActiveShards(waitForActiveShards string) *IndicesOpenService {
	s.waitForActiveShards = waitForActiveShards
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/_open", map[string]string{
		"index": s.index,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}

	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesOpenService) Validate() error {
	var invalid []string
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesOpenResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesOpenResponse is the response of IndicesOpenService.Do.
type IndicesOpenResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
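Usage note (not part of the vendored file): opening a closed index is a POST to /{index}/_open, and the response only confirms acknowledgement. A minimal sketch with a hypothetical index name:

// reopenIssueIndex is a usage sketch; the client, context and index name
// are assumptions made for illustration only.
func reopenIssueIndex(ctx context.Context, client *elastic.Client) error {
	resp, err := elastic.NewIndicesOpenService(client).
		Index("gitea_issues").
		WaitForActiveShards("1").
		Do(ctx)
	if err != nil {
		return err
	}
	if !resp.Acknowledged {
		return fmt.Errorf("open request for %q was not acknowledged", "gitea_issues")
	}
	return nil
}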
399 vendor/github.com/olivere/elastic/v7/indices_put_alias.go generated vendored Normal file
@@ -0,0 +1,399 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// -- Actions --

// AliasAction is an action to apply to an alias, e.g. "add" or "remove".
type AliasAction interface {
	Source() (interface{}, error)
}

// AliasAddAction is an action to add to an alias.
type AliasAddAction struct {
	index         []string // index name(s)
	alias         string   // alias name
	filter        Query
	routing       string
	searchRouting string
	indexRouting  string
	isWriteIndex  *bool
}

// NewAliasAddAction returns an action to add an alias.
func NewAliasAddAction(alias string) *AliasAddAction {
	return &AliasAddAction{
		alias: alias,
	}
}

// Index associates one or more indices to the alias.
func (a *AliasAddAction) Index(index ...string) *AliasAddAction {
	a.index = append(a.index, index...)
	return a
}

func (a *AliasAddAction) removeBlankIndexNames() {
	var indices []string
	for _, index := range a.index {
		if len(index) > 0 {
			indices = append(indices, index)
		}
	}
	a.index = indices
}

// Filter associates a filter to the alias.
func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {
	a.filter = filter
	return a
}

// Routing associates a routing value to the alias.
// This basically sets index and search routing to the same value.
func (a *AliasAddAction) Routing(routing string) *AliasAddAction {
	a.routing = routing
	return a
}

// IndexRouting associates an index routing value to the alias.
func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction {
	a.indexRouting = routing
	return a
}

// SearchRouting associates a search routing value to the alias.
func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction {
	a.searchRouting = strings.Join(routing, ",")
	return a
}

// IsWriteIndex associates an is_write_index flag to the alias.
func (a *AliasAddAction) IsWriteIndex(flag bool) *AliasAddAction {
	a.isWriteIndex = &flag
	return a
}

// Validate checks if the operation is valid.
func (a *AliasAddAction) Validate() error {
	var invalid []string
	if len(a.alias) == 0 {
		invalid = append(invalid, "Alias")
	}
	if len(a.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	if a.isWriteIndex != nil && len(a.index) > 1 {
		return fmt.Errorf("more than 1 target index specified in operation with 'is_write_index' flag present")
	}
	return nil
}

// Source returns the JSON-serializable data.
func (a *AliasAddAction) Source() (interface{}, error) {
	a.removeBlankIndexNames()
	if err := a.Validate(); err != nil {
		return nil, err
	}
	src := make(map[string]interface{})
	act := make(map[string]interface{})
	src["add"] = act
	act["alias"] = a.alias
	switch len(a.index) {
	case 1:
		act["index"] = a.index[0]
	default:
		act["indices"] = a.index
	}
	if a.filter != nil {
		f, err := a.filter.Source()
		if err != nil {
			return nil, err
		}
		act["filter"] = f
	}
	if len(a.routing) > 0 {
		act["routing"] = a.routing
	}
	if len(a.indexRouting) > 0 {
		act["index_routing"] = a.indexRouting
	}
	if len(a.searchRouting) > 0 {
		act["search_routing"] = a.searchRouting
	}
	if a.isWriteIndex != nil {
		act["is_write_index"] = *a.isWriteIndex
	}
	return src, nil
}

// AliasRemoveAction is an action to remove an alias.
type AliasRemoveAction struct {
	index []string // index name(s)
	alias string   // alias name
}

// NewAliasRemoveAction returns an action to remove an alias.
func NewAliasRemoveAction(alias string) *AliasRemoveAction {
	return &AliasRemoveAction{
		alias: alias,
	}
}

// Index associates one or more indices to the alias.
func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {
	a.index = append(a.index, index...)
	return a
}

func (a *AliasRemoveAction) removeBlankIndexNames() {
	var indices []string
	for _, index := range a.index {
		if len(index) > 0 {
			indices = append(indices, index)
		}
	}
	a.index = indices
}

// Validate checks if the operation is valid.
func (a *AliasRemoveAction) Validate() error {
	var invalid []string
	if len(a.alias) == 0 {
		invalid = append(invalid, "Alias")
	}
	if len(a.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Source returns the JSON-serializable data.
func (a *AliasRemoveAction) Source() (interface{}, error) {
	a.removeBlankIndexNames()
	if err := a.Validate(); err != nil {
		return nil, err
	}
	src := make(map[string]interface{})
	act := make(map[string]interface{})
	src["remove"] = act
	act["alias"] = a.alias
	switch len(a.index) {
	case 1:
		act["index"] = a.index[0]
	default:
		act["indices"] = a.index
	}
	return src, nil
}

// AliasRemoveIndexAction is an action to remove an index during an alias
// operation.
type AliasRemoveIndexAction struct {
	index string // index name
}

// NewAliasRemoveIndexAction returns an action to remove an index.
func NewAliasRemoveIndexAction(index string) *AliasRemoveIndexAction {
	return &AliasRemoveIndexAction{
		index: index,
	}
}

// Validate checks if the operation is valid.
func (a *AliasRemoveIndexAction) Validate() error {
	if a.index == "" {
		return fmt.Errorf("missing required field: index")
	}
	return nil
}

// Source returns the JSON-serializable data.
func (a *AliasRemoveIndexAction) Source() (interface{}, error) {
	if err := a.Validate(); err != nil {
		return nil, err
	}
	src := make(map[string]interface{})
	act := make(map[string]interface{})
	src["remove_index"] = act
	act["index"] = a.index
	return src, nil
}

// -- Service --

// AliasService enables users to add or remove an alias.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html
// for details.
type AliasService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	actions []AliasAction
}

// NewAliasService implements a service to manage aliases.
func NewAliasService(client *Client) *AliasService {
	builder := &AliasService{
		client: client,
	}
	return builder
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *AliasService) Pretty(pretty bool) *AliasService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *AliasService) Human(human bool) *AliasService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *AliasService) ErrorTrace(errorTrace bool) *AliasService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *AliasService) FilterPath(filterPath ...string) *AliasService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *AliasService) Header(name string, value string) *AliasService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *AliasService) Headers(headers http.Header) *AliasService {
	s.headers = headers
	return s
}

// Add adds an alias to an index.
func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
	action := NewAliasAddAction(aliasName).Index(indexName)
	s.actions = append(s.actions, action)
	return s
}

// Add adds an alias to an index and associates a filter to the alias.
func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
	action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter)
	s.actions = append(s.actions, action)
	return s
}

// Remove removes an alias.
func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
	action := NewAliasRemoveAction(aliasName).Index(indexName)
	s.actions = append(s.actions, action)
	return s
}

// Action accepts one or more AliasAction instances which can be
// of type AliasAddAction or AliasRemoveAction.
func (s *AliasService) Action(action ...AliasAction) *AliasService {
	s.actions = append(s.actions, action...)
	return s
}

// buildURL builds the URL for the operation.
func (s *AliasService) buildURL() (string, url.Values, error) {
	path := "/_aliases"

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	return path, params, nil
}

// Do executes the command.
func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Body with actions
	body := make(map[string]interface{})
	var actions []interface{}
	for _, action := range s.actions {
		src, err := action.Source()
		if err != nil {
			return nil, err
		}
		actions = append(actions, src)
	}
	body["actions"] = actions

	// Get response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return results
	ret := new(AliasResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of an alias request.

// AliasResult is the outcome of calling Do on AliasService.
type AliasResult struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
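Usage note (not part of the vendored file): AliasService batches AliasAction values into a single POST to /_aliases, so several alias changes are applied atomically. A minimal sketch; the index and alias names are assumptions for illustration:

// swapIssueAlias is a usage sketch. It atomically moves a hypothetical
// "gitea_issues" alias from an old index to a new one.
func swapIssueAlias(ctx context.Context, client *elastic.Client) error {
	result, err := elastic.NewAliasService(client).
		Action(
			elastic.NewAliasRemoveAction("gitea_issues").Index("gitea_issues_v1"),
			elastic.NewAliasAddAction("gitea_issues").Index("gitea_issues_v2").IsWriteIndex(true),
		).
		Do(ctx)
	if err != nil {
		return err
	}
	if !result.Acknowledged {
		return fmt.Errorf("alias update was not acknowledged")
	}
	return nil
}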
261 vendor/github.com/olivere/elastic/v7/indices_put_mapping.go generated vendored Normal file
@@ -0,0 +1,261 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesPutMappingService allows to register specific mapping definition
// for a specific type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-put-mapping.html
// for details.
type IndicesPutMappingService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	masterTimeout     string
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	updateAllTypes    *bool
	timeout           string
	bodyJson          map[string]interface{}
	bodyString        string
}

// NewPutMappingService is an alias for NewIndicesPutMappingService.
// Use NewIndicesPutMappingService.
func NewPutMappingService(client *Client) *IndicesPutMappingService {
	return NewIndicesPutMappingService(client)
}

// NewIndicesPutMappingService creates a new IndicesPutMappingService.
func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService {
	return &IndicesPutMappingService{
		client: client,
		index:  make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesPutMappingService) Human(human bool) *IndicesPutMappingService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesPutMappingService) ErrorTrace(errorTrace bool) *IndicesPutMappingService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesPutMappingService) FilterPath(filterPath ...string) *IndicesPutMappingService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesPutMappingService) Header(name string, value string) *IndicesPutMappingService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesPutMappingService) Headers(headers http.Header) *IndicesPutMappingService {
	s.headers = headers
	return s
}

// Index is a list of index names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all indices.
func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService {
	s.index = append(s.index, indices...)
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// UpdateAllTypes, if true, indicates that all fields that span multiple indices
// should be updated (default: false).
func (s *IndicesPutMappingService) UpdateAllTypes(updateAllTypes bool) *IndicesPutMappingService {
	s.updateAllTypes = &updateAllTypes
	return s
}

// BodyJson contains the mapping definition.
func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService {
	s.bodyJson = mapping
	return s
}

// BodyString is the mapping definition serialized as a string.
func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService {
	s.bodyString = mapping
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
	path, err := uritemplates.Expand("/{index}/_mapping", map[string]string{
		"index": strings.Join(s.index, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.updateAllTypes != nil {
		params.Set("update_all_types", fmt.Sprintf("%v", *s.updateAllTypes))
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesPutMappingService) Validate() error {
	var invalid []string
	if len(s.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if s.bodyString == "" && s.bodyJson == nil {
		invalid = append(invalid, "BodyJson")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "PUT",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(PutMappingResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// PutMappingResponse is the response of IndicesPutMappingService.Do.
type PutMappingResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
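Usage note (not part of the vendored file): a mapping can be supplied either as a string (BodyString) or as a map (BodyJson). A minimal sketch; the mapping body shown here is an illustrative assumption, not the mapping Gitea actually installs:

// putIssueMapping is a usage sketch with a hypothetical two-field mapping.
func putIssueMapping(ctx context.Context, client *elastic.Client) error {
	mapping := map[string]interface{}{
		"properties": map[string]interface{}{
			"title":   map[string]interface{}{"type": "text"},
			"content": map[string]interface{}{"type": "text"},
		},
	}
	resp, err := elastic.NewIndicesPutMappingService(client).
		Index("gitea_issues").
		BodyJson(mapping).
		Do(ctx)
	if err != nil {
		return err
	}
	if !resp.Acknowledged {
		return fmt.Errorf("mapping update was not acknowledged")
	}
	return nil
}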
242 vendor/github.com/olivere/elastic/v7/indices_put_settings.go generated vendored Normal file
@@ -0,0 +1,242 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesPutSettingsService changes specific index level settings in
// real time.
//
// See the documentation at
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-update-settings.html.
type IndicesPutSettingsService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index             []string
	allowNoIndices    *bool
	expandWildcards   string
	flatSettings      *bool
	ignoreUnavailable *bool
	masterTimeout     string
	bodyJson          interface{}
	bodyString        string
}

// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
	return &IndicesPutSettingsService{
		client: client,
		index:  make([]string, 0),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesPutSettingsService) Human(human bool) *IndicesPutSettingsService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesPutSettingsService) ErrorTrace(errorTrace bool) *IndicesPutSettingsService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesPutSettingsService) FilterPath(filterPath ...string) *IndicesPutSettingsService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesPutSettingsService) Header(name string, value string) *IndicesPutSettingsService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesPutSettingsService) Headers(headers http.Header) *IndicesPutSettingsService {
	s.headers = headers
	return s
}

// Index is a list of index names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all indices.
func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
	s.index = append(s.index, indices...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all`
// string or when no indices have been specified).
func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards specifies whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
	s.expandWildcards = expandWildcards
	return s
}

// FlatSettings indicates whether to return settings in flat format (default: false).
func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
	s.flatSettings = &flatSettings
	return s
}

// IgnoreUnavailable specifies whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// MasterTimeout is the timeout for connection to master.
func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
	s.masterTimeout = masterTimeout
	return s
}

// BodyJson is documented as: The index settings to be updated.
func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
	s.bodyJson = body
	return s
}

// BodyString is documented as: The index settings to be updated.
func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_settings"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.flatSettings != nil {
		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesPutSettingsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "PUT",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesPutSettingsResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
type IndicesPutSettingsResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
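Usage note (not part of the vendored file): index settings are updated with a PUT to /{index}/_settings, and the body can be any JSON-serializable value. A minimal sketch; the index name and replica count are illustrative assumptions:

// tuneIssueIndex is a usage sketch that changes the replica count of a
// hypothetical "gitea_issues" index.
func tuneIssueIndex(ctx context.Context, client *elastic.Client) error {
	resp, err := elastic.NewIndicesPutSettingsService(client).
		Index("gitea_issues").
		BodyJson(map[string]interface{}{
			"index": map[string]interface{}{"number_of_replicas": 1},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	if !resp.Acknowledged {
		return fmt.Errorf("settings update was not acknowledged")
	}
	return nil
}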
259 vendor/github.com/olivere/elastic/v7/indices_put_template.go generated vendored Normal file
@@ -0,0 +1,259 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesPutTemplateService creates or updates index mappings.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html.
type IndicesPutTemplateService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	name          string
	cause         string
	order         interface{}
	version       *int
	create        *bool
	timeout       string
	masterTimeout string
	flatSettings  *bool
	bodyJson      interface{}
	bodyString    string
}

// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
	return &IndicesPutTemplateService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesPutTemplateService) Human(human bool) *IndicesPutTemplateService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesPutTemplateService) ErrorTrace(errorTrace bool) *IndicesPutTemplateService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesPutTemplateService) FilterPath(filterPath ...string) *IndicesPutTemplateService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesPutTemplateService) Header(name string, value string) *IndicesPutTemplateService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesPutTemplateService) Headers(headers http.Header) *IndicesPutTemplateService {
	s.headers = headers
	return s
}

// Name is the name of the index template.
func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
	s.name = name
	return s
}

// Cause describes the cause for this index template creation. This is currently
// undocumented, but part of the Java source.
func (s *IndicesPutTemplateService) Cause(cause string) *IndicesPutTemplateService {
	s.cause = cause
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
	s.masterTimeout = masterTimeout
	return s
}

// FlatSettings indicates whether to return settings in flat format (default: false).
func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
	s.flatSettings = &flatSettings
	return s
}

// Order is the order for this template when merging multiple matching ones
// (higher numbers are merged later, overriding the lower numbers).
func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
	s.order = order
	return s
}

// Version sets the version number for this template.
func (s *IndicesPutTemplateService) Version(version int) *IndicesPutTemplateService {
	s.version = &version
	return s
}

// Create indicates whether the index template should only be added if
// new or can also replace an existing one.
func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
	s.create = &create
	return s
}

// BodyJson is documented as: The template definition.
func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
	s.bodyJson = body
	return s
}

// BodyString is documented as: The template definition.
func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
	s.bodyString = body
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildURL builds the URL for the operation.
|
||||||
|
func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
|
||||||
|
// Build URL
|
||||||
|
path, err := uritemplates.Expand("/_template/{name}", map[string]string{
|
||||||
|
"name": s.name,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", url.Values{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add query string parameters
|
||||||
|
params := url.Values{}
|
||||||
|
if v := s.pretty; v != nil {
|
||||||
|
params.Set("pretty", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.human; v != nil {
|
||||||
|
params.Set("human", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if v := s.errorTrace; v != nil {
|
||||||
|
params.Set("error_trace", fmt.Sprint(*v))
|
||||||
|
}
|
||||||
|
if len(s.filterPath) > 0 {
|
||||||
|
params.Set("filter_path", strings.Join(s.filterPath, ","))
|
||||||
|
}
|
||||||
|
if s.order != nil {
|
||||||
|
params.Set("order", fmt.Sprintf("%v", s.order))
|
||||||
|
}
|
||||||
|
if s.version != nil {
|
||||||
|
params.Set("version", fmt.Sprintf("%v", *s.version))
|
||||||
|
}
|
||||||
|
if s.create != nil {
|
||||||
|
params.Set("create", fmt.Sprintf("%v", *s.create))
|
||||||
|
}
|
||||||
|
if s.cause != "" {
|
||||||
|
params.Set("cause", s.cause)
|
||||||
|
}
|
||||||
|
if s.timeout != "" {
|
||||||
|
params.Set("timeout", s.timeout)
|
||||||
|
}
|
||||||
|
if s.masterTimeout != "" {
|
||||||
|
params.Set("master_timeout", s.masterTimeout)
|
||||||
|
}
|
||||||
|
if s.flatSettings != nil {
|
||||||
|
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||||
|
}
|
||||||
|
return path, params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks if the operation is valid.
|
||||||
|
func (s *IndicesPutTemplateService) Validate() error {
|
||||||
|
var invalid []string
|
||||||
|
if s.name == "" {
|
||||||
|
invalid = append(invalid, "Name")
|
||||||
|
}
|
||||||
|
if s.bodyString == "" && s.bodyJson == nil {
|
||||||
|
invalid = append(invalid, "BodyJson")
|
||||||
|
}
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
return fmt.Errorf("missing required fields: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes the operation.
|
||||||
|
func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) {
|
||||||
|
// Check pre-conditions
|
||||||
|
if err := s.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get URL for request
|
||||||
|
path, params, err := s.buildURL()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup HTTP request body
|
||||||
|
var body interface{}
|
||||||
|
if s.bodyJson != nil {
|
||||||
|
body = s.bodyJson
|
||||||
|
} else {
|
||||||
|
body = s.bodyString
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get HTTP response
|
||||||
|
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
|
||||||
|
Method: "PUT",
|
||||||
|
Path: path,
|
||||||
|
Params: params,
|
||||||
|
Body: body,
|
||||||
|
Headers: s.headers,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return operation response
|
||||||
|
ret := new(IndicesPutTemplateResponse)
|
||||||
|
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
|
||||||
|
type IndicesPutTemplateResponse struct {
|
||||||
|
Acknowledged bool `json:"acknowledged"`
|
||||||
|
ShardsAcknowledged bool `json:"shards_acknowledged"`
|
||||||
|
Index string `json:"index,omitempty"`
|
||||||
|
}
|
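Aside (not part of the vendored file): a minimal sketch of how the builder above is typically chained from inside package elastic. The template name and body are hypothetical; client and ctx are assumed to already exist.

// exampleCreateTemplate is a hypothetical helper showing the usual call chain
// for IndicesPutTemplateService: Name and a body are required, then Do sends a PUT.
func exampleCreateTemplate(ctx context.Context, client *Client) error {
	resp, err := NewIndicesPutTemplateService(client).
		Name("gitea_issues_template"). // hypothetical template name
		BodyString(`{"index_patterns":["gitea_issues*"],"settings":{"number_of_shards":1}}`).
		Do(ctx)
	if err != nil {
		return err
	}
	_ = resp.Acknowledged // true when the cluster accepted the template
	return nil
}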
149	vendor/github.com/olivere/elastic/v7/indices_refresh.go generated vendored Normal file
@@ -0,0 +1,149 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// RefreshService explicitly refreshes one or more indices.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-refresh.html.
type RefreshService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index []string
}

// NewRefreshService creates a new instance of RefreshService.
func NewRefreshService(client *Client) *RefreshService {
	builder := &RefreshService{
		client: client,
	}
	return builder
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *RefreshService) Pretty(pretty bool) *RefreshService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *RefreshService) Human(human bool) *RefreshService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *RefreshService) ErrorTrace(errorTrace bool) *RefreshService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *RefreshService) FilterPath(filterPath ...string) *RefreshService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *RefreshService) Header(name string, value string) *RefreshService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *RefreshService) Headers(headers http.Header) *RefreshService {
	s.headers = headers
	return s
}

// Index specifies the indices to refresh.
func (s *RefreshService) Index(index ...string) *RefreshService {
	s.index = append(s.index, index...)
	return s
}

// buildURL builds the URL for the operation.
func (s *RefreshService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_refresh"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	return path, params, nil
}

// Do executes the request.
func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) {
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return result
	ret := new(RefreshResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// -- Result of a refresh request.

// RefreshResult is the outcome of RefreshService.Do.
type RefreshResult struct {
	Shards *ShardsInfo `json:"_shards,omitempty"`
}
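Aside (not part of the vendored file): a minimal sketch, using only the setters defined above, of forcing a refresh so freshly indexed documents become searchable right away. The index name is hypothetical; client and ctx are assumed to exist.

// exampleRefresh is a hypothetical helper: it refreshes one index and reports
// how many shards acknowledged the refresh.
func exampleRefresh(ctx context.Context, client *Client) error {
	res, err := NewRefreshService(client).
		Index("gitea_issues"). // hypothetical index name
		Do(ctx)
	if err != nil {
		return err
	}
	_ = res.Shards // per-shard summary of the refresh
	return nil
}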
324	vendor/github.com/olivere/elastic/v7/indices_rollover.go generated vendored Normal file
@@ -0,0 +1,324 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesRolloverService rolls an alias over to a new index when the
// existing index is considered to be too large or too old.
//
// It is documented at
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-rollover-index.html.
type IndicesRolloverService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	dryRun              bool
	newIndex            string
	alias               string
	masterTimeout       string
	timeout             string
	waitForActiveShards string
	conditions          map[string]interface{}
	settings            map[string]interface{}
	mappings            map[string]interface{}
	bodyJson            interface{}
	bodyString          string
}

// NewIndicesRolloverService creates a new IndicesRolloverService.
func NewIndicesRolloverService(client *Client) *IndicesRolloverService {
	return &IndicesRolloverService{
		client:     client,
		conditions: make(map[string]interface{}),
		settings:   make(map[string]interface{}),
		mappings:   make(map[string]interface{}),
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesRolloverService) Human(human bool) *IndicesRolloverService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesRolloverService) ErrorTrace(errorTrace bool) *IndicesRolloverService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesRolloverService) FilterPath(filterPath ...string) *IndicesRolloverService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesRolloverService) Header(name string, value string) *IndicesRolloverService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesRolloverService) Headers(headers http.Header) *IndicesRolloverService {
	s.headers = headers
	return s
}

// Alias is the name of the alias to rollover.
func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
	s.alias = alias
	return s
}

// NewIndex is the name of the rollover index.
func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
	s.newIndex = newIndex
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
	s.masterTimeout = masterTimeout
	return s
}

// Timeout sets an explicit operation timeout.
func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
	s.timeout = timeout
	return s
}

// WaitForActiveShards sets the number of active shards to wait for on the
// newly created rollover index before the operation returns.
func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
	s.waitForActiveShards = waitForActiveShards
	return s
}

// DryRun, when set, specifies that only conditions are checked without
// performing the actual rollover.
func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
	s.dryRun = dryRun
	return s
}

// Conditions allows to specify all conditions as a dictionary.
func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
	s.conditions = conditions
	return s
}

// AddCondition adds a condition to the rollover decision.
func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
	s.conditions[name] = value
	return s
}

// AddMaxIndexAgeCondition adds a condition to set the max index age.
func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
	s.conditions["max_age"] = time
	return s
}

// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
	s.conditions["max_docs"] = docs
	return s
}

// Settings adds the index settings.
func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
	s.settings = settings
	return s
}

// AddSetting adds an index setting.
func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
	s.settings[name] = value
	return s
}

// Mappings adds the index mappings.
func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
	s.mappings = mappings
	return s
}

// AddMapping adds a mapping for the given type.
func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
	s.mappings[typ] = mapping
	return s
}

// BodyJson sets the conditions that needs to be met for executing rollover,
// specified as a serializable JSON instance which is sent as the body of
// the request.
func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
	s.bodyJson = body
	return s
}

// BodyString sets the conditions that needs to be met for executing rollover,
// specified as a string which is sent as the body of the request.
func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
	s.bodyString = body
	return s
}

// getBody returns the body of the request, if not explicitly set via
// BodyJson or BodyString.
func (s *IndicesRolloverService) getBody() interface{} {
	body := make(map[string]interface{})
	if len(s.conditions) > 0 {
		body["conditions"] = s.conditions
	}
	if len(s.settings) > 0 {
		body["settings"] = s.settings
	}
	if len(s.mappings) > 0 {
		body["mappings"] = s.mappings
	}
	return body
}

// buildURL builds the URL for the operation.
func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
	// Build URL
	var err error
	var path string
	if s.newIndex != "" {
		path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{
			"alias":     s.alias,
			"new_index": s.newIndex,
		})
	} else {
		path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{
			"alias": s.alias,
		})
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.dryRun {
		params.Set("dry_run", "true")
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesRolloverService) Validate() error {
	var invalid []string
	if s.alias == "" {
		invalid = append(invalid, "Alias")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else if s.bodyString != "" {
		body = s.bodyString
	} else {
		body = s.getBody()
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesRolloverResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesRolloverResponse is the response of IndicesRolloverService.Do.
type IndicesRolloverResponse struct {
	OldIndex           string          `json:"old_index"`
	NewIndex           string          `json:"new_index"`
	RolledOver         bool            `json:"rolled_over"`
	DryRun             bool            `json:"dry_run"`
	Acknowledged       bool            `json:"acknowledged"`
	ShardsAcknowledged bool            `json:"shards_acknowledged"`
	Conditions         map[string]bool `json:"conditions"`
}
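Aside (not part of the vendored file): a minimal sketch of a dry-run rollover using only the builder methods above. The alias name and thresholds are hypothetical; client and ctx are assumed to exist.

// exampleRollover is a hypothetical helper: it checks whether the write alias
// would roll over given an age and document-count condition, without actually rolling.
func exampleRollover(ctx context.Context, client *Client) (bool, error) {
	resp, err := NewIndicesRolloverService(client).
		Alias("gitea_issues_write").      // hypothetical write alias
		AddMaxIndexAgeCondition("7d").    // roll after seven days
		AddMaxIndexDocsCondition(1000000). // or after one million docs
		DryRun(true).                     // only evaluate the conditions
		Do(ctx)
	if err != nil {
		return false, err
	}
	return resp.RolledOver, nil
}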
278	vendor/github.com/olivere/elastic/v7/indices_segments.go generated vendored Normal file
@@ -0,0 +1,278 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesSegmentsService provides low level segments information that a
// Lucene index (shard level) is built with. Allows to be used to provide
// more information on the state of a shard and an index, possibly
// optimization information, data "wasted" on deletes, and so on.
//
// Find further documentation at
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-segments.html.
type IndicesSegmentsService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	index              []string
	allowNoIndices     *bool
	expandWildcards    string
	ignoreUnavailable  *bool
	operationThreading interface{}
	verbose            *bool
}

// NewIndicesSegmentsService creates a new IndicesSegmentsService.
func NewIndicesSegmentsService(client *Client) *IndicesSegmentsService {
	return &IndicesSegmentsService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesSegmentsService) Pretty(pretty bool) *IndicesSegmentsService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesSegmentsService) Human(human bool) *IndicesSegmentsService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesSegmentsService) ErrorTrace(errorTrace bool) *IndicesSegmentsService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesSegmentsService) FilterPath(filterPath ...string) *IndicesSegmentsService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesSegmentsService) Header(name string, value string) *IndicesSegmentsService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesSegmentsService) Headers(headers http.Header) *IndicesSegmentsService {
	s.headers = headers
	return s
}

// Index is a comma-separated list of index names; use `_all` or empty string
// to perform the operation on all indices.
func (s *IndicesSegmentsService) Index(indices ...string) *IndicesSegmentsService {
	s.index = append(s.index, indices...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices. (This includes `_all` string or when
// no indices have been specified).
func (s *IndicesSegmentsService) AllowNoIndices(allowNoIndices bool) *IndicesSegmentsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to concrete indices
// that are open, closed or both.
func (s *IndicesSegmentsService) ExpandWildcards(expandWildcards string) *IndicesSegmentsService {
	s.expandWildcards = expandWildcards
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesSegmentsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSegmentsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// OperationThreading is undocumented in Elasticsearch as of now.
func (s *IndicesSegmentsService) OperationThreading(operationThreading interface{}) *IndicesSegmentsService {
	s.operationThreading = operationThreading
	return s
}

// Verbose, when set to true, includes detailed memory usage by Lucene.
func (s *IndicesSegmentsService) Verbose(verbose bool) *IndicesSegmentsService {
	s.verbose = &verbose
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesSegmentsService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_segments", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_segments"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.operationThreading != nil {
		params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
	}
	if s.verbose != nil {
		params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesSegmentsService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *IndicesSegmentsService) Do(ctx context.Context) (*IndicesSegmentsResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesSegmentsResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesSegmentsResponse is the response of IndicesSegmentsService.Do.
type IndicesSegmentsResponse struct {
	// Shards provides information returned from shards.
	Shards *ShardsInfo `json:"_shards"`

	// Indices provides a map into the stats of an index.
	// The key of the map is the index name.
	Indices map[string]*IndexSegments `json:"indices,omitempty"`
}

type IndexSegments struct {
	// Shards provides a map into the shard related information of an index.
	// The key of the map is the number of a specific shard.
	Shards map[string][]*IndexSegmentsShards `json:"shards,omitempty"`
}

type IndexSegmentsShards struct {
	Routing              *IndexSegmentsRouting `json:"routing,omitempty"`
	NumCommittedSegments int64                 `json:"num_committed_segments,omitempty"`
	NumSearchSegments    int64                 `json:"num_search_segments"`

	// Segments provides a map into the segment related information of a shard.
	// The key of the map is the specific lucene segment id.
	Segments map[string]*IndexSegmentsDetails `json:"segments,omitempty"`
}

type IndexSegmentsRouting struct {
	State          string `json:"state,omitempty"`
	Primary        bool   `json:"primary,omitempty"`
	Node           string `json:"node,omitempty"`
	RelocatingNode string `json:"relocating_node,omitempty"`
}

type IndexSegmentsDetails struct {
	Generation    int64                   `json:"generation,omitempty"`
	NumDocs       int64                   `json:"num_docs,omitempty"`
	DeletedDocs   int64                   `json:"deleted_docs,omitempty"`
	Size          string                  `json:"size,omitempty"`
	SizeInBytes   int64                   `json:"size_in_bytes,omitempty"`
	Memory        string                  `json:"memory,omitempty"`
	MemoryInBytes int64                   `json:"memory_in_bytes,omitempty"`
	Committed     bool                    `json:"committed,omitempty"`
	Search        bool                    `json:"search,omitempty"`
	Version       string                  `json:"version,omitempty"`
	Compound      bool                    `json:"compound,omitempty"`
	MergeId       string                  `json:"merge_id,omitempty"`
	Sort          []*IndexSegmentsSort    `json:"sort,omitempty"`
	RAMTree       []*IndexSegmentsRamTree `json:"ram_tree,omitempty"`
	Attributes    map[string]string       `json:"attributes,omitempty"`
}

type IndexSegmentsSort struct {
	Field   string      `json:"field,omitempty"`
	Mode    string      `json:"mode,omitempty"`
	Missing interface{} `json:"missing,omitempty"`
	Reverse bool        `json:"reverse,omitempty"`
}

type IndexSegmentsRamTree struct {
	Description string                  `json:"description,omitempty"`
	Size        string                  `json:"size,omitempty"`
	SizeInBytes int64                   `json:"size_in_bytes,omitempty"`
	Children    []*IndexSegmentsRamTree `json:"children,omitempty"`
}
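Aside (not part of the vendored file): a minimal sketch of inspecting per-shard Lucene segment statistics with the service above. The index name is hypothetical; client and ctx are assumed to exist.

// exampleSegments is a hypothetical helper: it fetches verbose segment stats
// for one index and walks the per-index, per-shard maps in the response.
func exampleSegments(ctx context.Context, client *Client) error {
	resp, err := NewIndicesSegmentsService(client).
		Index("gitea_issues"). // hypothetical index name
		Verbose(true).         // include detailed Lucene memory usage
		Do(ctx)
	if err != nil {
		return err
	}
	for indexName, idx := range resp.Indices {
		_ = indexName
		_ = idx.Shards // map keyed by shard number, each entry a slice of shard copies
	}
	return nil
}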
231	vendor/github.com/olivere/elastic/v7/indices_shrink.go generated vendored Normal file
@@ -0,0 +1,231 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// IndicesShrinkService allows you to shrink an existing index into a
// new index with fewer primary shards.
//
// For further details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-shrink-index.html.
type IndicesShrinkService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	source              string
	target              string
	masterTimeout       string
	timeout             string
	waitForActiveShards string
	bodyJson            interface{}
	bodyString          string
}

// NewIndicesShrinkService creates a new IndicesShrinkService.
func NewIndicesShrinkService(client *Client) *IndicesShrinkService {
	return &IndicesShrinkService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesShrinkService) Human(human bool) *IndicesShrinkService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesShrinkService) ErrorTrace(errorTrace bool) *IndicesShrinkService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesShrinkService) FilterPath(filterPath ...string) *IndicesShrinkService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *IndicesShrinkService) Header(name string, value string) *IndicesShrinkService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *IndicesShrinkService) Headers(headers http.Header) *IndicesShrinkService {
	s.headers = headers
	return s
}

// Source is the name of the source index to shrink.
func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService {
	s.source = source
	return s
}

// Target is the name of the target index to shrink into.
func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService {
	s.target = target
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService {
	s.masterTimeout = masterTimeout
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService {
	s.timeout = timeout
	return s
}

// WaitForActiveShards sets the number of active shards to wait for on
// the shrunken index before the operation returns.
func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService {
	s.waitForActiveShards = waitForActiveShards
	return s
}

// BodyJson is the configuration for the target index (`settings` and `aliases`)
// defined as a JSON-serializable instance to be sent as the request body.
func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService {
	s.bodyJson = body
	return s
}

// BodyString is the configuration for the target index (`settings` and `aliases`)
// defined as a string to send as the request body.
func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService {
	s.bodyString = body
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesShrinkService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{
		"source": s.source,
		"target": s.target,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
func (s *IndicesShrinkService) Validate() error {
	var invalid []string
	if s.source == "" {
		invalid = append(invalid, "Source")
	}
	if s.target == "" {
		invalid = append(invalid, "Target")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation.
func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else if s.bodyString != "" {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Body:    body,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesShrinkResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesShrinkResponse is the response of IndicesShrinkService.Do.
type IndicesShrinkResponse struct {
	Acknowledged       bool   `json:"acknowledged"`
	ShardsAcknowledged bool   `json:"shards_acknowledged"`
	Index              string `json:"index,omitempty"`
}
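Aside (not part of the vendored file): a minimal sketch of shrinking an index into a single-shard target using only the builder methods above. The index names and settings are hypothetical; client and ctx are assumed to exist.

// exampleShrink is a hypothetical helper: it shrinks a source index into a
// target index whose settings are supplied via BodyJson.
func exampleShrink(ctx context.Context, client *Client) error {
	resp, err := NewIndicesShrinkService(client).
		Source("gitea_issues").         // hypothetical source index
		Target("gitea_issues_shrunk").  // hypothetical target index
		BodyJson(map[string]interface{}{
			"settings": map[string]interface{}{
				"index.number_of_shards": 1, // single primary shard in the target
			},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	_ = resp.Acknowledged
	return nil
}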
Some files were not shown because too many files have changed in this diff.