Compare commits

...

19 Commits

Author SHA1 Message Date
73164d1779 Auto-generated API code (#2886) 2025-06-30 11:46:07 -05:00
2dafb2af94 Auto-generated API code (#2880) 2025-06-23 13:23:45 -04:00
2ac119fd05 Auto-generated API code (#2868) 2025-06-09 10:29:03 -05:00
94f6173483 Auto-generated API code (#2860) 2025-06-02 17:10:35 +00:00
a9d4d109f2 Auto-generated API code (#2853) 2025-05-27 15:14:42 +00:00
d524809eb5 Auto-generated API code (#2834) 2025-05-19 19:35:32 +00:00
14517d1015 Export helper types (#2822) (#2824)
(cherry picked from commit b030084f24)

Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-05-05 13:05:35 -05:00
f6ac48126c Auto-generated API code (#2820) 2025-05-05 11:21:20 -05:00
acdb0d6162 Auto-generated API code (#2808) 2025-04-28 10:41:15 -05:00
e8b4183b8b Bump to 8.18.2 (#2795) (#2796)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-25 10:47:18 -05:00
486cba656a 8.18.2 changelog (#2793) (#2794)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-25 10:43:06 -05:00
28e826d738 Use async reader for parsing Apache Arrow responses (#2788) (#2792) 2025-04-24 14:24:17 -05:00
48068562d1 Support Apache Arrow 19 (#2782) (#2785) 2025-04-22 10:32:50 -05:00
7cdbae220f Migrate integration tests to built JS files (#2750) (#2780) 2025-04-22 09:16:18 -05:00
48dcef4975 Release notes fo 8.18.1 (#2763) (#2778)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-21 13:52:25 -05:00
b5a36f37ab Improve deserialization docs (#2766) (#2767)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-17 15:10:45 -05:00
a31920b785 Put node roles support back (#2759) (#2762)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-17 13:55:18 -05:00
846c50b8bf Bump transport to latest 8.x version (#2757) (#2758)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-17 13:54:04 -05:00
5204faeb66 Bump to 8.18.1 (#2760) (#2761)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2025-04-16 13:42:12 -05:00
34 changed files with 1680 additions and 2145 deletions

View File

@ -1,17 +1,20 @@
---
agents:
provider: "gcp"
image: family/core-ubuntu-2204
memory: "8G"
cpu: "2"
steps:
- label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}"
agents:
provider: "gcp"
- label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }})"
env:
NODE_VERSION: "{{ matrix.nodejs }}"
TEST_SUITE: "{{ matrix.suite }}"
STACK_VERSION: 8.16.0
TEST_SUITE: "platinum"
STACK_VERSION: 9.0.0
GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token"
TEST_ES_STACK: "1"
matrix:
setup:
suite:
- "free"
- "platinum"
nodejs:
- "18"
- "20"
@ -21,9 +24,6 @@ steps:
- wait: ~
continue_on_failure: true
- label: ":junit: Test results"
agents:
provider: "gcp"
image: family/core-ubuntu-2204
plugins:
- junit-annotate#v2.4.1:
artifacts: "junit-output/junit-*.xml"

View File

@ -10,22 +10,29 @@ export NODE_VERSION=${NODE_VERSION:-18}
echo "--- :javascript: Building Docker image"
docker build \
--file "$script_path/Dockerfile" \
--tag elastic/elasticsearch-js \
--build-arg NODE_VERSION="$NODE_VERSION" \
.
--file "$script_path/Dockerfile" \
--tag elastic/elasticsearch-js \
--build-arg NODE_VERSION="$NODE_VERSION" \
.
echo "--- :javascript: Running $TEST_SUITE tests"
GITHUB_TOKEN=$(vault read -field=token "$GITHUB_TOKEN_PATH")
export GITHUB_TOKEN
echo "--- :javascript: Running tests"
mkdir -p "$repo/junit-output"
docker run \
--network="${network_name}" \
--env "TEST_ES_SERVER=${elasticsearch_url}" \
--env "ELASTIC_PASSWORD=${elastic_password}" \
--env "TEST_SUITE=${TEST_SUITE}" \
--env "ELASTIC_USER=elastic" \
--env "BUILDKITE=true" \
--volume "$repo/junit-output:/junit-output" \
--name elasticsearch-js \
--rm \
elastic/elasticsearch-js \
bash -c "npm run test:integration; [ -f ./$TEST_SUITE-report-junit.xml ] && mv ./$TEST_SUITE-report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'"
--network="${network_name}" \
--env TEST_ES_STACK \
--env STACK_VERSION \
--env GITHUB_TOKEN \
--env "TEST_ES_SERVER=${elasticsearch_url}" \
--env "ELASTIC_PASSWORD=${elastic_password}" \
--env "ELASTIC_USER=elastic" \
--env "BUILDKITE=true" \
--volume "/usr/src/app/node_modules" \
--volume "$repo:/usr/src/app" \
--volume "$repo/junit-output:/junit-output" \
--name elasticsearch-js \
--rm \
elastic/elasticsearch-js \
bash -c "npm run test:integration; [ -f ./report-junit.xml ] && mv ./report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'"

View File

@ -6,3 +6,6 @@ elasticsearch
lib
junit-output
.tap
rest-api-spec
yaml-rest-tests
generated-tests

.gitignore
View File

@ -68,3 +68,7 @@ bun.lockb
test-results
processinfo
.tap
rest-api-spec
yaml-rest-tests
generated-tests
schema

View File

@ -74,3 +74,6 @@ CONTRIBUTING.md
src
bun.lockb
.tap
rest-api-spec
yaml-rest-tests
generated-tests

View File

@ -171,7 +171,18 @@ a|`function` - Takes a `Connection` and returns `true` if it can be sent a reque
_Default:_
[source,js]
----
() => true
function defaultNodeFilter (conn) {
  if (conn.roles != null) {
    if (
      // avoid master-only nodes
      conn.roles.master &&
      !conn.roles.data &&
      !conn.roles.ingest &&
      !conn.roles.ml
    ) return false
  }
  return true
}
----
|`nodeSelector`

View File

@ -1,6 +1,29 @@
[[changelog-client]]
== Release notes
[discrete]
=== 8.18.2
[discrete]
==== Fixes
[discrete]
===== Ensure Apache Arrow ES|QL helper uses async iterator
The `esql.toArrowReader()` helper function was trying to return a `RecordBatchStreamReader`, a synchronous iterator, even though the `apache-arrow` package was, in most cases, automatically coercing it to its asynchronous counterpart, `AsyncRecordBatchStreamReader`. The reader is now always returned as an async iterator.
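In practice this means the reader should be consumed with `for await`. A minimal sketch, assuming an ES|QL query against a hypothetical `my-index`:
[source,ts]
----
const reader = await client.helpers
  .esql({ query: 'FROM my-index | LIMIT 10' })
  .toArrowReader()

for await (const recordBatch of reader) {
  for (const record of recordBatch) {
    console.log(record.toJSON())
  }
}
----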
[discrete]
=== 8.18.1
[discrete]
==== Fixes
[discrete]
===== Fix broken node roles and node filter
The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along.
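A minimal usage sketch, assuming a local node URL, showing how the restored default can still be overridden with a custom `nodeFilter` at instantiation:
[source,ts]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  // accept every node, including master-only nodes,
  // instead of the default role-based filter
  nodeFilter: () => true
})
----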
[discrete]
=== 8.18.0
[discrete]

View File

@ -715,7 +715,7 @@ const result = await client.helpers
ES|QL can return results in multiple binary formats, including https://arrow.apache.org/[Apache Arrow]'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets.
`toArrowReader` returns a https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.html[`RecordBatchStreamReader`].
`toArrowReader` returns a https://github.com/apache/arrow/blob/520ae44272d491bbb52eb3c9b84864ed7088f11a/js/src/ipc/reader.ts#L216[`AsyncRecordBatchStreamReader`].
[source,ts]
----
@ -724,7 +724,7 @@ const reader = await client.helpers
.toArrowReader()
// print each record as JSON
for (const recordBatch of reader) {
for await (const recordBatch of reader) {
for (const record of recordBatch) {
console.log(record.toJSON())
}

View File

@ -97,7 +97,7 @@ client.diagnostic.on('request', (err, result) => {
----
|`deserialization`
a|Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. _(This event might not be emitted in certain situations)_.
a|Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`. This event might not be emitted in certain situations, such as when `asStream` is set to `true`, when a response is terminated early because its content length is too large, or when a response is terminated early by an `AbortController`.
[source,js]
----
client.diagnostic.on('deserialization', (err, result) => {
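  // A hypothetical way to measure the phase (names assumed, not from the
  // original docs): stash a start time keyed by the request id, assuming a
  // `timings` Map declared earlier and the id exposed on `result.meta.request.id`.
  timings.set(result.meta.request.id, Date.now())
})

// Pair it with the 'response' event to compute the elapsed time.
client.diagnostic.on('response', (err, result) => {
  const started = timings.get(result.meta.request.id)
  if (started !== undefined) {
    console.log(`deserialization took ${Date.now() - started}ms`)
  }
})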

View File

@ -1316,7 +1316,7 @@ client.openPointInTime({ index, keep_alive })
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random.
** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
** *`allow_partial_search_results` (Optional, boolean)*: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request.
** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node.
@ -1734,7 +1734,7 @@ client.search({ ... })
** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page.
** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
@ -1972,7 +1972,7 @@ client.searchShards({ ... })
* *Request (object):*
** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
@ -2000,7 +2000,7 @@ client.searchTemplate({ ... })
** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
@ -2268,7 +2268,7 @@ client.updateByQuery({ index })
** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
** *`from` (Optional, number)*: Skips the specified number of documents.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
@ -3043,12 +3043,13 @@ client.cat.nodes({ ... })
** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID.
** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory.
** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards.
** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted.
** *`h` (Optional, Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version") | Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | 
"segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version")[])*: A list of columns names to display.
It supports simple wildcards.
** *`s` (Optional, string | string[])*: A list of column names or aliases that determines the sort order.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values.
** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values.
[discrete]
==== pending_tasks
@ -3782,7 +3783,7 @@ If no response is received before the timeout expires, the request fails and ret
Clear cluster voting config exclusions.
Remove master-eligible nodes from the voting configuration exclusion list.
{ref}/voting-config-exclusions.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation]
[source,ts]
----
client.cluster.deleteVotingConfigExclusions({ ... })
@ -3967,7 +3968,7 @@ In that case, you may safely retry the call.
NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period.
They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
{ref}/voting-config-exclusions.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation]
[source,ts]
----
client.cluster.postVotingConfigExclusions({ ... })
@ -4070,8 +4071,8 @@ client.cluster.putSettings({ ... })
==== Arguments
* *Request (object):*
** *`persistent` (Optional, Record<string, User-defined value>)*
** *`transient` (Optional, Record<string, User-defined value>)*
** *`persistent` (Optional, Record<string, User-defined value>)*: The settings that persist after the cluster restarts.
** *`transient` (Optional, Record<string, User-defined value>)*: The settings that do not persist after the cluster restarts.
** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
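For example, a sketch of an update that combines the two argument types above (the specific setting keys are illustrative):
[source,ts]
----
await client.cluster.putSettings({
  persistent: {
    // survives a full cluster restart
    'cluster.routing.allocation.enable': 'all'
  },
  transient: {
    // reverts when the cluster restarts
    'indices.recovery.max_bytes_per_sec': '50mb'
  }
})
----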
@ -4087,7 +4088,7 @@ The API returns connection and endpoint information keyed by the configured remo
> This API returns information that reflects current state on the local cluster.
> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster).
> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the `/_resolve/cluster` endpoint.
{ref}/cluster-remote-info.html[Endpoint documentation]
[source,ts]
@ -5089,17 +5090,17 @@ count.
By default, the request waits for 1 second for the query results.
If the query completes during this period, results are returned.
Otherwise, a query ID is returned that can later be used to retrieve the results.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row.
It is valid only for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`.
** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster.
The default period is five days.
When this period expires, the query and its results are deleted, even if the query is still ongoing.
If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
** *`keep_on_completion` (Optional, boolean)*: Indicates whether the query and its results are stored in the cluster.
If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row.
It is valid only for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`.
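A sketch combining the parameters above, assuming the generated `esql.asyncQuery` endpoint and an illustrative query:
[source,ts]
----
const response = await client.esql.asyncQuery({
  query: 'FROM my-index | STATS count = COUNT(*) BY host.name',
  format: 'json',
  wait_for_completion_timeout: '2s',
  keep_on_completion: true
})
// If the query is still running after 2 seconds, the response carries an id
// that can later be passed to the async query get API.
----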
[discrete]
==== async_query_delete
@ -5147,6 +5148,7 @@ A query ID is provided in the ES|QL async query API response for a query that do
A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`.
** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster.
When this period expires, the query and its results are deleted, even if the query is still ongoing.
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish.
@ -5301,9 +5303,10 @@ will cause Elasticsearch to immediately return the current global checkpoints.
[discrete]
==== msearch
Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request.
The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it
supports the wait_for_checkpoints parameter.
Executes several fleet searches with a single API request.
The API follows the same structure as the multi search (`_msearch`) API.
However, similar to the fleet search API, it supports the `wait_for_checkpoints` parameter.
{ref}/fleet-multi-search.html[Endpoint documentation]
[source,ts]
@ -5331,9 +5334,9 @@ client.fleet.msearch({ ... })
** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
Elasticsearch to immediately execute the search.
** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
which is true by default.
** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or shard failures.
If false, returns an error with no partial results.
Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default.
[discrete]
==== search
@ -5436,7 +5439,7 @@ the indices stats API.
** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
Elasticsearch to immediately execute the search.
** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns
an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
which is true by default.
@ -5758,7 +5761,7 @@ The `index.analyze.max_token_count` setting enables you to limit the number of t
If more than this limit of tokens gets generated, an error occurs.
The `_analyze` endpoint without a specified index will always use `10000` as its limit.
{ref}/indices-analyze.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-analyze[Endpoint documentation]
[source,ts]
----
client.indices.analyze({ ... })
@ -5774,12 +5777,12 @@ If no index is specified or the index does not have a default analyzer, the anal
** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`.
This could be a built-in analyzer, or an analyzer that's been configured in the index.
** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter.
** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer.
** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name, unicode_set_filter } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer.
** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details.
** *`field` (Optional, string)*: Field used to derive the analyzer.
To use this parameter, you must specify an index.
If specified, the `analyzer` parameter overrides this value.
** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
** *`filter` (Optional, string | { type } | { type } | { type, preserve_original } | { type, ignored_scripts, output_unigrams } | { type } | { type } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type } | { type } | { type } | { type, dedup, dictionary, locale, longest_only } | { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, bucket_count, hash_count, hash_set_size, with_rotation } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, pattern, replacement } | { type } | { type } | { type, script } | { type } | { type } | { type } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type } | { type } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, ignore_keywords } | { type } | { type, stopwords } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
** *`text` (Optional, string | string[])*: Text to analyze.
If an array of strings is provided, it is analyzed as a multi-value field.
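A short sketch tying several of these arguments together (the index name and text are placeholders):
[source,ts]
----
const result = await client.indices.analyze({
  index: 'my-index',
  analyzer: 'standard',
  text: 'The quick brown fox'
})
console.log(result.tokens)
----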
@ -5829,7 +5832,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`fielddata` (Optional, boolean)*: If `true`, clears the fields cache.
Use the `fields` parameter to clear the cache of specific fields only.
** *`fields` (Optional, string | string[])*: List of field names used to limit the `fielddata` parameter.
@ -5943,7 +5945,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
@ -6093,7 +6094,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
@ -6187,6 +6187,7 @@ client.indices.deleteIndexTemplate({ name })
[discrete]
==== delete_template
Delete a legacy index template.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
{ref}/indices-delete-template-v1.html[Endpoint documentation]
[source,ts]
@ -6286,7 +6287,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
@ -6316,7 +6316,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
@ -6448,7 +6447,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`force` (Optional, boolean)*: If `true`, the request forces a flush even if there are no changes to commit to the index.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`wait_if_ongoing` (Optional, boolean)*: If `true`, the flush operation blocks until execution when another flush operation is running.
@ -6586,7 +6584,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
@ -6611,7 +6608,6 @@ Supports wildcards (`*`).
To target all data streams, omit this parameter or use `*` or `_all`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
@ -6679,7 +6675,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
@ -6728,7 +6723,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
@ -6789,7 +6783,7 @@ error.
[discrete]
==== get_template
Get index templates.
Get legacy index templates.
Get information about one or more index templates.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
@ -6921,7 +6915,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
@ -7022,7 +7015,6 @@ When empty, every document in this data stream will be stored indefinitely.
that's disabled (enabled: `false`) will have no effect on the data stream.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.
@ -7171,7 +7163,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
@ -7186,9 +7177,45 @@ Changes dynamic index settings in real time.
For data streams, index setting changes are applied to all backing indices by default.
To revert a setting to the default value, use a null value.
The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
----
{
  "number_of_replicas": 1
}
----
Or you can use an `index` setting object:
----
{
  "index": {
    "number_of_replicas": 1
  }
}
----
Or you can use dot notation:
----
{
  "index.number_of_replicas": 1
}
----
Or you can embed any of the aforementioned options in a `settings` object. For example:
----
{
  "settings": {
    "index": {
      "number_of_replicas": 1
    }
  }
}
----
NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream.
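As a sketch, any of the representations above can be sent through the client (the index name is a placeholder):
[source,ts]
----
await client.indices.putSettings({
  index: 'my-index',
  settings: {
    index: {
      number_of_replicas: 1
    }
  }
})
----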
@ -7235,7 +7262,7 @@ will be closed temporarily and then reopened in order to apply the changes.
[discrete]
==== put_template
Create or update an index template.
Create or update a legacy index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
@ -7363,7 +7390,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
[discrete]
@ -7471,7 +7497,6 @@ options to the `_resolve/cluster` API endpoint that takes no index expression.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
options to the `_resolve/cluster` API endpoint that takes no index expression.
** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen.
@ -7510,7 +7535,6 @@ Resources on remote clusters can be specified using the `<cluster>`:`<name>` syn
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
@ -7616,7 +7640,6 @@ This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`verbose` (Optional, boolean)*: If `true`, the request returns a verbose response.
@ -7955,7 +7978,6 @@ This parameter can only be used when the `q` query string parameter is specified
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`explain` (Optional, boolean)*: If `true`, the response returns detailed information if an error has occurred.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
@ -7968,6 +7990,14 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
==== chat_completion_unified
Perform chat completion inference
The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `chat_completion` task type for `openai`, `elastic` and `googlevertexai` inference services.
NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
{ref}/chat-completion-inference-api.html[Endpoint documentation]
[source,ts]
----
@ -8077,16 +8107,29 @@ These settings are specific to the task type you specified and override the task
[discrete]
==== put
Create an inference endpoint.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
The following integrations are available through the inference API. You can find the available task types next to the integration name:
* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
* Amazon Bedrock (`completion`, `text_embedding`)
* Anthropic (`completion`)
* Azure AI Studio (`completion`, `text_embedding`)
* Azure OpenAI (`completion`, `text_embedding`)
* Cohere (`completion`, `rerank`, `text_embedding`)
* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
* ELSER (`sparse_embedding`)
* Google AI Studio (`completion`, `text_embedding`)
* Google Vertex AI (`rerank`, `text_embedding`)
* Hugging Face (`text_embedding`)
* Mistral (`text_embedding`)
* OpenAI (`chat_completion`, `completion`, `text_embedding`)
* VoyageAI (`text_embedding`, `rerank`)
* Watsonx inference integration (`text_embedding`)
* JinaAI (`text_embedding`, `rerank`)
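For example, a minimal sketch assuming an already-configured `client` instance; the endpoint ID and ELSER service settings are illustrative:

[source,ts]
----
// Create an ELSER sparse-embedding endpoint; allocation values are illustrative.
const endpoint = await client.inference.put({
  inference_id: 'my-elser-endpoint',
  task_type: 'sparse_embedding',
  inference_config: {
    service: 'elser',
    service_settings: { num_allocations: 1, num_threads: 1 }
  }
})
----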
{ref}/put-inference-api.html[Endpoint documentation]
[source,ts]
----
@ -8098,7 +8141,7 @@ client.inference.put({ inference_id })
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type. Refer to the integration list in the API description for the available task types.
** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
[discrete]
@ -8107,12 +8150,6 @@ Create an AlibabaCloud AI Search inference endpoint.
Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-alibabacloud-ai-search.html[Endpoint documentation]
[source,ts]
----
@ -8135,17 +8172,11 @@ These settings are specific to the task type you specified.
==== put_amazonbedrock
Create an Amazon Bedrock inference endpoint.
Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.
Create an inference endpoint to perform an inference task with the `amazonbedrock` service.
>info
> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
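A hedged sketch, assuming an existing `client`; the credential, region, and model values are placeholders:

[source,ts]
----
// The access and secret keys cannot be changed after the endpoint is created.
await client.inference.putAmazonbedrock({
  task_type: 'text_embedding',
  amazonbedrock_inference_id: 'my-bedrock-embeddings',
  service: 'amazonbedrock',
  service_settings: {
    access_key: '<aws-access-key>',
    secret_key: '<aws-secret-key>',
    region: 'us-east-1',
    provider: 'amazontitan',
    model: 'amazon.titan-embed-text-v2:0'
  }
})
----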
{ref}/infer-service-amazon-bedrock.html[Endpoint documentation]
[source,ts]
----
@ -8170,12 +8201,6 @@ Create an Anthropic inference endpoint.
Create an inference endpoint to perform an inference task with the `anthropic` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-anthropic.html[Endpoint documentation]
[source,ts]
----
@ -8201,12 +8226,6 @@ Create an Azure AI studio inference endpoint.
Create an inference endpoint to perform an inference task with the `azureaistudio` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-azure-ai-studio.html[Endpoint documentation]
[source,ts]
----
@ -8238,12 +8257,6 @@ The list of chat completion models that you can choose from in your Azure OpenAI
The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
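A hedged sketch, assuming an existing `client`; the resource, deployment, and key values are placeholders for your Azure OpenAI deployment:

[source,ts]
----
// Point the endpoint at an existing Azure OpenAI deployment (values illustrative).
await client.inference.putAzureopenai({
  task_type: 'completion',
  azureopenai_inference_id: 'my-azure-openai-completion',
  service: 'azureopenai',
  service_settings: {
    api_key: '<azure-api-key>',
    resource_name: '<azure-resource-name>',
    deployment_id: '<deployment-id>',
    api_version: '2024-02-01'
  }
})
----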
{ref}/infer-service-azure-openai.html[Endpoint documentation]
[source,ts]
----
@ -8269,12 +8282,6 @@ Create a Cohere inference endpoint.
Create an inference endpoint to perform an inference task with the `cohere` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-cohere.html[Endpoint documentation]
[source,ts]
----
@ -8374,12 +8381,6 @@ Create a Google AI Studio inference endpoint.
Create an inference endpoint to perform an inference task with the `googleaistudio` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-google-ai-studio.html[Endpoint documentation]
[source,ts]
----
@ -8402,12 +8403,6 @@ Create a Google Vertex AI inference endpoint.
Create an inference endpoint to perform an inference task with the `googlevertexai` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-google-vertex-ai.html[Endpoint documentation]
[source,ts]
----
@ -8446,12 +8441,6 @@ The following models are recommended for the Hugging Face service:
* `multilingual-e5-base`
* `multilingual-e5-small`
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
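A hedged sketch, assuming an existing `client`; the method name follows the client's `put<Service>` pattern and the token and endpoint URL are placeholders:

[source,ts]
----
// The Hugging Face service points at a dedicated inference endpoint URL.
await client.inference.putHuggingFace({
  task_type: 'text_embedding',
  huggingface_inference_id: 'my-hf-embeddings',
  service: 'hugging_face',
  service_settings: {
    api_key: '<hf-access-token>',
    url: 'https://<endpoint>.endpoints.huggingface.cloud'
  }
})
----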
{ref}/infer-service-hugging-face.html[Endpoint documentation]
[source,ts]
----
@ -8477,12 +8466,6 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv
To review the available `rerank` models, refer to <https://jina.ai/reranker>.
To review the available `text_embedding` models, refer to <https://jina.ai/embeddings/>.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-jinaai.html[Endpoint documentation]
[source,ts]
----
@ -8506,12 +8489,6 @@ These settings are specific to the task type you specified.
Create a Mistral inference endpoint.
Creates an inference endpoint to perform an inference task with the `mistral` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
[source,ts]
----
client.inference.putMistral({ task_type, mistral_inference_id, service, service_settings })
@ -8534,12 +8511,6 @@ Create an OpenAI inference endpoint.
Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
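A minimal sketch, assuming an existing `client`; the API key and model ID are illustrative:

[source,ts]
----
// Create an OpenAI text-embedding endpoint (values illustrative).
await client.inference.putOpenai({
  task_type: 'text_embedding',
  openai_inference_id: 'my-openai-embeddings',
  service: 'openai',
  service_settings: {
    api_key: '<openai-api-key>',
    model_id: 'text-embedding-3-small'
  }
})
----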
{ref}/infer-service-openai.html[Endpoint documentation]
[source,ts]
----
@ -8593,12 +8564,6 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s
You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-watsonx-ai.html[Endpoint documentation]
[source,ts]
----
@ -9069,7 +9034,7 @@ You must then re-submit the API request with the acknowledge parameter set to tr
NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.
If the operator privileges feature is enabled, only operator users can use this API.
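A hedged sketch, assuming an existing `client`; the license file path is illustrative and the license JSON itself is whatever Elastic issued to you:

[source,ts]
----
import { readFile } from 'node:fs/promises'

// Load the license JSON provided by Elastic (path is illustrative).
const license = JSON.parse(await readFile('./license.json', 'utf8'))

await client.license.post({
  license,
  acknowledge: true // required when the new license disables features in use
})
----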
{ref}/update-license.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-license-post[Endpoint documentation]
[source,ts]
----
client.license.post({ ... })
@ -9852,7 +9817,7 @@ be retrieved and then added to another cluster.
[discrete]
==== get_data_frame_analytics_stats
Get data frame analytics jobs usage info.
Get data frame analytics job stats.
{ref}/get-dfanalytics-stats.html[Endpoint documentation]
[source,ts]
@ -9884,7 +9849,7 @@ there are no matches or only partial matches.
[discrete]
==== get_datafeed_stats
Get datafeeds usage info.
Get datafeed stats.
You can get statistics for multiple datafeeds in a single API request by
using a list of datafeeds or a wildcard expression. You can
get statistics for all datafeeds by using `_all`, by specifying `*` as the
@ -10008,7 +9973,7 @@ means it is unset and results are not limited to specific timestamps.
[discrete]
==== get_job_stats
Get anomaly detection jobs usage info.
Get anomaly detection job stats.
{ref}/ml-get-job-stats.html[Endpoint documentation]
[source,ts]
@ -10697,13 +10662,7 @@ client.ml.putJob({ job_id, analysis_config, data_description })
** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
`_all` string or when no indices are specified.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:
* `all`: Match any data stream or index, including hidden ones.
* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.
* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.
* `none`: Wildcard patterns are not accepted.
* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.
whether wildcard expressions match hidden data streams. Supports a list of values.
** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen.
** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored.
@ -11103,7 +11062,7 @@ restart the model deployment.
==== update_data_frame_analytics
Update a data frame analytics job.
{ref}/update-dfanalytics.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-data-frame-analytics[Endpoint documentation]
[source,ts]
----
client.ml.updateDataFrameAnalytics({ id })
@ -11192,13 +11151,7 @@ The maximum value is the value of `index.max_result_window`.
** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
`_all` string or when no indices are specified.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are:
* `all`: Match any data stream or index, including hidden ones.
* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.
* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.
* `none`: Wildcard patterns are not accepted.
* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.
whether wildcard expressions match hidden data streams. Supports a list of values.
** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen.
** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored.
@ -11314,7 +11267,7 @@ snapshot will be deleted when the job is deleted.
==== update_trained_model_deployment
Update a trained model deployment.
{ref}/update-trained-model-deployment.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-trained-model-deployment[Endpoint documentation]
[source,ts]
----
client.ml.updateTrainedModelDeployment({ model_id })
@ -11403,7 +11356,7 @@ client.nodes.getRepositoriesMeteringInfo({ node_id })
* *Request (object):*
** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information.
All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
For more information about the nodes selective options, refer to the node specification documentation.
[discrete]
==== hot_threads
@ -16060,7 +16013,7 @@ privileges for the source indices. You must also have `index` and `read` privile
Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the
time of update and runs with those privileges.
{ref}/update-transform.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-update-transform[Endpoint documentation]
[source,ts]
----
client.transform.updateTransform({ transform_id })
@ -16107,7 +16060,7 @@ A summary is returned when the upgrade is finished.
To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster.
You may want to perform a recent cluster backup prior to the upgrade.
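As a sketch, assuming an existing `client`; `dry_run` lets you preview the upgrade before applying it:

[source,ts]
----
// Preview which transforms need an update without changing anything, then upgrade.
const preview = await client.transform.upgradeTransforms({ dry_run: true })
console.log(preview)
await client.transform.upgradeTransforms({ dry_run: false })
----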
{ref}/upgrade-transforms.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-upgrade-transforms[Endpoint documentation]
[source,ts]
----
client.transform.upgradeTransforms({ ... })
@ -16136,7 +16089,7 @@ The reason for this behavior is to prevent overwriting the watch status from a w
Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.
This happens when the condition of the watch is not met (the condition evaluates to false).
{ref}/watcher-api-ack-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-ack-watch[Endpoint documentation]
[source,ts]
----
client.watcher.ackWatch({ watch_id })
@ -16155,7 +16108,7 @@ If you omit this parameter, all of the actions of the watch are acknowledged.
Activate a watch.
A watch can be either active or inactive.
{ref}/watcher-api-activate-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-activate-watch[Endpoint documentation]
[source,ts]
----
client.watcher.activateWatch({ watch_id })
@ -16172,7 +16125,7 @@ client.watcher.activateWatch({ watch_id })
Deactivate a watch.
A watch can be either active or inactive.
{ref}/watcher-api-deactivate-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-deactivate-watch[Endpoint documentation]
[source,ts]
----
client.watcher.deactivateWatch({ watch_id })
@ -16195,7 +16148,7 @@ IMPORTANT: Deleting a watch must be done by using only this API.
Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
{ref}/watcher-api-delete-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-delete-watch[Endpoint documentation]
[source,ts]
----
client.watcher.deleteWatch({ id })
@ -16224,7 +16177,7 @@ If your user is allowed to read index `a`, but not index `b`, then the exact sam
When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
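A hedged sketch, assuming an existing `client`; the watch ID is illustrative:

[source,ts]
----
// Force a watch to run once for debugging.
const result = await client.watcher.executeWatch({
  id: 'cluster_health_watch',
  ignore_condition: true,   // run the actions even if the condition is false
  record_execution: false   // do not persist this run in the watch history
})
----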
{ref}/watcher-api-execute-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-execute-watch[Endpoint documentation]
[source,ts]
----
client.watcher.executeWatch({ ... })
@ -16253,7 +16206,7 @@ Get Watcher index settings.
Get settings for the Watcher internal index (`.watches`).
Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
{ref}/watcher-api-get-settings.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-settings[Endpoint documentation]
[source,ts]
----
client.watcher.getSettings({ ... })
@ -16270,7 +16223,7 @@ If no response is received before the timeout expires, the request fails and ret
==== get_watch
Get a watch.
{ref}/watcher-api-get-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-watch[Endpoint documentation]
[source,ts]
----
client.watcher.getWatch({ id })
@ -16297,7 +16250,7 @@ When you add a watch you can also define its initial active state by setting the
When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
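A minimal sketch, assuming an existing `client`; the watch ID, schedule, input, and action are illustrative values using standard Watcher building blocks:

[source,ts]
----
// `active: false` stores the watch without scheduling it.
await client.watcher.putWatch({
  id: 'cluster_health_watch',
  active: false,
  trigger: { schedule: { interval: '10m' } },
  input: { simple: { note: 'placeholder payload' } },
  condition: { always: {} },
  actions: {
    log_it: { logging: { text: 'watch {{ctx.watch_id}} fired' } }
  }
})
----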
{ref}/watcher-api-put-watch.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-put-watch[Endpoint documentation]
[source,ts]
----
client.watcher.putWatch({ id })
@ -16332,7 +16285,7 @@ Get all registered watches in a paginated manner and optionally filter watches b
Note that only the `_id` and `metadata.*` fields are queryable or sortable.
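For example, assuming an existing `client`; the metadata field and page size are illustrative:

[source,ts]
----
// Page through watches filtered on a metadata field.
// Only `_id` and `metadata.*` can be queried or sorted on.
const page = await client.watcher.queryWatches({
  query: { term: { 'metadata.team': 'ops' } },
  sort: [{ _id: 'asc' }],
  from: 0,
  size: 20
})
----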
{ref}/watcher-api-query-watches.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-query-watches[Endpoint documentation]
[source,ts]
----
client.watcher.queryWatches({ ... })
@ -16355,7 +16308,7 @@ It must be non-negative.
Start the watch service.
Start the Watcher service if it is not already running.
{ref}/watcher-api-start.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-start[Endpoint documentation]
[source,ts]
----
client.watcher.start({ ... })
@ -16373,7 +16326,7 @@ Get Watcher statistics.
This API always returns basic metrics.
You retrieve more metrics by using the metric parameter.
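For example, assuming an existing `client`:

[source,ts]
----
// Request additional metrics beyond the basic ones; '_all' is also accepted.
const stats = await client.watcher.stats({
  metric: ['current_watches', 'queued_watches']
})
----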
{ref}/watcher-api-stats.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stats[Endpoint documentation]
[source,ts]
----
client.watcher.stats({ ... })
@ -16391,7 +16344,7 @@ client.watcher.stats({ ... })
Stop the watch service.
Stop the Watcher service if it is running.
{ref}/watcher-api-stop.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stop[Endpoint documentation]
[source,ts]
----
client.watcher.stop({ ... })
@ -16415,7 +16368,7 @@ This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.r
Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the
Watcher shards must always be in the `data_content` tier.
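A rough sketch, assuming an existing `client`; the flat dot-notation body shown here is an assumption about the request shape, and only the whitelisted index settings are accepted:

[source,ts]
----
// Sketch only: adjust a whitelisted setting on the .watches index.
await client.watcher.updateSettings({
  'index.auto_expand_replicas': '0-4'
})
----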
{ref}/watcher-api-update-settings.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-update-settings[Endpoint documentation]
[source,ts]
----
client.watcher.updateSettings({ ... })
@ -16465,7 +16418,7 @@ Get usage information.
Get information about the features that are currently enabled and available under the current license.
The API also provides some usage statistics.
{ref}/usage-api.html[Endpoint documentation]
https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-xpack[Endpoint documentation]
[source,ts]
----
client.xpack.usage({ ... })

index.d.ts

@ -25,3 +25,4 @@ export * as estypes from './lib/api/types'
export * as estypesWithBody from './lib/api/typesWithBodyKey'
export { Client, SniffingTransport }
export type { ClientOptions, NodeOptions } from './lib/client'
export * as helpers from './lib/helpers'


@ -1,7 +1,7 @@
{
"name": "@elastic/elasticsearch",
"version": "8.18.0",
"versionCanary": "8.18.0-canary.0",
"version": "8.18.2",
"versionCanary": "8.18.2-canary.0",
"description": "The official Elasticsearch client for Node.js",
"main": "./index.js",
"types": "index.d.ts",
@ -18,7 +18,8 @@
"test:coverage-100": "npm run build && tap --coverage --100",
"test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov",
"test:coverage-ui": "npm run build && tap --coverage --coverage-report=html",
"test:integration": "tsc && node test/integration/index.js",
"test:integration-build": "npm run build && node test/integration/index.js",
"test:integration": "npm run test:integration-build && env tap run --jobs=1 --reporter=junit --reporter-file=report-junit.xml generated-tests/",
"lint": "ts-standard src",
"lint:fix": "ts-standard --fix src",
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'",
@ -76,8 +77,8 @@
"node-fetch": "2.7.0",
"ora": "5.4.1",
"proxy": "1.0.2",
"rimraf": "3.0.2",
"semver": "7.6.3",
"rimraf": "5.0.10",
"semver": "7.7.1",
"split2": "4.2.0",
"stoppable": "1.1.0",
"tap": "21.0.1",
@ -89,8 +90,8 @@
"zx": "7.2.3"
},
"dependencies": {
"@elastic/transport": "^8.9.1",
"apache-arrow": "^18.0.0",
"@elastic/transport": "^8.9.6",
"apache-arrow": "18.x - 19.x",
"tslib": "^2.4.0"
},
"tap": {


@ -17,162 +17,102 @@
* under the License.
*/
'use strict'
const { join } = require('path')
const minimist = require('minimist')
const stream = require('stream')
const { promisify } = require('util')
const { createWriteStream, promises } = require('fs')
const rimraf = require('rimraf')
const { rimraf } = require('rimraf')
const fetch = require('node-fetch')
const crossZip = require('cross-zip')
const ora = require('ora')
const { mkdir, writeFile } = promises
const { mkdir, cp } = promises
const pipeline = promisify(stream.pipeline)
const unzip = promisify(crossZip.unzip)
const rm = promisify(rimraf)
const esFolder = join(__dirname, '..', 'elasticsearch')
const zipFolder = join(esFolder, 'artifacts.zip')
const specFolder = join(esFolder, 'rest-api-spec', 'api')
const freeTestFolder = join(esFolder, 'rest-api-spec', 'test', 'free')
const xPackTestFolder = join(esFolder, 'rest-api-spec', 'test', 'platinum')
const artifactInfo = join(esFolder, 'info.json')
const testYamlFolder = join(__dirname, '..', 'yaml-rest-tests')
const zipFile = join(__dirname, '..', 'elasticsearch-clients-tests.zip')
async function downloadArtifacts (opts) {
if (typeof opts.version !== 'string') {
throw new Error('Missing version')
}
const schemaFolder = join(__dirname, '..', 'schema')
const schemaJson = join(schemaFolder, 'schema.json')
async function downloadArtifacts (localTests, version = 'main') {
const log = ora('Checking out spec and test').start()
log.text = 'Resolving versions'
let resolved
try {
resolved = await resolve(opts.version, opts.hash)
} catch (err) {
log.fail(err.message)
process.exit(1)
const { GITHUB_TOKEN } = process.env
if (version !== 'main') {
version = version.split('.').slice(0, 2).join('.')
}
opts.id = opts.id || resolved.id
opts.hash = opts.hash || resolved.hash
opts.version = resolved.version
log.text = 'Clean tests folder'
await rimraf(testYamlFolder)
await mkdir(testYamlFolder, { recursive: true })
const info = loadInfo()
log.text = `Fetch test YAML files for version ${version}`
if (info && info.version === opts.version) {
if (info.hash === opts.hash && info.id === opts.id) {
log.succeed('The artifact copy present locally is already up to date')
return
if (localTests) {
log.text = `Copying local tests from ${localTests}`
await cp(localTests, testYamlFolder, { recursive: true })
} else {
if (!GITHUB_TOKEN) {
log.fail("Missing required environment variable 'GITHUB_TOKEN'")
process.exit(1)
}
const response = await fetch(`https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/${version}`, {
headers: {
Authorization: `Bearer ${GITHUB_TOKEN}`,
Accept: 'application/vnd.github+json'
}
})
if (!response.ok) {
log.fail(`unexpected response ${response.statusText}`)
process.exit(1)
}
log.text = 'Downloading tests zipball'
await pipeline(response.body, createWriteStream(zipFile))
log.text = 'Unzipping tests'
await unzip(zipFile, testYamlFolder)
log.text = 'Cleanup'
await rimraf(zipFile)
}
log.text = 'Cleanup checkouts/elasticsearch'
await rm(esFolder)
await mkdir(esFolder, { recursive: true })
log.text = 'Fetching Elasticsearch specification'
await rimraf(schemaFolder)
await mkdir(schemaFolder, { recursive: true })
log.text = 'Downloading artifacts'
const response = await fetch(resolved.url)
const response = await fetch(`https://raw.githubusercontent.com/elastic/elasticsearch-specification/${version}/output/schema/schema.json`)
if (!response.ok) {
log.fail(`unexpected response ${response.statusText}`)
process.exit(1)
}
await pipeline(response.body, createWriteStream(zipFolder))
log.text = 'Unzipping'
await unzip(zipFolder, esFolder)
log.text = 'Cleanup'
await rm(zipFolder)
log.text = 'Update info'
await writeFile(artifactInfo, JSON.stringify(opts), 'utf8')
log.text = 'Downloading schema.json'
await pipeline(response.body, createWriteStream(schemaJson))
log.succeed('Done')
}
function loadInfo () {
try {
return require(artifactInfo)
} catch (err) {
return null
}
async function main () {
await downloadArtifacts()
}
async function resolve (version, hash) {
const response = await fetch(`https://artifacts-api.elastic.co/v1/versions/${version}`)
if (!response.ok) {
throw new Error(`unexpected response ${response.statusText}`)
}
const data = await response.json()
const esBuilds = data.version.builds
.filter(build => build.projects.elasticsearch != null)
.map(build => {
return {
projects: build.projects.elasticsearch,
buildId: build.build_id,
date: build.start_time,
version: build.version
}
})
.sort((a, b) => {
const dA = new Date(a.date)
const dB = new Date(b.date)
if (dA > dB) return -1
if (dA < dB) return 1
return 0
})
if (hash != null) {
const build = esBuilds.find(build => build.projects.commit_hash === hash)
if (!build) {
throw new Error(`Can't find any build with hash '${hash}'`)
}
const zipKey = Object.keys(build.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip'))
return {
url: build.projects.packages[zipKey].url,
id: build.buildId,
hash: build.projects.commit_hash,
version: build.version
}
}
const lastBuild = esBuilds[0]
const zipKey = Object.keys(lastBuild.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip'))
return {
url: lastBuild.projects.packages[zipKey].url,
id: lastBuild.buildId,
hash: lastBuild.projects.commit_hash,
version: lastBuild.version
}
}
async function main (options) {
delete options._
await downloadArtifacts(options)
}
if (require.main === module) {
process.on('unhandledRejection', function (err) {
console.error(err)
process.exit(1)
})
const options = minimist(process.argv.slice(2), {
string: ['id', 'version', 'hash']
})
main(options).catch(t => {
main().catch(t => {
console.log(t)
process.exit(2)
})
}
module.exports = downloadArtifacts
module.exports.locations = {
specFolder,
freeTestFolder,
xPackTestFolder
}
module.exports.locations = { testYamlFolder, zipFile, schemaJson }


@ -20,7 +20,7 @@
const { join } = require('path')
const { writeFile } = require('fs/promises')
const fetch = require('node-fetch')
const rimraf = require('rimraf')
const { rimraf } = require('rimraf')
const ora = require('ora')
const { convertRequests } = require('@elastic/request-converter')
const minimist = require('minimist')


@ -120,7 +120,7 @@ export default class Cluster {
/**
* Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/voting-config-exclusions.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation}
*/
async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteVotingConfigExclusionsResponse, unknown>>
@ -354,7 +354,7 @@ export default class Cluster {
/**
* Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the clusters voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/voting-config-exclusions.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation}
*/
async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPostVotingConfigExclusionsResponse>
async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPostVotingConfigExclusionsResponse, unknown>>
@ -469,7 +469,7 @@ export default class Cluster {
}
/**
* Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster).
* Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the `/_resolve/cluster` endpoint.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/cluster-remote-info.html | Elasticsearch API documentation}
*/
async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRemoteInfoResponse>


@ -53,7 +53,7 @@ export default class Esql {
async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryResponse>
async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = []
const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata', 'wait_for_completion_timeout']
const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata', 'wait_for_completion_timeout', 'keep_alive', 'keep_on_completion']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body


@ -139,7 +139,7 @@ export default class Fleet {
}
/**
* Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.
* Executes several fleet searches with a single API request. The API follows the same structure as the multi search (`_msearch`) API. However, similar to the fleet search API, it supports the `wait_for_checkpoints` parameter.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/fleet-multi-search.html | Elasticsearch API documentation}
*/
async msearch<TDocument = unknown> (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetMsearchResponse<TDocument>>


@ -79,7 +79,7 @@ export default class Indices {
/**
* Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-analyze.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-analyze | Elasticsearch API documentation}
*/
async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse>
async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>>
@ -601,7 +601,7 @@ export default class Indices {
}
/**
* Delete a legacy index template.
* Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-delete-template-v1.html | Elasticsearch API documentation}
*/
async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteTemplateResponse>
@ -1363,7 +1363,7 @@ export default class Indices {
}
/**
* Get index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
* Get legacy index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-get-template-v1.html | Elasticsearch API documentation}
*/
async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetTemplateResponse>
@ -1758,7 +1758,7 @@ export default class Indices {
}
/**
* Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
* Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-update-settings.html | Elasticsearch API documentation}
*/
async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>
@ -1802,7 +1802,7 @@ export default class Indices {
}
/**
* Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
* Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-templates-v1.html | Elasticsearch API documentation}
*/
async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>


@ -45,7 +45,7 @@ export default class Inference {
}
/**
* Perform chat completion inference
* Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai`, `elastic` and `googlevertexai` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/chat-completion-inference-api.html | Elasticsearch API documentation}
*/
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>
@ -262,7 +262,7 @@ export default class Inference {
}
/**
* Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`text_embedding`) * Mistral (`text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html | Elasticsearch API documentation}
*/
async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>
@ -307,7 +307,7 @@ export default class Inference {
}
/**
* Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-alibabacloud-ai-search.html | Elasticsearch API documentation}
*/
async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAlibabacloudResponse>
@ -352,7 +352,7 @@ export default class Inference {
}
/**
* Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-amazon-bedrock.html | Elasticsearch API documentation}
*/
async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAmazonbedrockResponse>
@ -397,7 +397,7 @@ export default class Inference {
}
/**
* Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-anthropic.html | Elasticsearch API documentation}
*/
async putAnthropic (this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAnthropicResponse>
@ -442,7 +442,7 @@ export default class Inference {
}
/**
* Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Azure AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-ai-studio.html | Elasticsearch API documentation}
*/
async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureaistudioResponse>
@ -487,7 +487,7 @@ export default class Inference {
}
/**
* Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-openai.html | Elasticsearch API documentation}
*/
async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureopenaiResponse>
@ -532,7 +532,7 @@ export default class Inference {
}
/**
* Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-cohere.html | Elasticsearch API documentation}
*/
async putCohere (this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutCohereResponse>
@ -667,7 +667,7 @@ export default class Inference {
}
/**
* Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-ai-studio.html | Elasticsearch API documentation}
*/
async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGoogleaistudioResponse>
@ -712,7 +712,7 @@ export default class Inference {
}
/**
* Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-vertex-ai.html | Elasticsearch API documentation}
*/
async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGooglevertexaiResponse>
@ -757,7 +757,7 @@ export default class Inference {
}
/**
* Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small`
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-hugging-face.html | Elasticsearch API documentation}
*/
async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutHuggingFaceResponse>
@ -802,7 +802,7 @@ export default class Inference {
}
/**
* Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to <https://jina.ai/embeddings/>.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-jinaai.html | Elasticsearch API documentation}
*/
async putJinaai (this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutJinaaiResponse>
@ -847,7 +847,7 @@ export default class Inference {
}
/**
* Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-mistral.html | Elasticsearch API documentation}
*/
async putMistral (this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutMistralResponse>
@ -892,7 +892,7 @@ export default class Inference {
}
/**
* Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-openai.html | Elasticsearch API documentation}
*/
async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutOpenaiResponse>
@ -982,7 +982,7 @@ export default class Inference {
}
/**
* Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-watsonx-ai.html | Elasticsearch API documentation}
*/
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutWatsonxResponse>

View File

@ -166,7 +166,7 @@ export default class License {
/**
* Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-license.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-license-post | Elasticsearch API documentation}
*/
async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicensePostResponse>
async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.LicensePostResponse, unknown>>
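A hedged sketch of updating a license with the client; the license file path is hypothetical, and `acknowledge: true` covers the re-submission case described above.

import { readFile } from 'node:fs/promises'
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// license.json is a placeholder for the license file obtained from Elastic.
const license = JSON.parse(await readFile('./license.json', 'utf8'))
const response = await client.license.post({ license, acknowledge: true })
console.log(response)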

View File

@ -988,7 +988,7 @@ export default class Ml {
}
/**
* Get data frame analytics jobs usage info.
* Get data frame analytics job stats.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/get-dfanalytics-stats.html | Elasticsearch API documentation}
*/
async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
@ -1028,7 +1028,7 @@ export default class Ml {
}
/**
* Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/ml-get-datafeed-stats.html | Elasticsearch API documentation}
*/
async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
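A short sketch of the call described above, using `_all` to cover every datafeed; the field access assumes the documented response shape.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// '_all' (or omitting datafeed_id entirely) returns stats for every datafeed.
const stats = await client.ml.getDatafeedStats({ datafeed_id: '_all' })
for (const datafeed of stats.datafeeds) {
  console.log(datafeed.datafeed_id, datafeed.state)
}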
@ -1192,7 +1192,7 @@ export default class Ml {
}
/**
* Get anomaly detection jobs usage info.
* Get anomaly detection job stats.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/ml-get-job-stats.html | Elasticsearch API documentation}
*/
async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
@ -2623,7 +2623,7 @@ export default class Ml {
/**
* Update a data frame analytics job.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-dfanalytics.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation}
*/
async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDataFrameAnalyticsResponse>
async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateDataFrameAnalyticsResponse, unknown>>
@ -2844,7 +2844,7 @@ export default class Ml {
/**
* Update a trained model deployment.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-trained-model-deployment.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation}
*/
async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateTrainedModelDeploymentResponse>
async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlUpdateTrainedModelDeploymentResponse, unknown>>

View File

@ -403,7 +403,7 @@ export default class Transform {
/**
* Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, so that data consistency is maintained in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-transform.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-update-transform | Elasticsearch API documentation}
*/
async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformUpdateTransformResponse>
async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpdateTransformResponse, unknown>>
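A hedged sketch of updating a transform; the transform id and the new property values are made up, and only the updatable properties mentioned above may be sent.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Hypothetical transform id; description and frequency are among the updatable properties.
const updated = await client.transform.updateTransform({
  transform_id: 'ecommerce-customer-transform',
  description: 'Maximum order value per customer',
  frequency: '15m'
})
console.log(updated)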
@ -447,7 +447,7 @@ export default class Transform {
/**
* Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue, then re-run the process. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/upgrade-transforms.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation}
*/
async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformUpgradeTransformsResponse>
async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformUpgradeTransformsResponse, unknown>>
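A minimal sketch of the upgrade call; `dry_run` is assumed to behave as in the REST API, reporting what would be upgraded without changing anything.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// First check what would change, then run the real upgrade.
const preview = await client.transform.upgradeTransforms({ dry_run: true })
console.log(preview)
await client.transform.upgradeTransforms()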

View File

@ -46,7 +46,7 @@ export default class Watcher {
/**
* Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-ack-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-ack-watch | Elasticsearch API documentation}
*/
async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherAckWatchResponse>
async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherAckWatchResponse, unknown>>
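A small sketch of acknowledging one action of a watch; the watch and action ids are hypothetical, and omitting `action_id` acknowledges all actions.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Acknowledge a single action; leave action_id out to acknowledge every action of the watch.
const acked = await client.watcher.ackWatch({
  watch_id: 'cluster_health_watch',
  action_id: 'email_admin'
})
console.log(acked.status)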
@ -86,7 +86,7 @@ export default class Watcher {
/**
* Activate a watch. A watch can be either active or inactive.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-activate-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-activate-watch | Elasticsearch API documentation}
*/
async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherActivateWatchResponse>
async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherActivateWatchResponse, unknown>>
@ -118,7 +118,7 @@ export default class Watcher {
/**
* Deactivate a watch. A watch can be either active or inactive.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-deactivate-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation}
*/
async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeactivateWatchResponse>
async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeactivateWatchResponse, unknown>>
@ -150,7 +150,7 @@ export default class Watcher {
/**
* Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-delete-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-delete-watch | Elasticsearch API documentation}
*/
async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeleteWatchResponse>
async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeleteWatchResponse, unknown>>
@ -182,7 +182,7 @@ export default class Watcher {
/**
* Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control over how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-execute-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-execute-watch | Elasticsearch API documentation}
*/
async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherExecuteWatchResponse>
async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherExecuteWatchResponse, unknown>>
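A hedged sketch of forcing a stored watch to run once for debugging; the watch id is made up, and the flags shown mirror the behaviours described above.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Run the watch regardless of its condition and keep the run out of the watch history.
const run = await client.watcher.executeWatch({
  id: 'cluster_health_watch',
  ignore_condition: true,
  record_execution: false
})
console.log(run)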
@ -234,7 +234,7 @@ export default class Watcher {
/**
* Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-get-settings.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-settings | Elasticsearch API documentation}
*/
async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetSettingsResponse>
async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetSettingsResponse, unknown>>
@ -264,7 +264,7 @@ export default class Watcher {
/**
* Get a watch.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-get-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-watch | Elasticsearch API documentation}
*/
async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetWatchResponse>
async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetWatchResponse, unknown>>
@ -296,7 +296,7 @@ export default class Watcher {
/**
* Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-put-watch.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-put-watch | Elasticsearch API documentation}
*/
async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherPutWatchResponse>
async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherPutWatchResponse, unknown>>
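A minimal illustrative watch registered through the API, as required above; the index pattern, schedule, and threshold are assumptions made for the sketch.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Search logs-* every 10 minutes and log a message whenever any error documents are found.
await client.watcher.putWatch({
  id: 'errors_watch',
  active: true,
  trigger: { schedule: { interval: '10m' } },
  input: {
    search: {
      request: { indices: ['logs-*'], body: { query: { match: { level: 'error' } } } }
    }
  },
  condition: { compare: { 'ctx.payload.hits.total': { gt: 0 } } },
  actions: { log_errors: { logging: { text: 'Found {{ctx.payload.hits.total}} error documents' } } }
})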
@ -340,7 +340,7 @@ export default class Watcher {
/**
* Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-query-watches.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-query-watches | Elasticsearch API documentation}
*/
async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherQueryWatchesResponse>
async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherQueryWatchesResponse, unknown>>
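A short sketch of paging through watches; per the note above only `_id` and `metadata.*` can be queried or sorted, which is what this example does. The watch id is hypothetical.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Page through registered watches, filtering and sorting on _id only.
const page = await client.watcher.queryWatches({
  from: 0,
  size: 10,
  query: { term: { _id: 'errors_watch' } },
  sort: [{ _id: 'asc' }]
})
console.log(page.count)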
@ -382,7 +382,7 @@ export default class Watcher {
/**
* Start the watch service. Start the Watcher service if it is not already running.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-start.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-start | Elasticsearch API documentation}
*/
async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStartResponse>
async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStartResponse, unknown>>
@ -412,7 +412,7 @@ export default class Watcher {
/**
* Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-stats.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stats | Elasticsearch API documentation}
*/
async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStatsResponse>
async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStatsResponse, unknown>>
@ -452,7 +452,7 @@ export default class Watcher {
/**
* Stop the watch service. Stop the Watcher service if it is running.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-stop.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stop | Elasticsearch API documentation}
*/
async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStopResponse>
async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStopResponse, unknown>>
@ -482,7 +482,7 @@ export default class Watcher {
/**
* Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, `index.routing.allocation.include.*` and `index.routing.allocation.require.*`. Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the Watcher shards must always be in the `data_content` tier.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/watcher-api-update-settings.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-update-settings | Elasticsearch API documentation}
*/
async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherUpdateSettingsResponse>
async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherUpdateSettingsResponse, unknown>>
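A hedged sketch of the settings update; the request body keys are assumed to mirror the index setting names listed above, and the replica count is just an example value.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Assumed request shape: flat keys matching the allowed .watches index settings.
await client.watcher.updateSettings({ 'index.number_of_replicas': 1 })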

View File

@ -76,7 +76,7 @@ export default class Xpack {
/**
* Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/usage-api.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-xpack | Elasticsearch API documentation}
*/
async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackUsageResponse>
async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackUsageResponse, unknown>>
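A one-call sketch of retrieving usage information; the response is keyed by feature name.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Each key of the response (security, watcher, ml, ...) carries enabled/available flags and stats.
const usage = await client.xpack.usage()
console.log(Object.keys(usage))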

View File

@ -1528,7 +1528,7 @@ export interface SearchInnerHits {
ignore_unmapped?: boolean
script_fields?: Record<Field, ScriptField>
seq_no_primary_term?: boolean
fields?: Fields
fields?: Field[]
sort?: Sort
_source?: SearchSourceConfig
stored_fields?: Fields
@ -1720,7 +1720,7 @@ export interface SearchShardProfile {
index: IndexName
node_id: NodeId
searches: SearchSearchProfile[]
shard_id: long
shard_id: integer
}
export interface SearchSmoothingModelContainer {
@ -2243,7 +2243,7 @@ export type EpochTime<Unit = unknown> = Unit
export interface ErrorCauseKeys {
type: string
reason?: string
reason?: string | null
stack_trace?: string
caused_by?: ErrorCause
root_cause?: ErrorCause[]
@ -2426,6 +2426,12 @@ export interface InlineGetKeys<TDocument = unknown> {
export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any }
export interface InnerRetriever {
retriever: RetrieverContainer
weight: float
normalizer: ScoreNormalizer
}
export type Ip = string
export interface KnnQuery extends QueryDslQueryBase {
@ -2471,6 +2477,11 @@ export type Level = 'cluster' | 'indices' | 'shards'
export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
export interface LinearRetriever extends RetrieverBase {
retrievers?: InnerRetriever[]
rank_window_size: integer
}
export type MapboxVectorTiles = ArrayBuffer
export interface MergesStats {
@ -2559,6 +2570,13 @@ export type Password = string
export type Percentage = string | float
export interface PinnedRetriever extends RetrieverBase {
retriever: RetrieverContainer
ids?: string[]
docs?: SpecifiedDocument[]
rank_window_size: integer
}
export type PipelineName = string
export interface PluginStats {
@ -2644,6 +2662,11 @@ export interface RescoreVector {
oversample: float
}
export interface RescorerRetriever extends RetrieverBase {
retriever: RetrieverContainer
rescore: SearchRescore | SearchRescore[]
}
export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
export interface Retries {
@ -2654,6 +2677,7 @@ export interface Retries {
export interface RetrieverBase {
filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
min_score?: float
_name?: string
}
export interface RetrieverContainer {
@ -2662,6 +2686,9 @@ export interface RetrieverContainer {
rrf?: RRFRetriever
text_similarity_reranker?: TextSimilarityReranker
rule?: RuleRetriever
rescorer?: RescorerRetriever
linear?: LinearRetriever
pinned?: PinnedRetriever
}
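The retriever variants added above all plug into `RetrieverContainer`, which is what a search request's `retriever` option accepts. A hedged sketch built only from the interfaces shown in this diff; the index name, query, and pinned ids are made up.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Pin two known documents above the organic results of a standard retriever.
const result = await client.search({
  index: 'products',
  retriever: {
    pinned: {
      ids: ['doc-1', 'doc-2'],
      retriever: { standard: { query: { match: { title: 'wireless headphones' } } } },
      rank_window_size: 100
    }
  }
})
console.log(result.hits.hits.map(hit => hit._id))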
export type Routing = string
@ -2672,7 +2699,7 @@ export interface RrfRank {
}
export interface RuleRetriever extends RetrieverBase {
ruleset_ids: Id[]
ruleset_ids: Id | Id[]
match_criteria: any
retriever: RetrieverContainer
rank_window_size?: integer
@ -2680,6 +2707,8 @@ export interface RuleRetriever extends RetrieverBase {
export type ScalarValue = long | double | string | boolean | null
export type ScoreNormalizer = 'none' | 'minmax'
export interface ScoreSort {
order?: SortOrder
}
@ -2828,6 +2857,11 @@ export type SortOrder = 'asc' | 'desc'
export type SortResults = FieldValue[]
export interface SpecifiedDocument {
index?: IndexName
id: Id
}
export interface StandardRetriever extends RetrieverBase {
query?: QueryDslQueryContainer
search_after?: SortResults
@ -4395,6 +4429,10 @@ export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetr
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase {
type: 'apostrophe'
}
export interface AnalysisArabicAnalyzer {
type: 'arabic'
stopwords?: AnalysisStopWords
@ -4402,6 +4440,10 @@ export interface AnalysisArabicAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'arabic_normalization'
}
export interface AnalysisArmenianAnalyzer {
type: 'armenian'
stopwords?: AnalysisStopWords
@ -4474,6 +4516,22 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana'
export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase {
type: 'cjk_bigram'
ignored_scripts?: AnalysisCjkBigramIgnoredScript[]
output_unigrams?: boolean
}
export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase {
type: 'cjk_width'
}
export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase {
type: 'classic'
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
@ -4488,7 +4546,6 @@ export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase
}
export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
hyphenation_patterns_path?: string
max_subword_size?: integer
min_subword_size?: integer
min_word_size?: integer
@ -4531,6 +4588,10 @@ export interface AnalysisDanishAnalyzer {
stopwords_path?: string
}
export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase {
type: 'decimal_digit'
}
export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
@ -4611,6 +4672,10 @@ export interface AnalysisFinnishAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase {
type: 'flatten_graph'
}
export interface AnalysisFrenchAnalyzer {
type: 'french'
stopwords?: AnalysisStopWords
@ -4632,6 +4697,10 @@ export interface AnalysisGermanAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'german_normalization'
}
export interface AnalysisGreekAnalyzer {
type: 'greek'
stopwords?: AnalysisStopWords
@ -4645,6 +4714,10 @@ export interface AnalysisHindiAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'hindi_normalization'
}
export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
type: 'html_strip'
escaped_tags?: string[]
@ -4662,11 +4735,16 @@ export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
dedup?: boolean
dictionary?: string
locale: string
lang: string
language: string
longest_only?: boolean
}
export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
type: 'hyphenation_decompounder'
hyphenation_patterns_path: string
no_sub_matches?: boolean
no_overlapping_matches?: boolean
}
export interface AnalysisIcuAnalyzer {
@ -4708,6 +4786,7 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa
type: 'icu_normalizer'
mode?: AnalysisIcuNormalizationMode
name?: AnalysisIcuNormalizationType
unicode_set_filter?: string
}
export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
@ -4732,6 +4811,10 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase
id: string
}
export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'indic_normalization'
}
export interface AnalysisIndonesianAnalyzer {
type: 'indonesian'
stopwords?: AnalysisStopWords
@ -4753,6 +4836,11 @@ export interface AnalysisItalianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase {
type: 'ja_stop'
stopwords?: AnalysisStopWords
}
export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
type: 'kstem'
}
@ -4762,7 +4850,7 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude'
export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
type: 'keep_types'
mode?: AnalysisKeepTypesMode
types?: string[]
types: string[]
}
export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
@ -4785,6 +4873,10 @@ export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBas
keywords_pattern?: string
}
export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase {
type: 'keyword_repeat'
}
export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
type: 'keyword'
buffer_size?: integer
@ -4866,9 +4958,11 @@ export interface AnalysisLowercaseNormalizer {
export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
type: 'lowercase'
language?: string
language?: AnalysisLowercaseTokenFilterLanguages
}
export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish'
export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
type: 'lowercase'
}
@ -4879,6 +4973,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
mappings_path?: string
}
export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase {
type: 'min_hash'
bucket_count?: integer
hash_count?: integer
hash_set_size?: integer
with_rotation?: boolean
}
export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
type: 'multiplexer'
filters: string[]
@ -4966,7 +5068,6 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase
export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
type: 'pattern_replace'
all?: boolean
flags?: string
pattern: string
replacement?: string
}
@ -4984,6 +5085,10 @@ export interface AnalysisPersianAnalyzer {
stopwords_path?: string
}
export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'persian_normalization'
}
export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
@ -5040,6 +5145,14 @@ export interface AnalysisRussianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase {
type: 'scandinavian_folding'
}
export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'scandinavian_normalization'
}
export interface AnalysisSerbianAnalyzer {
type: 'serbian'
stopwords?: AnalysisStopWords
@ -5047,11 +5160,15 @@ export interface AnalysisSerbianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'serbian_normalization'
}
export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
type: 'shingle'
filler_token?: string
max_shingle_size?: integer | string
min_shingle_size?: integer | string
max_shingle_size?: SpecUtilsStringified<integer>
min_shingle_size?: SpecUtilsStringified<integer>
output_unigrams?: boolean
output_unigrams_if_no_shingles?: boolean
token_separator?: string
@ -5079,7 +5196,7 @@ export interface AnalysisSnowballAnalyzer {
stopwords?: AnalysisStopWords
}
export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish'
export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
type: 'snowball'
@ -5093,6 +5210,10 @@ export interface AnalysisSoraniAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'sorani_normalization'
}
export interface AnalysisSpanishAnalyzer {
type: 'spanish'
stopwords?: AnalysisStopWords
@ -5138,7 +5259,9 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
stopwords_path?: string
}
export type AnalysisStopWords = string | string[]
export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_'
export type AnalysisStopWords = AnalysisStopWordLanguage | string[]
export interface AnalysisSwedishAnalyzer {
type: 'swedish'
@ -5149,20 +5272,15 @@ export interface AnalysisSwedishAnalyzer {
export type AnalysisSynonymFormat = 'solr' | 'wordnet'
export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase {
type: 'synonym_graph'
expand?: boolean
format?: AnalysisSynonymFormat
lenient?: boolean
synonyms?: string[]
synonyms_path?: string
synonyms_set?: string
tokenizer?: string
updateable?: boolean
}
export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase {
type: 'synonym'
}
export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase {
expand?: boolean
format?: AnalysisSynonymFormat
lenient?: boolean
@ -5191,7 +5309,7 @@ export interface AnalysisTokenFilterBase {
version?: VersionString
}
export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
export type AnalysisTokenizer = string | AnalysisTokenizerDefinition
@ -5241,27 +5359,17 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
max_token_length?: integer
}
export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
type: 'word_delimiter_graph'
adjust_offsets?: boolean
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
generate_number_parts?: boolean
generate_word_parts?: boolean
ignore_keywords?: boolean
preserve_original?: SpecUtilsStringified<boolean>
protected_words?: string[]
protected_words_path?: string
split_on_case_change?: boolean
split_on_numerics?: boolean
stem_english_possessive?: boolean
type_table?: string[]
type_table_path?: string
}
export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
type: 'word_delimiter'
}
export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase {
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
@ -6026,7 +6134,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile
geotile?: GeoTile
geohash?: GeoHash
geohex?: GeoHexCell
}
@ -6096,6 +6204,8 @@ export interface QueryDslIntervalsContainer {
fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard
}
@ -6141,9 +6251,26 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard
}
export interface QueryDslIntervalsRange {
analyzer?: string
gte?: string
gt?: string
lte?: string
lt?: string
use_field?: Field
}
export interface QueryDslIntervalsRegexp {
analyzer?: string
pattern: string
use_field?: Field
}
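// --- Illustrative sketch, not part of the generated diff ---
// The intervals query gains range and regexp rules; field names and values
// below are placeholders.
const titleIntervals: QueryDslIntervalsQuery = {
  range: { gte: 'aardvark', lte: 'mongoose', analyzer: 'standard' },
}
const bodyIntervals: QueryDslIntervalsQuery = {
  regexp: { pattern: 'foo.*bar', use_field: 'body' },
}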
export interface QueryDslIntervalsWildcard {
analyzer?: string
pattern: string
@ -6461,7 +6588,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
export interface QueryDslRuleQuery extends QueryDslQueryBase {
organic: QueryDslQueryContainer
ruleset_ids: Id[]
ruleset_ids?: Id | Id[]
ruleset_id?: string
match_criteria: any
}
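// --- Illustrative sketch, not part of the generated diff ---
// ruleset_ids now also accepts a single Id, and a singular ruleset_id is
// available as an alternative; the query and criteria below are placeholders.
const ruleQuery: QueryDslRuleQuery = {
  organic: { match: { title: 'running shoes' } },
  ruleset_ids: 'promo-rules',
  match_criteria: { user_segment: 'eu' },
}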
@ -6871,6 +6999,10 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' |
export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[]
export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 
'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string
export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[]
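// --- Illustrative sketch, not part of the generated diff ---
// Column selections for cat.nodes can now be typed against the new union;
// see the CatNodesRequest change further below, where h becomes
// CatCatNodeColumns.
const nodeColumns: CatCatNodeColumns = ['name', 'heap.percent', 'node.role', 'version']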
export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
}
@ -8061,7 +8193,7 @@ export interface CatNodesRequest extends CatCatRequestBase {
bytes?: Bytes
full_id?: boolean | string
include_unloaded_segments?: boolean
h?: Names
h?: CatCatNodeColumns
s?: Names
master_timeout?: Duration
time?: TimeUnit
@ -9424,6 +9556,39 @@ export interface ClusterStateRequest extends RequestBase {
export type ClusterStateResponse = any
export interface ClusterStatsCCSStats {
clusters?: Record<string, ClusterStatsRemoteClusterInfo>
_search: ClusterStatsCCSUsageStats
_esql?: ClusterStatsCCSUsageStats
}
export interface ClusterStatsCCSUsageClusterStats {
total: integer
skipped: integer
took: ClusterStatsCCSUsageTimeValue
}
export interface ClusterStatsCCSUsageStats {
total: integer
success: integer
skipped: integer
took: ClusterStatsCCSUsageTimeValue
took_mrt_true?: ClusterStatsCCSUsageTimeValue
took_mrt_false?: ClusterStatsCCSUsageTimeValue
remotes_per_search_max: integer
remotes_per_search_avg: double
failure_reasons: Record<string, integer>
features: Record<string, integer>
clients: Record<string, integer>
clusters: Record<string, ClusterStatsCCSUsageClusterStats>
}
export interface ClusterStatsCCSUsageTimeValue {
max: DurationValue<UnitMillis>
avg: DurationValue<UnitMillis>
p90: DurationValue<UnitMillis>
}
export interface ClusterStatsCharFilterTypes {
analyzer_types: ClusterStatsFieldTypes[]
built_in_analyzers: ClusterStatsFieldTypes[]
@ -9647,6 +9812,24 @@ export interface ClusterStatsOperatingSystemMemoryInfo {
used_percent: integer
}
export interface ClusterStatsRemoteClusterInfo {
cluster_uuid: string
mode: string
skip_unavailable: boolean
transport_compress: string
status: HealthStatus
version: VersionString[]
nodes_count: integer
shards_count: integer
indices_count: integer
indices_total_size_in_bytes: long
indices_total_size?: string
max_heap_in_bytes: long
max_heap?: string
mem_total_in_bytes: long
mem_total?: string
}
export interface ClusterStatsRequest extends RequestBase {
node_id?: NodeIds
include_remotes?: boolean
@ -9679,6 +9862,7 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
nodes: ClusterStatsClusterNodes
status: HealthStatus
timestamp: long
ccs: ClusterStatsCCSStats
}
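// --- Illustrative sketch, not part of the generated diff ---
// Reading the new cross-cluster search usage block from a cluster stats
// response; `stats` is assumed to come from a cluster stats call made
// elsewhere (e.g. client.cluster.stats()).
declare const stats: ClusterStatsStatsResponseBase
const searchUsage: ClusterStatsCCSUsageStats = stats.ccs._search
const p90Millis = searchUsage.took.p90
const esqlRuns = stats.ccs._esql?.total ?? 0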
export interface ConnectorConnector {
@ -10434,8 +10618,6 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
delimiter?: string
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
keep_on_completion?: boolean
columnar?: boolean
filter?: QueryDslQueryContainer
locale?: string
@ -10445,6 +10627,8 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
tables?: Record<string, Record<string, EsqlTableValuesContainer>>
include_ccs_metadata?: boolean
wait_for_completion_timeout?: Duration
keep_alive?: Duration
keep_on_completion?: boolean
}
export type EsqlAsyncQueryResponse = EsqlResult
@ -10458,6 +10642,7 @@ export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase
export interface EsqlAsyncQueryGetRequest extends RequestBase {
id: Id
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
wait_for_completion_timeout?: Duration
}
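// --- Illustrative sketch, not part of the generated diff ---
// The async ES|QL "get" request now accepts a format hint; the id below is a
// placeholder for a previously returned async query id, and 'json' is assumed
// to be a member of EsqlQueryEsqlFormat.
const getAsyncResult: EsqlAsyncQueryGetRequest = {
  id: 'my-async-query-id',
  format: 'json',
  wait_for_completion_timeout: '2s',
}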
@ -10811,7 +10996,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged {
lifecycle_date?: DateTime
lifecycle_date_millis?: EpochTime<UnitMillis>
managed: true
phase: Name
phase?: Name
phase_time?: DateTime
phase_time_millis?: EpochTime<UnitMillis>
policy?: Name
@ -11886,10 +12071,6 @@ export interface IndicesGetRequest extends RequestBase {
export type IndicesGetResponse = Record<IndexName, IndicesIndexState>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasRequest extends RequestBase {
name?: Names
index?: Indices
@ -11901,6 +12082,17 @@ export interface IndicesGetAliasRequest extends RequestBase {
export type IndicesGetAliasResponse = Record<IndexName, IndicesGetAliasIndexAliases>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasNotFoundAliasesKeys {
error: string
status: number
}
export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys
& { [property: string]: IndicesGetAliasIndexAliases | string | number }
export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
name: DataStreamName
lifecycle?: IndicesDataStreamLifecycleWithRollover
@ -13099,6 +13291,86 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
task_type: InferenceTaskType
}
export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAlibabaCloudAI
}
export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAmazonBedrock
}
export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAnthropic
}
export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAzureAIStudio
}
export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAzureOpenAI
}
export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeCohere
}
export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeELSER
}
export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeElasticsearch
}
export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeGoogleAIStudio
}
export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeGoogleVertexAI
}
export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeHuggingFace
}
export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeJinaAi
}
export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeMistral
}
export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeOpenAI
}
export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeVoyageAI
}
export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeWatsonx
}
export interface InferenceInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding_bits?: InferenceTextEmbeddingByteResult[]
@ -13207,6 +13479,38 @@ export type InferenceTaskSettings = any
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'
export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding'
export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion'
export type InferenceTaskTypeAnthropic = 'completion'
export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'
export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'
export type InferenceTaskTypeELSER = 'sparse_embedding'
export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank'
export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeHuggingFace = 'text_embedding'
export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'
export type InferenceTaskTypeMistral = 'text_embedding'
export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'
export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeWatsonx = 'text_embedding'
export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
}
@ -13325,7 +13629,7 @@ export interface InferencePutAlibabacloudRequest extends RequestBase {
task_settings?: InferenceAlibabaCloudTaskSettings
}
export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo
export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI
export interface InferencePutAmazonbedrockRequest extends RequestBase {
task_type: InferenceAmazonBedrockTaskType
@ -13336,7 +13640,7 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase {
task_settings?: InferenceAmazonBedrockTaskSettings
}
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock
export interface InferencePutAnthropicRequest extends RequestBase {
task_type: InferenceAnthropicTaskType
@ -13347,7 +13651,7 @@ export interface InferencePutAnthropicRequest extends RequestBase {
task_settings?: InferenceAnthropicTaskSettings
}
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic
export interface InferencePutAzureaistudioRequest extends RequestBase {
task_type: InferenceAzureAiStudioTaskType
@ -13358,7 +13662,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase {
task_settings?: InferenceAzureAiStudioTaskSettings
}
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio
export interface InferencePutAzureopenaiRequest extends RequestBase {
task_type: InferenceAzureOpenAITaskType
@ -13369,7 +13673,7 @@ export interface InferencePutAzureopenaiRequest extends RequestBase {
task_settings?: InferenceAzureOpenAITaskSettings
}
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI
export interface InferencePutCohereRequest extends RequestBase {
task_type: InferenceCohereTaskType
@ -13380,7 +13684,7 @@ export interface InferencePutCohereRequest extends RequestBase {
task_settings?: InferenceCohereTaskSettings
}
export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
export interface InferencePutElasticsearchRequest extends RequestBase {
task_type: InferenceElasticsearchTaskType
@ -13391,7 +13695,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase {
task_settings?: InferenceElasticsearchTaskSettings
}
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
export interface InferencePutElserRequest extends RequestBase {
task_type: InferenceElserTaskType
@ -13401,7 +13705,7 @@ export interface InferencePutElserRequest extends RequestBase {
service_settings: InferenceElserServiceSettings
}
export type InferencePutElserResponse = InferenceInferenceEndpointInfo
export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
export interface InferencePutGoogleaistudioRequest extends RequestBase {
task_type: InferenceGoogleAiStudioTaskType
@ -13411,7 +13715,7 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase {
service_settings: InferenceGoogleAiStudioServiceSettings
}
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
export interface InferencePutGooglevertexaiRequest extends RequestBase {
task_type: InferenceGoogleVertexAITaskType
@ -13422,7 +13726,7 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase {
task_settings?: InferenceGoogleVertexAITaskSettings
}
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI
export interface InferencePutHuggingFaceRequest extends RequestBase {
task_type: InferenceHuggingFaceTaskType
@ -13432,7 +13736,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
service_settings: InferenceHuggingFaceServiceSettings
}
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
export interface InferencePutJinaaiRequest extends RequestBase {
task_type: InferenceJinaAITaskType
@ -13443,7 +13747,7 @@ export interface InferencePutJinaaiRequest extends RequestBase {
task_settings?: InferenceJinaAITaskSettings
}
export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfo
export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi
export interface InferencePutMistralRequest extends RequestBase {
task_type: InferenceMistralTaskType
@ -13453,7 +13757,7 @@ export interface InferencePutMistralRequest extends RequestBase {
service_settings: InferenceMistralServiceSettings
}
export type InferencePutMistralResponse = InferenceInferenceEndpointInfo
export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
export interface InferencePutOpenaiRequest extends RequestBase {
task_type: InferenceOpenAITaskType
@ -13464,7 +13768,7 @@ export interface InferencePutOpenaiRequest extends RequestBase {
task_settings?: InferenceOpenAITaskSettings
}
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
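// --- Illustrative sketch, not part of the generated diff ---
// Each put-inference response is now narrowed to its provider-specific
// endpoint info, so task_type is the provider's union rather than the generic
// InferenceTaskType.
declare const openAiEndpoint: InferencePutOpenaiResponse
const openAiTask: InferenceTaskTypeOpenAI = openAiEndpoint.task_type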
export interface InferencePutVoyageaiRequest extends RequestBase {
task_type: InferenceVoyageAITaskType
@ -13475,7 +13779,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
task_settings?: InferenceVoyageAITaskSettings
}
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI
export interface InferencePutWatsonxRequest extends RequestBase {
task_type: InferenceWatsonxTaskType
@ -13484,7 +13788,7 @@ export interface InferencePutWatsonxRequest extends RequestBase {
service_settings: InferenceWatsonxServiceSettings
}
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
export interface InferenceRerankRequest extends RequestBase {
inference_id: Id
@ -13864,16 +14168,18 @@ export interface IngestPipelineProcessor extends IngestProcessorBase {
ignore_missing_pipeline?: boolean
}
export interface IngestPipelineSimulation {
export interface IngestPipelineProcessorResult {
doc?: IngestDocumentSimulation
tag?: string
processor_type?: string
status?: WatcherActionStatusOptions
status?: IngestPipelineSimulationStatusOptions
description?: string
ignored_error?: ErrorCause
error?: ErrorCause
}
export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped'
export interface IngestProcessorBase {
description?: string
if?: string
@ -13995,7 +14301,7 @@ export type IngestShapeType = 'geo_shape' | 'shape'
export interface IngestSimulateDocumentResult {
doc?: IngestDocumentSimulation
error?: ErrorCause
processor_results?: IngestPipelineSimulation[]
processor_results?: IngestPipelineProcessorResult[]
}
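// --- Illustrative sketch, not part of the generated diff ---
// Walking the per-processor results of a simulated pipeline run; status now
// uses the ingest-specific union instead of the watcher one.
declare const simulated: IngestSimulateDocumentResult
for (const step of simulated.processor_results ?? []) {
  if (step.status === 'error_ignored') {
    console.log(step.tag, step.ignored_error)
  }
}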
export interface IngestSortProcessor extends IngestProcessorBase {
@ -19743,6 +20049,14 @@ export interface SlmSnapshotLifecycle {
stats: SlmStatistics
}
export interface SlmSnapshotPolicyStats {
policy: string
snapshots_taken: long
snapshots_failed: long
snapshots_deleted: long
snapshot_deletion_failures: long
}
export interface SlmStatistics {
retention_deletion_time?: Duration
retention_deletion_time_millis?: DurationValue<UnitMillis>
@ -19808,7 +20122,7 @@ export interface SlmGetStatsResponse {
total_snapshot_deletion_failures: long
total_snapshots_failed: long
total_snapshots_taken: long
policy_stats: string[]
policy_stats: SlmSnapshotPolicyStats[]
}
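// --- Illustrative sketch, not part of the generated diff ---
// policy_stats entries are now structured SlmSnapshotPolicyStats objects
// instead of plain strings.
declare const slmStats: SlmGetStatsResponse
for (const policy of slmStats.policy_stats) {
  console.log(policy.policy, policy.snapshots_taken, policy.snapshots_failed)
}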
export interface SlmGetStatusRequest extends RequestBase {
@ -20000,7 +20314,7 @@ export interface SnapshotSnapshotShardFailure {
index: IndexName
node_id?: Id
reason: string
shard_id: Id
shard_id: integer
index_uuid: Id
status: string
}



@ -1587,7 +1587,7 @@ export interface SearchInnerHits {
ignore_unmapped?: boolean
script_fields?: Record<Field, ScriptField>
seq_no_primary_term?: boolean
fields?: Fields
fields?: Field[]
sort?: Sort
_source?: SearchSourceConfig
stored_fields?: Fields
@ -1779,7 +1779,7 @@ export interface SearchShardProfile {
index: IndexName
node_id: NodeId
searches: SearchSearchProfile[]
shard_id: long
shard_id: integer
}
export interface SearchSmoothingModelContainer {
@ -2320,7 +2320,7 @@ export type EpochTime<Unit = unknown> = Unit
export interface ErrorCauseKeys {
type: string
reason?: string
reason?: string | null
stack_trace?: string
caused_by?: ErrorCause
root_cause?: ErrorCause[]
@ -2503,6 +2503,12 @@ export interface InlineGetKeys<TDocument = unknown> {
export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any }
export interface InnerRetriever {
retriever: RetrieverContainer
weight: float
normalizer: ScoreNormalizer
}
export type Ip = string
export interface KnnQuery extends QueryDslQueryBase {
@ -2548,6 +2554,11 @@ export type Level = 'cluster' | 'indices' | 'shards'
export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
export interface LinearRetriever extends RetrieverBase {
retrievers?: InnerRetriever[]
rank_window_size: integer
}
export type MapboxVectorTiles = ArrayBuffer
export interface MergesStats {
@ -2636,6 +2647,13 @@ export type Password = string
export type Percentage = string | float
export interface PinnedRetriever extends RetrieverBase {
retriever: RetrieverContainer
ids?: string[]
docs?: SpecifiedDocument[]
rank_window_size: integer
}
export type PipelineName = string
export interface PluginStats {
@ -2721,6 +2739,11 @@ export interface RescoreVector {
oversample: float
}
export interface RescorerRetriever extends RetrieverBase {
retriever: RetrieverContainer
rescore: SearchRescore | SearchRescore[]
}
export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
export interface Retries {
@ -2731,6 +2754,7 @@ export interface Retries {
export interface RetrieverBase {
filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
min_score?: float
_name?: string
}
export interface RetrieverContainer {
@ -2739,6 +2763,9 @@ export interface RetrieverContainer {
rrf?: RRFRetriever
text_similarity_reranker?: TextSimilarityReranker
rule?: RuleRetriever
rescorer?: RescorerRetriever
linear?: LinearRetriever
pinned?: PinnedRetriever
}
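// --- Illustrative sketch, not part of the generated diff ---
// Composing the new retriever variants added above; index, field, and id
// values are placeholders.
const linearRetriever: RetrieverContainer = {
  linear: {
    rank_window_size: 50,
    retrievers: [
      {
        retriever: { standard: { query: { match: { title: 'trail shoes' } } } },
        weight: 2,
        normalizer: 'minmax',
      },
    ],
  },
}
const pinnedRetriever: RetrieverContainer = {
  pinned: {
    rank_window_size: 10,
    retriever: { standard: { query: { match_all: {} } } },
    docs: [{ id: 'doc-1', index: 'my-index' }],
  },
}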
export type Routing = string
@ -2749,7 +2776,7 @@ export interface RrfRank {
}
export interface RuleRetriever extends RetrieverBase {
ruleset_ids: Id[]
ruleset_ids: Id | Id[]
match_criteria: any
retriever: RetrieverContainer
rank_window_size?: integer
@ -2757,6 +2784,8 @@ export interface RuleRetriever extends RetrieverBase {
export type ScalarValue = long | double | string | boolean | null
export type ScoreNormalizer = 'none' | 'minmax'
export interface ScoreSort {
order?: SortOrder
}
@ -2905,6 +2934,11 @@ export type SortOrder = 'asc' | 'desc'
export type SortResults = FieldValue[]
export interface SpecifiedDocument {
index?: IndexName
id: Id
}
export interface StandardRetriever extends RetrieverBase {
query?: QueryDslQueryContainer
search_after?: SortResults
@ -4472,6 +4506,10 @@ export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetr
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase {
type: 'apostrophe'
}
export interface AnalysisArabicAnalyzer {
type: 'arabic'
stopwords?: AnalysisStopWords
@ -4479,6 +4517,10 @@ export interface AnalysisArabicAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'arabic_normalization'
}
export interface AnalysisArmenianAnalyzer {
type: 'armenian'
stopwords?: AnalysisStopWords
@ -4551,6 +4593,22 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana'
export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase {
type: 'cjk_bigram'
ignored_scripts?: AnalysisCjkBigramIgnoredScript[]
output_unigrams?: boolean
}
export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase {
type: 'cjk_width'
}
export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase {
type: 'classic'
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
@ -4565,7 +4623,6 @@ export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase
}
export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
hyphenation_patterns_path?: string
max_subword_size?: integer
min_subword_size?: integer
min_word_size?: integer
@ -4608,6 +4665,10 @@ export interface AnalysisDanishAnalyzer {
stopwords_path?: string
}
export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase {
type: 'decimal_digit'
}
export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
@ -4688,6 +4749,10 @@ export interface AnalysisFinnishAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase {
type: 'flatten_graph'
}
export interface AnalysisFrenchAnalyzer {
type: 'french'
stopwords?: AnalysisStopWords
@ -4709,6 +4774,10 @@ export interface AnalysisGermanAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'german_normalization'
}
export interface AnalysisGreekAnalyzer {
type: 'greek'
stopwords?: AnalysisStopWords
@ -4722,6 +4791,10 @@ export interface AnalysisHindiAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'hindi_normalization'
}
export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
type: 'html_strip'
escaped_tags?: string[]
@ -4739,11 +4812,16 @@ export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
dedup?: boolean
dictionary?: string
locale: string
lang: string
language: string
longest_only?: boolean
}
export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
type: 'hyphenation_decompounder'
hyphenation_patterns_path: string
no_sub_matches?: boolean
no_overlapping_matches?: boolean
}
export interface AnalysisIcuAnalyzer {
@ -4785,6 +4863,7 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa
type: 'icu_normalizer'
mode?: AnalysisIcuNormalizationMode
name?: AnalysisIcuNormalizationType
unicode_set_filter?: string
}
export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
@ -4809,6 +4888,10 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase
id: string
}
export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'indic_normalization'
}
export interface AnalysisIndonesianAnalyzer {
type: 'indonesian'
stopwords?: AnalysisStopWords
@ -4830,6 +4913,11 @@ export interface AnalysisItalianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase {
type: 'ja_stop'
stopwords?: AnalysisStopWords
}
export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
type: 'kstem'
}
@ -4839,7 +4927,7 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude'
export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
type: 'keep_types'
mode?: AnalysisKeepTypesMode
types?: string[]
types: string[]
}
export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
@ -4862,6 +4950,10 @@ export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBas
keywords_pattern?: string
}
export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase {
type: 'keyword_repeat'
}
export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
type: 'keyword'
buffer_size?: integer
@ -4943,9 +5035,11 @@ export interface AnalysisLowercaseNormalizer {
export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
type: 'lowercase'
language?: string
language?: AnalysisLowercaseTokenFilterLanguages
}
export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish'
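// --- Illustrative sketch, not part of the generated diff ---
// The lowercase filter's language option is now a closed union rather than an
// arbitrary string.
const greekLowercase: AnalysisLowercaseTokenFilter = {
  type: 'lowercase',
  language: 'greek',
}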
export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
type: 'lowercase'
}
@ -4956,6 +5050,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
mappings_path?: string
}
export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase {
type: 'min_hash'
bucket_count?: integer
hash_count?: integer
hash_set_size?: integer
with_rotation?: boolean
}
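// --- Illustrative sketch, not part of the generated diff ---
// A min_hash filter definition using the newly typed options; the values are
// placeholders, not recommended settings.
const minHashFilter: AnalysisMinHashTokenFilter = {
  type: 'min_hash',
  bucket_count: 512,
  hash_count: 1,
  hash_set_size: 1,
  with_rotation: true,
}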
export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
type: 'multiplexer'
filters: string[]
@ -5043,7 +5145,6 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase
export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
type: 'pattern_replace'
all?: boolean
flags?: string
pattern: string
replacement?: string
}
@ -5061,6 +5162,10 @@ export interface AnalysisPersianAnalyzer {
stopwords_path?: string
}
export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'persian_normalization'
}
export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
@ -5117,6 +5222,14 @@ export interface AnalysisRussianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase {
type: 'scandinavian_folding'
}
export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'scandinavian_normalization'
}
export interface AnalysisSerbianAnalyzer {
type: 'serbian'
stopwords?: AnalysisStopWords
@ -5124,11 +5237,15 @@ export interface AnalysisSerbianAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'serbian_normalization'
}
export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
type: 'shingle'
filler_token?: string
max_shingle_size?: integer | string
min_shingle_size?: integer | string
max_shingle_size?: SpecUtilsStringified<integer>
min_shingle_size?: SpecUtilsStringified<integer>
output_unigrams?: boolean
output_unigrams_if_no_shingles?: boolean
token_separator?: string
@ -5156,7 +5273,7 @@ export interface AnalysisSnowballAnalyzer {
stopwords?: AnalysisStopWords
}
export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish'
export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
type: 'snowball'
@ -5170,6 +5287,10 @@ export interface AnalysisSoraniAnalyzer {
stem_exclusion?: string[]
}
export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase {
type: 'sorani_normalization'
}
export interface AnalysisSpanishAnalyzer {
type: 'spanish'
stopwords?: AnalysisStopWords
@ -5215,7 +5336,9 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
stopwords_path?: string
}
export type AnalysisStopWords = string | string[]
export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_'
export type AnalysisStopWords = AnalysisStopWordLanguage | string[]
export interface AnalysisSwedishAnalyzer {
type: 'swedish'
@ -5226,20 +5349,15 @@ export interface AnalysisSwedishAnalyzer {
export type AnalysisSynonymFormat = 'solr' | 'wordnet'
export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase {
type: 'synonym_graph'
expand?: boolean
format?: AnalysisSynonymFormat
lenient?: boolean
synonyms?: string[]
synonyms_path?: string
synonyms_set?: string
tokenizer?: string
updateable?: boolean
}
export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase {
type: 'synonym'
}
export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase {
expand?: boolean
format?: AnalysisSynonymFormat
lenient?: boolean
@ -5268,7 +5386,7 @@ export interface AnalysisTokenFilterBase {
version?: VersionString
}
export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter
export type AnalysisTokenizer = string | AnalysisTokenizerDefinition
@ -5318,27 +5436,17 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
max_token_length?: integer
}
export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
type: 'word_delimiter_graph'
adjust_offsets?: boolean
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
generate_number_parts?: boolean
generate_word_parts?: boolean
ignore_keywords?: boolean
preserve_original?: SpecUtilsStringified<boolean>
protected_words?: string[]
protected_words_path?: string
split_on_case_change?: boolean
split_on_numerics?: boolean
stem_english_possessive?: boolean
type_table?: string[]
type_table_path?: string
}
export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
type: 'word_delimiter'
}
export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase {
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
@ -6103,7 +6211,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile
geotile?: GeoTile
geohash?: GeoHash
geohex?: GeoHexCell
}
@ -6173,6 +6281,8 @@ export interface QueryDslIntervalsContainer {
fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard
}
@ -6218,9 +6328,26 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard
}
export interface QueryDslIntervalsRange {
analyzer?: string
gte?: string
gt?: string
lte?: string
lt?: string
use_field?: Field
}
export interface QueryDslIntervalsRegexp {
analyzer?: string
pattern: string
use_field?: Field
}
export interface QueryDslIntervalsWildcard {
analyzer?: string
pattern: string
@ -6538,7 +6665,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
export interface QueryDslRuleQuery extends QueryDslQueryBase {
organic: QueryDslQueryContainer
ruleset_ids: Id[]
ruleset_ids?: Id | Id[]
ruleset_id?: string
match_criteria: any
}
@ -6952,6 +7080,10 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' |
export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[]
export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 
'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string
export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[]
export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
}
@ -8142,7 +8274,7 @@ export interface CatNodesRequest extends CatCatRequestBase {
bytes?: Bytes
full_id?: boolean | string
include_unloaded_segments?: boolean
h?: Names
h?: CatCatNodeColumns
s?: Names
master_timeout?: Duration
time?: TimeUnit
@ -9529,6 +9661,39 @@ export interface ClusterStateRequest extends RequestBase {
export type ClusterStateResponse = any
export interface ClusterStatsCCSStats {
clusters?: Record<string, ClusterStatsRemoteClusterInfo>
_search: ClusterStatsCCSUsageStats
_esql?: ClusterStatsCCSUsageStats
}
export interface ClusterStatsCCSUsageClusterStats {
total: integer
skipped: integer
took: ClusterStatsCCSUsageTimeValue
}
export interface ClusterStatsCCSUsageStats {
total: integer
success: integer
skipped: integer
took: ClusterStatsCCSUsageTimeValue
took_mrt_true?: ClusterStatsCCSUsageTimeValue
took_mrt_false?: ClusterStatsCCSUsageTimeValue
remotes_per_search_max: integer
remotes_per_search_avg: double
failure_reasons: Record<string, integer>
features: Record<string, integer>
clients: Record<string, integer>
clusters: Record<string, ClusterStatsCCSUsageClusterStats>
}
export interface ClusterStatsCCSUsageTimeValue {
max: DurationValue<UnitMillis>
avg: DurationValue<UnitMillis>
p90: DurationValue<UnitMillis>
}
export interface ClusterStatsCharFilterTypes {
analyzer_types: ClusterStatsFieldTypes[]
built_in_analyzers: ClusterStatsFieldTypes[]
@ -9752,6 +9917,24 @@ export interface ClusterStatsOperatingSystemMemoryInfo {
used_percent: integer
}
export interface ClusterStatsRemoteClusterInfo {
cluster_uuid: string
mode: string
skip_unavailable: boolean
transport_compress: string
status: HealthStatus
version: VersionString[]
nodes_count: integer
shards_count: integer
indices_count: integer
indices_total_size_in_bytes: long
indices_total_size?: string
max_heap_in_bytes: long
max_heap?: string
mem_total_in_bytes: long
mem_total?: string
}
export interface ClusterStatsRequest extends RequestBase {
node_id?: NodeIds
include_remotes?: boolean
@ -9784,6 +9967,7 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
nodes: ClusterStatsClusterNodes
status: HealthStatus
timestamp: long
ccs: ClusterStatsCCSStats
}
export interface ConnectorConnector {
@ -10605,8 +10789,6 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
delimiter?: string
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
keep_on_completion?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
columnar?: boolean
@ -10618,6 +10800,8 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
tables?: Record<string, Record<string, EsqlTableValuesContainer>>
include_ccs_metadata?: boolean
wait_for_completion_timeout?: Duration
keep_alive?: Duration
keep_on_completion?: boolean
}
}
@ -10632,6 +10816,7 @@ export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase
export interface EsqlAsyncQueryGetRequest extends RequestBase {
id: Id
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
wait_for_completion_timeout?: Duration
}
@ -10995,7 +11180,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged {
lifecycle_date?: DateTime
lifecycle_date_millis?: EpochTime<UnitMillis>
managed: true
phase: Name
phase?: Name
phase_time?: DateTime
phase_time_millis?: EpochTime<UnitMillis>
policy?: Name
@ -12090,10 +12275,6 @@ export interface IndicesGetRequest extends RequestBase {
export type IndicesGetResponse = Record<IndexName, IndicesIndexState>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasRequest extends RequestBase {
name?: Names
index?: Indices
@ -12105,6 +12286,17 @@ export interface IndicesGetAliasRequest extends RequestBase {
export type IndicesGetAliasResponse = Record<IndexName, IndicesGetAliasIndexAliases>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasNotFoundAliasesKeys {
error: string
status: number
}
export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys
& { [property: string]: IndicesGetAliasIndexAliases | string | number }
export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
name: DataStreamName
lifecycle?: IndicesDataStreamLifecycleWithRollover
@ -13341,6 +13533,86 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
task_type: InferenceTaskType
}
export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAlibabaCloudAI
}
export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAmazonBedrock
}
export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAnthropic
}
export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAzureAIStudio
}
export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeAzureOpenAI
}
export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeCohere
}
export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeELSER
}
export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeElasticsearch
}
export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeGoogleAIStudio
}
export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeGoogleVertexAI
}
export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeHuggingFace
}
export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeJinaAi
}
export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeMistral
}
export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeOpenAI
}
export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeVoyageAI
}
export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint {
inference_id: string
task_type: InferenceTaskTypeWatsonx
}
export interface InferenceInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding_bits?: InferenceTextEmbeddingByteResult[]
@ -13449,6 +13721,38 @@ export type InferenceTaskSettings = any
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'
export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding'
export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion'
export type InferenceTaskTypeAnthropic = 'completion'
export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'
export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'
export type InferenceTaskTypeELSER = 'sparse_embedding'
export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank'
export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeHuggingFace = 'text_embedding'
export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'
export type InferenceTaskTypeMistral = 'text_embedding'
export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'
export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeWatsonx = 'text_embedding'
export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
}
@ -13578,7 +13882,7 @@ export interface InferencePutAlibabacloudRequest extends RequestBase {
}
}
export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo
export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI
export interface InferencePutAmazonbedrockRequest extends RequestBase {
task_type: InferenceAmazonBedrockTaskType
@ -13592,7 +13896,7 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase {
}
}
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock
export interface InferencePutAnthropicRequest extends RequestBase {
task_type: InferenceAnthropicTaskType
@ -13606,7 +13910,7 @@ export interface InferencePutAnthropicRequest extends RequestBase {
}
}
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic
export interface InferencePutAzureaistudioRequest extends RequestBase {
task_type: InferenceAzureAiStudioTaskType
@ -13620,7 +13924,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase {
}
}
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio
export interface InferencePutAzureopenaiRequest extends RequestBase {
task_type: InferenceAzureOpenAITaskType
@ -13634,7 +13938,7 @@ export interface InferencePutAzureopenaiRequest extends RequestBase {
}
}
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI
export interface InferencePutCohereRequest extends RequestBase {
task_type: InferenceCohereTaskType
@ -13648,7 +13952,7 @@ export interface InferencePutCohereRequest extends RequestBase {
}
}
export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
export interface InferencePutElasticsearchRequest extends RequestBase {
task_type: InferenceElasticsearchTaskType
@ -13662,7 +13966,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase {
}
}
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
export interface InferencePutElserRequest extends RequestBase {
task_type: InferenceElserTaskType
@ -13675,7 +13979,7 @@ export interface InferencePutElserRequest extends RequestBase {
}
}
export type InferencePutElserResponse = InferenceInferenceEndpointInfo
export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
export interface InferencePutGoogleaistudioRequest extends RequestBase {
task_type: InferenceGoogleAiStudioTaskType
@ -13688,7 +13992,7 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase {
}
}
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
export interface InferencePutGooglevertexaiRequest extends RequestBase {
task_type: InferenceGoogleVertexAITaskType
@ -13702,7 +14006,7 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase {
}
}
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI
export interface InferencePutHuggingFaceRequest extends RequestBase {
task_type: InferenceHuggingFaceTaskType
@ -13715,7 +14019,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
}
}
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
export interface InferencePutJinaaiRequest extends RequestBase {
task_type: InferenceJinaAITaskType
@ -13729,7 +14033,7 @@ export interface InferencePutJinaaiRequest extends RequestBase {
}
}
export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfo
export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi
export interface InferencePutMistralRequest extends RequestBase {
task_type: InferenceMistralTaskType
@ -13742,7 +14046,7 @@ export interface InferencePutMistralRequest extends RequestBase {
}
}
export type InferencePutMistralResponse = InferenceInferenceEndpointInfo
export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
export interface InferencePutOpenaiRequest extends RequestBase {
task_type: InferenceOpenAITaskType
@ -13756,7 +14060,7 @@ export interface InferencePutOpenaiRequest extends RequestBase {
}
}
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
export interface InferencePutVoyageaiRequest extends RequestBase {
task_type: InferenceVoyageAITaskType
@ -13770,7 +14074,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
}
}
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI
export interface InferencePutWatsonxRequest extends RequestBase {
task_type: InferenceWatsonxTaskType
@ -13782,7 +14086,7 @@ export interface InferencePutWatsonxRequest extends RequestBase {
}
}
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
export interface InferenceRerankRequest extends RequestBase {
inference_id: Id
@ -14175,16 +14479,18 @@ export interface IngestPipelineProcessor extends IngestProcessorBase {
ignore_missing_pipeline?: boolean
}
export interface IngestPipelineSimulation {
export interface IngestPipelineProcessorResult {
doc?: IngestDocumentSimulation
tag?: string
processor_type?: string
status?: WatcherActionStatusOptions
status?: IngestPipelineSimulationStatusOptions
description?: string
ignored_error?: ErrorCause
error?: ErrorCause
}
export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped'
export interface IngestProcessorBase {
description?: string
if?: string
@ -14306,7 +14612,7 @@ export type IngestShapeType = 'geo_shape' | 'shape'
export interface IngestSimulateDocumentResult {
doc?: IngestDocumentSimulation
error?: ErrorCause
processor_results?: IngestPipelineSimulation[]
processor_results?: IngestPipelineProcessorResult[]
}
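A sketch of reading the renamed per-processor results from a verbose pipeline simulation; the pipeline body and sample document are invented, and `verbose: true` is assumed to be what populates processor_results, mirroring the REST API:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // illustrative URL

async function simulatePipeline (): Promise<void> {
  const sim = await client.ingest.simulate({
    pipeline: { processors: [{ set: { field: 'greeting', value: 'hello' } }] },
    docs: [{ _source: { name: 'test' } }],
    verbose: true
  })
  for (const doc of sim.docs) {
    // each step is an IngestPipelineProcessorResult with the new status union
    for (const step of doc.processor_results ?? []) {
      console.log(step.processor_type, step.status)
    }
  }
}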
export interface IngestSortProcessor extends IngestProcessorBase {
@ -20322,6 +20628,14 @@ export interface SlmSnapshotLifecycle {
stats: SlmStatistics
}
export interface SlmSnapshotPolicyStats {
policy: string
snapshots_taken: long
snapshots_failed: long
snapshots_deleted: long
snapshot_deletion_failures: long
}
export interface SlmStatistics {
retention_deletion_time?: Duration
retention_deletion_time_millis?: DurationValue<UnitMillis>
@ -20387,7 +20701,7 @@ export interface SlmGetStatsResponse {
total_snapshot_deletion_failures: long
total_snapshots_failed: long
total_snapshots_taken: long
policy_stats: string[]
policy_stats: SlmSnapshotPolicyStats[]
}
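A short sketch of consuming the corrected `policy_stats` field, which is now a structured array rather than a list of strings (node URL is illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // illustrative URL

async function printSlmStats (): Promise<void> {
  const stats = await client.slm.getStats()
  for (const p of stats.policy_stats) {
    console.log(p.policy, p.snapshots_taken, p.snapshots_failed, p.snapshot_deletion_failures)
  }
}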
export interface SlmGetStatusRequest extends RequestBase {
@ -20582,7 +20896,7 @@ export interface SnapshotSnapshotShardFailure {
index: IndexName
node_id?: Id
reason: string
shard_id: Id
shard_id: integer
index_uuid: Id
status: string
}

View File

@ -78,6 +78,13 @@ export interface NodeOptions {
ssl?: TlsConnectionOptions
/** @property headers Custom HTTP headers that should be sent with each request */
headers?: Record<string, any>
/** @property roles Common Elasticsearch roles that can be assigned to this node. Can be helpful when writing custom nodeFilter or nodeSelector functions. */
roles?: {
master: boolean
data: boolean
ingest: boolean
ml: boolean
}
}
export interface ClientOptions {
@ -135,7 +142,7 @@ export interface ClientOptions {
* @defaultValue null */
agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false
/** @property nodeFilter A custom function used by the connection pool to determine which nodes are qualified to receive a request
* @defaultValue () => true */
* @defaultValue A function that uses the Connection `roles` property to avoid master-only nodes */
nodeFilter?: nodeFilterFn
/** @property nodeSelector A custom function used by the connection pool to determine which node should receive the next request
* @defaultValue A "round robin" function that loops sequentially through each node in the pool. */
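A minimal sketch of a custom nodeFilter along the lines of the new default described above, i.e. skipping master-only nodes; it assumes the connection passed to the filter exposes the same `roles` shape as NodeOptions, so the parameter is left loosely typed:

import { Client } from '@elastic/elasticsearch'

const client = new Client({
  nodes: [
    { url: new URL('http://node1:9200'), roles: { master: true, data: false, ingest: false, ml: false } },
    { url: new URL('http://node2:9200'), roles: { master: true, data: true, ingest: true, ml: false } }
  ],
  // route requests away from master-only nodes; if no roles are declared,
  // treat the node as eligible
  nodeFilter: (connection: any): boolean => {
    const roles = connection.roles
    if (roles == null) return true
    return !(roles.master === true && roles.data === false && roles.ingest === false && roles.ml === false)
  }
})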

View File

@ -25,7 +25,7 @@ import assert from 'node:assert'
import * as timersPromises from 'node:timers/promises'
import { Readable } from 'node:stream'
import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport'
import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node'
import { Table, TypeMap, tableFromIPC, AsyncRecordBatchStreamReader } from 'apache-arrow/Arrow.node'
import Client from './client'
import * as T from './api/types'
import { Id } from './api/types'
@ -158,7 +158,7 @@ export interface EsqlResponse {
export interface EsqlHelper {
toRecords: <TDocument>() => Promise<EsqlToRecords<TDocument>>
toArrowTable: () => Promise<Table<TypeMap>>
toArrowReader: () => Promise<RecordBatchStreamReader>
toArrowReader: () => Promise<AsyncRecordBatchStreamReader>
}
export interface EsqlToRecords<TDocument> {
@ -1023,7 +1023,7 @@ export default class Helpers {
return tableFromIPC(response)
},
async toArrowReader (): Promise<RecordBatchStreamReader> {
async toArrowReader (): Promise<AsyncRecordBatchStreamReader> {
if (metaHeader !== null) {
reqOptions.headers = reqOptions.headers ?? {}
reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa`
@ -1032,8 +1032,9 @@ export default class Helpers {
params.format = 'arrow'
const response = await client.esql.query(params, reqOptions)
return RecordBatchStreamReader.from(response)
// @ts-expect-error response is a Readable when asStream is true
const response: Readable = await client.esql.query(params, reqOptions)
return await AsyncRecordBatchStreamReader.from(Readable.from(response))
}
}
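A sketch of consuming the helper after this change: the reader is now asynchronous, so batches have to be pulled with `for await` rather than a synchronous loop (node URL is illustrative, and the query reuses the one from the tests further down):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // illustrative URL

async function printRows (): Promise<void> {
  const reader = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
  for await (const batch of reader) {
    for (const row of batch) {
      console.log(row.toJSON())
    }
  }
}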

View File

@ -24,436 +24,63 @@ process.on('unhandledRejection', function (err) {
process.exit(1)
})
const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs')
const { join, sep } = require('path')
const yaml = require('js-yaml')
const minimist = require('minimist')
const ms = require('ms')
const { Client } = require('../../index')
const build = require('./test-runner')
const { sleep } = require('./helper')
const createJunitReporter = require('./reporter')
const assert = require('node:assert')
const url = require('node:url')
const fs = require('node:fs')
const path = require('node:path')
const globby = require('globby')
const semver = require('semver')
const downloadArtifacts = require('../../scripts/download-artifacts')
const yamlFolder = downloadArtifacts.locations.freeTestFolder
const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder
const buildTests = require('./test-builder')
const MAX_API_TIME = 1000 * 90
const MAX_FILE_TIME = 1000 * 30
const MAX_TEST_TIME = 1000 * 3
const yamlFolder = downloadArtifacts.locations.testYamlFolder
const options = minimist(process.argv.slice(2), {
boolean: ['bail'],
string: ['suite', 'test'],
})
const freeSkips = {
// working on fixes for these
'/free/aggregations/bucket_selector.yml': ['bad script'],
'/free/aggregations/bucket_script.yml': ['bad script'],
// either the YAML test definition is wrong, or this fails because JSON.stringify is coercing "1.0" to "1"
'/free/aggregations/percentiles_bucket.yml': ['*'],
// not supported yet
'/free/cluster.desired_nodes/10_basic.yml': ['*'],
// Cannot find methods on `Internal` object
'/free/cluster.desired_balance/10_basic.yml': ['*'],
'/free/cluster.desired_nodes/20_dry_run.yml': ['*'],
'/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'],
// the v8 client never sends the scroll_id in querystring,
// the way the test is structured causes a security exception
'free/scroll/10_basic.yml': ['Body params override query string'],
'free/scroll/11_clear.yml': [
'Body params with array param override query string',
'Body params with string param scroll id override query string'
],
'free/cat.allocation/10_basic.yml': ['*'],
'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'],
'indices.stats/50_disk_usage.yml': ['Disk usage stats'],
'indices.stats/60_field_usage.yml': ['Field usage stats'],
// skipping because we are booting ES with `discovery.type=single-node`
// and this test will fail because of this configuration
'nodes.stats/30_discovery.yml': ['*'],
// the expected error is returning a 503,
// which triggers a retry and the node to be marked as dead
'search.aggregation/240_max_buckets.yml': ['*'],
// long values and json do not play nicely together
'search.aggregation/40_range.yml': ['Min and max long range bounds'],
// the yaml runner assumes that null means "does not exists",
// while null is a valid json value, so the check will fail
'search/320_disallow_queries.yml': ['Test disallow expensive queries'],
'free/tsdb/90_unsupported_operations.yml': ['noop update'],
}
const platinumDenyList = {
'api_key/10_basic.yml': ['Test get api key'],
'api_key/20_query.yml': ['*'],
'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'],
'analytics/histogram.yml': ['Histogram requires values in increasing order'],
// object keys must be strings, and `0.0.toString()` is `0`
'ml/evaluate_data_frame.yml': [
'Test binary_soft_classifition precision',
'Test binary_soft_classifition recall',
'Test binary_soft_classifition confusion_matrix'
],
// The cleanup fails with a index not found when retrieving the jobs
'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'],
'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'],
// start should be a string
'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'],
// this can't happen with the client
'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'],
'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'],
'ml/preview_datafeed.yml': ['*'],
// Investigate why this is failing
'ml/inference_crud.yml': ['*'],
'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'],
'ml/filter_crud.yml': ['*'],
// investigate why this is failing
'monitoring/bulk/10_basic.yml': ['*'],
'monitoring/bulk/20_privileges.yml': ['*'],
'license/20_put_license.yml': ['*'],
'snapshot/10_basic.yml': ['*'],
'snapshot/20_operator_privileges_disabled.yml': ['*'],
// the body is correct, but the regex is failing
'sql/sql.yml': ['Getting textual representation'],
'searchable_snapshots/10_usage.yml': ['*'],
'service_accounts/10_basic.yml': ['*'],
// we are setting two certificates in the docker config
'ssl/10_basic.yml': ['*'],
'token/10_basic.yml': ['*'],
'token/11_invalidation.yml': ['*'],
// very likely, the index template has not been loaded yet.
// we should run a indices.existsTemplate, but the name of the
// template may vary during time.
'transforms_crud.yml': [
'Test basic transform crud',
'Test transform with query and array of indices in source',
'Test PUT continuous transform',
'Test PUT continuous transform without delay set'
],
'transforms_force_delete.yml': [
'Test force deleting a running transform'
],
'transforms_cat_apis.yml': ['*'],
'transforms_start_stop.yml': ['*'],
'transforms_stats.yml': ['*'],
'transforms_stats_continuous.yml': ['*'],
'transforms_update.yml': ['*'],
// js does not support ulongs
'unsigned_long/10_basic.yml': ['*'],
'unsigned_long/20_null_value.yml': ['*'],
'unsigned_long/30_multi_fields.yml': ['*'],
'unsigned_long/40_different_numeric.yml': ['*'],
'unsigned_long/50_script_values.yml': ['*'],
// the v8 client flattens the body into the parent object
'platinum/users/10_basic.yml': ['Test put user with different username in body'],
// docker issue?
'watcher/execute_watch/60_http_input.yml': ['*'],
// the checks are correct, but for some reason the test is failing on js side
// I bet it's because of the backslashes in the regex
'watcher/execute_watch/70_invalid.yml': ['*'],
'watcher/put_watch/10_basic.yml': ['*'],
'xpack/15_basic.yml': ['*'],
// test that are failing that needs to be investigated
// the error cause can either be in the yaml test or in the specification
// start should be a string in the yaml test
'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'],
'platinum/ml/evaluate_data_frame.yml': ['*'],
'platinum/ml/get_datafeed_stats.yml': ['*'],
// start should be a string in the yaml test
'platinum/ml/start_stop_datafeed.yml': ['*'],
}
function runner (opts = {}) {
const options = { node: opts.node }
if (opts.isXPack) {
options.tls = {
ca: readFileSync(join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'),
rejectUnauthorized: false
const getAllFiles = async dir => {
const files = await globby(dir, {
expandDirectories: {
extensions: ['yml', 'yaml']
}
}
const client = new Client(options)
log('Loading yaml suite')
start({ client, isXPack: opts.isXPack })
.catch(err => {
if (err.name === 'ResponseError') {
console.error(err)
console.log(JSON.stringify(err.meta, null, 2))
} else {
console.error(err)
}
process.exit(1)
})
})
return files.sort()
}
async function waitCluster (client, times = 0) {
try {
await client.cluster.health({ wait_for_status: 'green', timeout: '50s' })
} catch (err) {
if (++times < 10) {
await sleep(5000)
return waitCluster(client, times)
}
console.error(err)
process.exit(1)
}
}
async function start ({ client, isXPack }) {
log('Waiting for Elasticsearch')
await waitCluster(client)
const body = await client.info()
const { number: version, build_hash: hash } = body.version
log(`Downloading artifacts for hash ${hash}...`)
await downloadArtifacts({ hash, version })
log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`)
const junit = createJunitReporter()
const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`)
const stats = {
total: 0,
skip: 0,
pass: 0,
assertions: 0
}
const folders = getAllFiles(isXPack ? xPackYamlFolder : yamlFolder)
.filter(t => !/(README|TODO)/g.test(t))
// we cluster the array based on the folder names,
// to provide a better test log output
.reduce((arr, file) => {
const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/'))
let inserted = false
for (let i = 0; i < arr.length; i++) {
if (arr[i][0].includes(path)) {
inserted = true
arr[i].push(file)
break
}
}
if (!inserted) arr.push([file])
return arr
}, [])
const totalTime = now()
for (const folder of folders) {
// pretty name
const apiName = folder[0].slice(
folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19,
folder[0].lastIndexOf(sep)
)
log('Testing ' + apiName.slice(1))
const apiTime = now()
for (const file of folder) {
const testRunner = build({
client,
version,
isXPack: file.includes('platinum')
})
const fileTime = now()
const data = readFileSync(file, 'utf8')
// get the test yaml (as object), some file has multiple yaml documents inside,
// every document is separated by '---', so we split on the separator
// and then we remove the empty strings, finally we parse them
const tests = data
.split('\n---\n')
.map(s => s.trim())
// empty strings
.filter(Boolean)
.map(parse)
// null values
.filter(Boolean)
// get setup and teardown if present
let setupTest = null
let teardownTest = null
for (const test of tests) {
if (test.setup) setupTest = test.setup
if (test.teardown) teardownTest = test.teardown
}
const cleanPath = file.slice(file.lastIndexOf(apiName))
// skip if --suite CLI arg doesn't match
if (options.suite && !cleanPath.endsWith(options.suite)) continue
log(' ' + cleanPath)
const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath)
for (const test of tests) {
const testTime = now()
const name = Object.keys(test)[0]
// skip setups, teardowns and anything that doesn't match --test flag when present
if (name === 'setup' || name === 'teardown') continue
if (options.test && !name.endsWith(options.test)) continue
const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}: ${cleanPath}`)
stats.total += 1
if (shouldSkip(isXPack, file, name)) {
stats.skip += 1
junitTestCase.skip('This test is in the skip list of the client')
junitTestCase.end()
continue
}
log(' - ' + name)
try {
await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase)
stats.pass += 1
} catch (err) {
junitTestCase.failure(err)
junitTestCase.end()
junitTestSuite.end()
junitTestSuites.end()
generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free')
err.meta = JSON.stringify(err.meta ?? {}, null, 2)
console.error(err)
if (options.bail) {
process.exit(1)
} else {
continue
}
}
const totalTestTime = now() - testTime
junitTestCase.end()
if (totalTestTime > MAX_TEST_TIME) {
log(' took too long: ' + ms(totalTestTime))
} else {
log(' took: ' + ms(totalTestTime))
}
}
junitTestSuite.end()
const totalFileTime = now() - fileTime
if (totalFileTime > MAX_FILE_TIME) {
log(` ${cleanPath} took too long: ` + ms(totalFileTime))
} else {
log(` ${cleanPath} took: ` + ms(totalFileTime))
}
}
const totalApiTime = now() - apiTime
if (totalApiTime > MAX_API_TIME) {
log(`${apiName} took too long: ` + ms(totalApiTime))
} else {
log(`${apiName} took: ` + ms(totalApiTime))
}
}
junitTestSuites.end()
generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free')
log(`Total testing time: ${ms(now() - totalTime)}`)
log(`Test stats:
- Total: ${stats.total}
- Skip: ${stats.skip}
- Pass: ${stats.pass}
- Fail: ${stats.total - (stats.pass + stats.skip)}
- Assertions: ${stats.assertions}
`)
}
function log (text) {
process.stdout.write(text + '\n')
}
function now () {
const ts = process.hrtime()
return (ts[0] * 1e3) + (ts[1] / 1e6)
}
function parse (data) {
let doc
try {
doc = yaml.load(data, { schema: yaml.CORE_SCHEMA })
} catch (err) {
console.error(err)
return
}
return doc
}
function generateJunitXmlReport (junit, suite) {
writeFileSync(
join(__dirname, '..', '..', `${suite}-report-junit.xml`),
junit.prettyPrint()
)
async function doTestBuilder (version, clientOptions) {
await downloadArtifacts(undefined, version)
const files = await getAllFiles(yamlFolder)
await buildTests(files, clientOptions)
}
if (require.main === module) {
const scheme = process.env.TEST_SUITE === 'platinum' ? 'https' : 'http'
const node = process.env.TEST_ES_SERVER || `${scheme}://elastic:changeme@localhost:9200`
const opts = {
node,
isXPack: process.env.TEST_SUITE !== 'free'
const node = process.env.TEST_ES_SERVER
const apiKey = process.env.ES_API_SECRET_KEY
const password = process.env.ELASTIC_PASSWORD
let version = process.env.STACK_VERSION
assert(node != null, 'Environment variable missing: TEST_ES_SERVER')
assert(apiKey != null || password != null, 'Environment variable missing: ES_API_SECRET_KEY or ELASTIC_PASSWORD')
assert(version != null, 'Environment variable missing: STACK_VERSION')
version = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version)
const clientOptions = { node }
if (apiKey != null) {
clientOptions.auth = { apiKey }
} else {
clientOptions.auth = { username: 'elastic', password }
}
runner(opts)
}
const shouldSkip = (isXPack, file, name) => {
if (options.suite || options.test) return false
let list = Object.keys(freeSkips)
for (let i = 0; i < list.length; i++) {
const freeTest = freeSkips[list[i]]
for (let j = 0; j < freeTest.length; j++) {
if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) {
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
log(`Skipping test ${testName} because it is denylisted in the free test suite`)
return true
}
const nodeUrl = new url.URL(node)
if (nodeUrl.protocol === 'https:') {
clientOptions.tls = {
ca: fs.readFileSync(path.join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'),
rejectUnauthorized: false
}
}
if (file.includes('x-pack') || isXPack) {
list = Object.keys(platinumDenyList)
for (let i = 0; i < list.length; i++) {
const platTest = platinumDenyList[list[i]]
for (let j = 0; j < platTest.length; j++) {
if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) {
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
log(`Skipping test ${testName} because it is denylisted in the platinum test suite`)
return true
}
}
}
}
return false
doTestBuilder(version, clientOptions)
.then(() => process.exit(0))
.catch(err => {
console.error(err)
process.exit(1)
})
}
const getAllFiles = dir =>
readdirSync(dir).reduce((files, file) => {
const name = join(dir, file)
const isDirectory = statSync(name).isDirectory()
return isDirectory ? [...files, ...getAllFiles(name)] : [...files, name]
}, [])
module.exports = runner

View File

@ -1,110 +0,0 @@
'use strict'
const assert = require('node:assert')
const { create } = require('xmlbuilder2')
function createJunitReporter () {
const report = {}
return { testsuites, prettyPrint }
function prettyPrint () {
return create(report).end({ prettyPrint: true })
}
function testsuites (name) {
assert(name, 'The testsuites name is required')
assert(report.testsuites === undefined, 'Cannot set more than one testsuites block')
const startTime = Date.now()
report.testsuites = {
'@id': new Date().toISOString(),
'@name': name
}
const testsuiteList = []
return {
testsuite: createTestSuite(testsuiteList),
end () {
report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000)
report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => {
acc += val['@tests']
return acc
}, 0)
report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => {
acc += val['@failures']
return acc
}, 0)
report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => {
acc += val['@skipped']
return acc
}, 0)
if (testsuiteList.length) {
report.testsuites.testsuite = testsuiteList
}
}
}
}
function createTestSuite (testsuiteList) {
return function testsuite (name) {
assert(name, 'The testsuite name is required')
const startTime = Date.now()
const suite = {
'@id': new Date().toISOString(),
'@name': name
}
const testcaseList = []
testsuiteList.push(suite)
return {
testcase: createTestCase(testcaseList),
end () {
suite['@time'] = Math.round((Date.now() - startTime) / 1000)
suite['@tests'] = testcaseList.length
suite['@failures'] = testcaseList.filter(t => t.failure).length
suite['@skipped'] = testcaseList.filter(t => t.skipped).length
if (testcaseList.length) {
suite.testcase = testcaseList
}
}
}
}
}
function createTestCase (testcaseList) {
return function testcase (name, file) {
assert(name, 'The testcase name is required')
const startTime = Date.now()
const tcase = {
'@id': new Date().toISOString(),
'@name': name
}
if (file) tcase['@file'] = file
testcaseList.push(tcase)
return {
failure (error) {
assert(error, 'The failure error object is required')
tcase.failure = {
'#': error.stack,
'@message': error.message,
'@type': error.code
}
},
skip (reason) {
if (typeof reason !== 'string') {
reason = JSON.stringify(reason, null, 2)
}
tcase.skipped = {
'#': reason
}
},
end () {
tcase['@time'] = Math.round((Date.now() - startTime) / 1000)
}
}
}
}
}
module.exports = createJunitReporter

View File

@ -0,0 +1,482 @@
/*
* Copyright Elasticsearch B.V. and contributors
* SPDX-License-Identifier: Apache-2.0
*/
'use strict'
const { join, sep } = require('node:path')
const { readFileSync, writeFileSync, promises } = require('node:fs')
const yaml = require('js-yaml')
const { rimraf } = require('rimraf')
const { mkdir } = promises
const generatedTestsPath = join(__dirname, '..', '..', 'generated-tests')
const stackSkips = [
// test definition bug: response is empty string
'cat/fielddata.yml',
// test definition bug: response is empty string
'cluster/delete_voting_config_exclusions.yml',
// test definition bug: response is empty string
'cluster/voting_config_exclusions.yml',
// client bug: ILM request takes a "body" param, but "body" is a special keyword in the JS client
'ilm/10_basic.yml',
// health report is... not healthy
'health_report.yml',
// TODO: `contains` action only supports checking for primitives inside arrays or strings inside strings, not referenced values like objects inside arrays
'entsearch/10_basic.yml',
// test definition bug: error message does not match
'entsearch/30_sync_jobs_stack.yml',
// no handler found for uri [/knn_test/_knn_search]
'knn_search.yml',
// TODO: fix license on ES startup - "Operation failed: Current license is basic."
'license/10_stack.yml',
// response.body should be truthy. found: ""
'logstash/10_basic.yml',
// test definition bug? security_exception: unable to authenticate user [x_pack_rest_user] for REST request [/_ml/trained_models/test_model/definition/0]
'machine_learning/clear_tm_deployment_cache.yml',
// client bug: 0.99995 does not equal 0.5
'machine_learning/data_frame_evaluate.yml',
// test definition bug? regex has whitespace, maybe needs to be removed
'machine_learning/explain_data_frame_analytics.yml',
// client bug: 4 != 227
'machine_learning/preview_datafeed.yml',
// test definition bug: error message does not match
'machine_learning/revert_model_snapshot.yml',
// test definition bug: error message does not match
'machine_learning/update_model_snapshot.yml',
// version_conflict_engine_exception
'machine_learning/jobs_crud.yml',
// test definition bug: error message does not match
'machine_learning/model_snapshots.yml',
// test definition bug: error message does not match
'query_rules/30_test.yml',
// client bug: 0 != 0.1
'script/10_basic.yml',
// client bug: request takes a "body" param, but "body" is a special keyword in the JS client
'searchable_snapshots/10_basic.yml',
// test builder bug: does `match` action need to support "array contains value"?
'security/10_api_key_basic.yml',
// test definition bug: error message does not match
'security/140_user.yml',
// test definition bug: error message does not match
'security/30_privileges_stack.yml',
// unknown issue: $profile.enabled path doesn't exist in response
'security/130_user_profile.yml',
// test definition bug: error message does not match
'security/change_password.yml',
// test builder bug: media_type_header_exception
'simulate/ingest.yml',
// client bug: request takes a "body" param, but "body" is a special keyword in the JS client
'snapshot/10_basic.yml',
// test definition bug: illegal_argument_exception
'sql/10_basic.yml',
// test definition bug: illegal_argument_exception
'text_structure/10_basic.yml',
// test definition bug: illegal_argument_exception
'transform/10_basic.yml',
]
const serverlessSkips = [
// TODO: sql.getAsync does not set a content-type header but ES expects one
// transport only sets a content-type if the body is not empty
'sql/10_basic.yml',
// TODO: bulk call in setup fails due to "malformed action/metadata line"
// bulk body is being sent as a Buffer, unsure if related.
'transform/10_basic.yml',
// TODO: scripts_painless_execute expects {"result":"0.1"}, gets {"result":"0"}
// body sent as Buffer, unsure if related
'script/10_basic.yml',
// TODO: expects {"outlier_detection.auc_roc.value":0.99995}, gets {"outlier_detection.auc_roc.value":0.5}
// remove if/when https://github.com/elastic/elasticsearch-clients-tests/issues/37 is resolved
'machine_learning/data_frame_evaluate.yml',
// TODO: Cannot perform requested action because job [job-crud-test-apis] is not open
'machine_learning/jobs_crud.yml',
// TODO: test runner needs to support ignoring 410 errors
'enrich/10_basic.yml',
// TODO: parameter `enabled` is not allowed in source
// Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55
'cluster/component_templates.yml',
// TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field`
'indices/simulate_template.yml',
'indices/simulate_index_template.yml',
// TODO: test currently times out
'inference/10_basic.yml',
// TODO: Fix: "Trained model deployment [test_model] is not allocated to any nodes"
'machine_learning/20_trained_model_serverless.yml',
// TODO: query_rules api not available yet
'query_rules/10_query_rules.yml',
'query_rules/20_rulesets.yml',
'query_rules/30_test.yml',
// TODO: security.putRole API not available
'security/50_roles_serverless.yml',
// TODO: expected undefined to equal 'some_table'
'entsearch/50_connector_updates.yml',
// TODO: resource_not_found_exception
'tasks_serverless.yml',
]
function parse (data) {
let doc
try {
doc = yaml.load(data, { schema: yaml.CORE_SCHEMA })
} catch (err) {
console.error(err)
return
}
return doc
}
async function build (yamlFiles, clientOptions) {
await rimraf(generatedTestsPath)
await mkdir(generatedTestsPath, { recursive: true })
for (const file of yamlFiles) {
const apiName = file.split(`${sep}tests${sep}`)[1]
const data = readFileSync(file, 'utf8')
const tests = data
.split('\n---\n')
.map(s => s.trim())
// empty strings
.filter(Boolean)
.map(parse)
// null values
.filter(Boolean)
let code = "import { test } from 'tap'\n"
code += "import { Client } from '@elastic/elasticsearch'\n\n"
const requires = tests.find(test => test.requires != null)
let skip = new Set()
if (requires != null) {
const { serverless = true, stack = true } = requires.requires
if (!serverless) skip.add('process.env.TEST_ES_SERVERLESS === "1"')
if (!stack) skip.add('process.env.TEST_ES_STACK === "1"')
}
if (stackSkips.includes(apiName)) skip.add('process.env.TEST_ES_STACK === "1"')
if (serverlessSkips.includes(apiName)) skip.add('process.env.TEST_ES_SERVERLESS === "1"')
if (skip.size > 0) {
code += `test('${apiName}', { skip: ${Array.from(skip).join(' || ')} }, t => {\n`
} else {
code += `test('${apiName}', t => {\n`
}
for (const test of tests) {
if (test.setup != null) {
code += ' t.before(async () => {\n'
code += indent(buildActions(test.setup), 4)
code += ' })\n\n'
}
if (test.teardown != null) {
code += ' t.after(async () => {\n'
code += indent(buildActions(test.teardown), 4)
code += ' })\n\n'
}
for (const key of Object.keys(test).filter(k => !['setup', 'teardown', 'requires'].includes(k))) {
if (test[key].find(action => Object.keys(action)[0] === 'skip') != null) {
code += ` t.test('${key}', { skip: true }, async t => {\n`
} else {
code += ` t.test('${key}', async t => {\n`
}
code += indent(buildActions(test[key]), 4)
code += '\n t.end()\n'
code += ' })\n'
}
// if (test.requires != null) requires = test.requires
}
code += '\n t.end()\n'
code += '})\n'
const testDir = join(generatedTestsPath, apiName.split(sep).slice(0, -1).join(sep))
const testFile = join(testDir, apiName.split(sep).pop().replace(/\.ya?ml$/, '.mjs'))
await mkdir(testDir, { recursive: true })
writeFileSync(testFile, code, 'utf8')
}
function buildActions (actions) {
let code = `const client = new Client(${JSON.stringify(clientOptions, null, 2)})\n`
code += 'let response\n\n'
const vars = new Set()
for (const action of actions) {
const key = Object.keys(action)[0]
switch (key) {
case 'do':
code += buildDo(action.do)
break
case 'set':
const setResult = buildSet(action.set, vars)
vars.add(setResult.varName)
code += setResult.code
break
case 'transform_and_set':
code += buildTransformAndSet(action.transform_and_set)
break
case 'match':
code += buildMatch(action.match)
break
case 'lt':
code += buildLt(action.lt)
break
case 'lte':
code += buildLte(action.lte)
break
case 'gt':
code += buildGt(action.gt)
break
case 'gte':
code += buildGte(action.gte)
break
case 'length':
code += buildLength(action.length)
break
case 'is_true':
code += buildIsTrue(action.is_true)
break
case 'is_false':
code += buildIsFalse(action.is_false)
break
case 'contains':
code += buildContains(action.contains)
break
case 'exists':
code += buildExists(action.exists)
break
case 'skip':
break
default:
console.warn(`Action not supported: ${key}`)
break
}
}
return code
}
}
function buildDo (action) {
let code = ''
const keys = Object.keys(action)
if (keys.includes('catch')) {
code += 'try {\n'
code += indent(buildRequest(action), 2)
code += '} catch (err) {\n'
code += ` t.match(err.toString(), ${buildValLiteral(action.catch)})\n`
code += '}\n'
} else {
code += buildRequest(action)
}
return code
}
function buildRequest(action) {
let code = ''
const options = { meta: true }
for (const key of Object.keys(action)) {
if (key === 'catch') continue
if (key === 'headers') {
options.headers = action.headers
continue
}
const params = action[key]
if (params.ignore != null) {
if (Array.isArray(params.ignore)) {
options.ignore = params.ignore
} else {
options.ignore = [params.ignore]
}
}
code += `response = await client.${toCamelCase(key)}(${buildApiParams(action[key])}, ${JSON.stringify(options)})\n`
}
return code
}
function buildSet (action, vars) {
const key = Object.keys(action)[0]
const varName = action[key]
const lookup = buildLookup(key)
let code = ''
if (vars.has(varName)) {
code = `${varName} = ${lookup}\n`
} else {
code = `let ${varName} = ${lookup}\n`
}
return { code, varName }
}
function buildTransformAndSet (action) {
return `// TODO buildTransformAndSet: ${JSON.stringify(action)}\n`
}
function buildMatch (action) {
const key = Object.keys(action)[0]
let lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.match(${lookup}, ${val})\n`
}
function buildLt (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.ok(${lookup} < ${val})\n`
}
function buildLte (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.ok(${lookup} <= ${val})\n`
}
function buildGt (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.ok(${lookup} > ${val})\n`
}
function buildGte (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.ok(${lookup} >= ${val})\n`
}
function buildLength (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
let code = ''
code += `if (typeof ${lookup} === 'object' && !Array.isArray(${lookup})) {\n`
code += ` t.equal(Object.keys(${lookup}).length, ${val})\n`
code += `} else {\n`
code += ` t.equal(${lookup}.length, ${val})\n`
code += `}\n`
return code
}
function buildIsTrue (action) {
let lookup = `${buildLookup(action)}`
let errMessage = `\`${action} should be truthy. found: '\$\{JSON.stringify(${lookup})\}'\``
if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be truthy. found: '\$\{${lookup}\}'\``
return `t.ok(${lookup} === "true" || (Boolean(${lookup}) && ${lookup} !== "false"), ${errMessage})\n`
}
function buildIsFalse (action) {
let lookup = `${buildLookup(action)}`
let errMessage = `\`${action} should be falsy. found: '\$\{JSON.stringify(${lookup})\}'\``
if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be falsy. found: '\$\{${lookup}\}'\``
return `t.ok(${lookup} === "false" || !Boolean(${lookup}), ${errMessage})\n`
}
function buildContains (action) {
const key = Object.keys(action)[0]
const lookup = buildLookup(key)
const val = buildValLiteral(action[key])
return `t.ok(${lookup}.includes(${val}), '${JSON.stringify(val)} not found in ${key}')\n`
}
function buildExists (keyName) {
const lookup = buildLookup(keyName)
return `t.ok(${lookup} != null, \`Key "${keyName}" not found in response body: \$\{JSON.stringify(response.body, null, 2)\}\`)\n`
}
function buildApiParams (params) {
if (Object.keys(params).length === 0) {
return 'undefined'
} else {
const out = {}
Object.keys(params).filter(k => k !== 'ignore' && k !== 'headers').forEach(k => out[k] = params[k])
return buildValLiteral(out)
}
}
function toCamelCase (name) {
return name.replace(/_([a-z])/g, g => g[1].toUpperCase())
}
function indent (str, spaces) {
const tabs = ' '.repeat(spaces)
return str.replace(/\s+$/, '').split('\n').map(l => `${tabs}${l}`).join('\n') + '\n'
}
function buildLookup (path) {
if (path === '$body') return '(typeof response.body === "string" ? response.body : JSON.stringify(response.body))'
const outPath = path.split('.').map(step => {
if (parseInt(step, 10).toString() === step) {
return `[${step}]`
} else if (step.match(/^\$[a-zA-Z0-9_]+$/)) {
const lookup = step.replace(/^\$/, '')
if (lookup === 'body') return ''
return `[${lookup}]`
} else if (step === '') {
return ''
} else {
return `['${step}']`
}
}).join('')
return `response.body${outPath}`
}
function buildValLiteral (val) {
if (typeof val === 'string') val = val.trim()
if (isRegExp(val)) {
return JSON.stringify(val).replace(/^"/, '').replace(/"$/, '').replaceAll('\\\\', '\\')
} else if (isVariable(val)) {
if (val === '$body') return 'JSON.stringify(response.body)'
return val.replace(/^\$/, '')
} else if (isPlainObject(val)) {
return JSON.stringify(cleanObject(val), null, 2).replace(/"\$([a-zA-Z0-9_]+)"/g, '$1')
} else {
return JSON.stringify(val)
}
}
function isRegExp (str) {
return typeof str === 'string' && str.startsWith('/') && str.endsWith('/')
}
function isVariable (str) {
return typeof str === 'string' && str.match(/^\$[a-zA-Z0-9_]+$/) != null
}
function cleanObject (obj) {
Object.keys(obj).forEach(key => {
let val = obj[key]
if (typeof val === 'string' && val.trim().startsWith('{') && val.trim().endsWith('}')) {
// attempt to parse as object
try {
val = JSON.parse(val)
} catch {
}
} else if (isPlainObject(val)) {
val = cleanObject(val)
} else if (Array.isArray(val)) {
val = val.map(item => isPlainObject(item) ? cleanObject(item) : item)
}
obj[key] = val
})
return obj
}
function isPlainObject(obj) {
return typeof obj === 'object' && !Array.isArray(obj) && obj != null
}
module.exports = build
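For orientation, this is roughly the kind of .mjs file the builder above writes for a single YAML test with one `do` and one `match` action; the file name, test names, request, and skip condition are invented, but the structure follows build(), buildActions(), buildRequest() and buildMatch():

// generated-tests/indices/example.mjs (illustrative output only)
import { test } from 'tap'
import { Client } from '@elastic/elasticsearch'

test('indices/example.yml', { skip: process.env.TEST_ES_SERVERLESS === "1" }, t => {
  t.test('Create an index', async t => {
    const client = new Client({
      "node": "https://localhost:9200"
    })
    let response

    response = await client.indices.create({"index":"my-index"}, {"meta":true})
    t.match(response.body['acknowledged'], true)

    t.end()
  })

  t.end()
})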

File diff suppressed because it is too large

View File

@ -77,6 +77,31 @@ test('Missing node(s)', t => {
t.end()
})
test('multi nodes with roles, using default node filter', async t => {
const client = new Client({
nodes: [
{
url: new URL('http://node1:9200'),
roles: { master: true, data: false, ingest: false, ml: false }
},
{
url: new URL('http://node2:9200'),
roles: { master: true, data: true, ingest: false, ml: false }
},
]
})
const conn = client.connectionPool.getConnection({
now: Date.now() + 1000 * 60 * 3,
requestId: 1,
name: 'elasticsearch-js',
context: null
})
t.equal(conn?.url.hostname, 'node2')
t.end()
})
test('Custom headers', t => {
const client = new Client({
node: 'http://localhost:9200',

View File

@ -172,17 +172,28 @@ test('ES|QL helper', t => {
t.end()
})
test('toArrowReader', t => {
t.test('Parses a binary response into an Arrow stream reader', async t => {
const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
test('toArrowReader', async t => {
const testRecords = [
{ amount: 4.900000095367432, },
{ amount: 8.199999809265137, },
{ amount: 15.5, },
{ amount: 9.899999618530273, },
{ amount: 13.899999618530273, },
]
// build reusable Arrow table
const table = arrow.tableFromJSON(testRecords)
const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
t.test('Parses a binary response into an Arrow stream reader', async t => {
const MockConnection = connection.buildMockConnection({
onRequest (_params) {
return {
body: Buffer.from(binaryContent, 'base64'),
body: Buffer.from(rawData),
statusCode: 200,
headers: {
'content-type': 'application/vnd.elasticsearch+arrow+stream'
'content-type': 'application/vnd.elasticsearch+arrow+stream',
'transfer-encoding': 'chunked'
}
}
}
@ -196,26 +207,28 @@ test('ES|QL helper', t => {
const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
t.ok(result.isStream())
const recordBatch = result.next().value
t.same(recordBatch.get(0)?.toJSON(), {
amount: 4.900000095367432,
date: 1729532586965,
})
let count = 0
for await (const recordBatch of result) {
for (const record of recordBatch) {
t.same(record.toJSON(), testRecords[count])
count++
}
}
t.end()
})
t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
const MockConnection = connection.buildMockConnection({
onRequest (params) {
const header = params.headers?.['x-elastic-client-meta'] ?? ''
t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`)
return {
body: Buffer.from(binaryContent, 'base64'),
body: Buffer.from(rawData),
statusCode: 200,
headers: {
'content-type': 'application/vnd.elasticsearch+arrow+stream'
'content-type': 'application/vnd.elasticsearch+arrow+stream',
'transfer-encoding': 'chunked'
}
}
}
@ -254,10 +267,12 @@ test('ES|QL helper', t => {
new arrow.RecordBatch(schema, batch3.data),
])
const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
const MockConnection = connection.buildMockConnection({
onRequest (_params) {
return {
body: Buffer.from(arrow.tableToIPC(table, "stream")),
body: Buffer.from(rawData),
statusCode: 200,
headers: {
'content-type': 'application/vnd.elasticsearch+arrow+stream'
@ -275,7 +290,7 @@ test('ES|QL helper', t => {
t.ok(result.isStream())
let counter = 0
for (const batch of result) {
for await (const batch of result) {
for (const row of batch) {
counter++
const { id, val } = row.toJSON()