Auto-generated API code (#2885)

This commit is contained in:
Elastic Machine
2025-06-30 18:46:05 +02:00
committed by GitHub
parent d6a4aebb87
commit 384898ac5d
10 changed files with 299 additions and 68 deletions

View File

@ -1577,54 +1577,6 @@ Internally, Elasticsearch translates a vector tile search API request into a sea
* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search
```
GET my-index/_search
{
"size": 10000,
"query": {
"geo_bounding_box": {
"my-geo-field": {
"top_left": {
"lat": -40.979898069620134,
"lon": -45
},
"bottom_right": {
"lat": -66.51326044311186,
"lon": 0
}
}
}
},
"aggregations": {
"grid": {
"geotile_grid": {
"field": "my-geo-field",
"precision": 11,
"size": 65536,
"bounds": {
"top_left": {
"lat": -40.979898069620134,
"lon": -45
},
"bottom_right": {
"lat": -66.51326044311186,
"lon": 0
}
}
}
},
"bounds": {
"geo_bounds": {
"field": "my-geo-field",
"wrap_longitude": false
}
}
}
}
```
The API returns results as a binary Mapbox vector tile.
Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
@ -1700,6 +1652,8 @@ Some cells may intersect more than one vector tile.
To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.
Learn how to use the vector tile search API with practical examples in the [Vector tile search examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search) guide.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt)
```ts
@ -4701,6 +4655,7 @@ A query ID is provided in the ES|QL async query API response for a query that do
A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`.
- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster.
When this period expires, the query and its results are deleted, even if the query is still ongoing.
- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish.
@ -6704,7 +6659,7 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields.
- **`properties` (Optional, Record<string, { type } \| { boost, fielddata, index, null_value, ignore_malformed, script, on_script_error, time_series_dimension, type } \| { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } \| { relations, eager_global_ordinals, type } \| { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } \| { type, fields, meta, copy_to } \| { type } \| { positive_score_impact, type } \| { positive_score_impact, type } \| { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } \| { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } \| { type } \| { type, null_value } \| { boost, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, type } \| { boost, fielddata, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, locale, type } \| { type, default_metric, metrics, time_series_metric } \| { type, dims, element_type, index, index_options, similarity } \| { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } \| { enabled, include_in_parent, include_in_root, type } \| { enabled, subobjects, type } \| { type, enabled, priority, time_series_dimension } \| { type, meta, inference_id, search_inference_id, chunking_settings } \| { type } \| { analyzer, contexts, 
max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } \| { value, type } \| { type, index } \| { path, type } \| { ignore_malformed, type } \| { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } \| { type } \| { analyzer, boost, index, null_value, enable_position_increments, type } \| { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } \| { coerce, ignore_malformed, ignore_z_value, index, orientation, strategy, type } \| { ignore_malformed, ignore_z_value, null_value, type } \| { coerce, ignore_malformed, ignore_z_value, orientation, type } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value, scaling_factor } \| { type, null_value } \| { type, null_value } \| { format, type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)**: Mapping for a field. For new fields, this mapping can include:
- **`properties` (Optional, Record<string, { type } \| { boost, fielddata, index, null_value, ignore_malformed, script, on_script_error, time_series_dimension, type } \| { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } \| { relations, eager_global_ordinals, type } \| { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } \| { type, fields, meta, copy_to } \| { type } \| { positive_score_impact, type } \| { positive_score_impact, type } \| { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } \| { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } \| { type } \| { type, null_value } \| { boost, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, type } \| { boost, fielddata, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, locale, type } \| { type, default_metric, metrics, time_series_metric } \| { type, dims, element_type, index, index_options, similarity } \| { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } \| { enabled, include_in_parent, include_in_root, type } \| { enabled, subobjects, type } \| { type, enabled, priority, time_series_dimension } \| { type, meta, inference_id, search_inference_id, index_options, chunking_settings } \| { type } \| { analyzer, contexts, 
max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } \| { value, type } \| { type, index } \| { path, type } \| { ignore_malformed, type } \| { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } \| { type } \| { analyzer, boost, index, null_value, enable_position_increments, type } \| { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } \| { coerce, ignore_malformed, ignore_z_value, index, orientation, strategy, type } \| { ignore_malformed, ignore_z_value, null_value, type } \| { coerce, ignore_malformed, ignore_z_value, orientation, type } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value } \| { type, null_value, scaling_factor } \| { type, null_value } \| { type, null_value } \| { format, type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)**: Mapping for a field. For new fields, this mapping can include:
- Field name
- Field data type
@ -6972,6 +6927,16 @@ client.indices.reloadSearchAnalyzers({ index })
- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
- **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable
## client.indices.removeBlock [_indices.remove_block]
Removes a block from an index.
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html)
```ts
client.indices.removeBlock()
```
## client.indices.resolveCluster [_indices.resolve_cluster]
Resolve the cluster.
@ -12662,6 +12627,7 @@ You can optionally filter the results with a query.
To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.
If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.
If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
Refer to the linked documentation for examples of how to find API keys:
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys)
@ -13042,6 +13008,8 @@ The owner user's information, such as the `username` and `realm`, is also update
NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
To learn more about how to use this API, refer to the [Update cross cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key)
```ts
@ -14007,6 +13975,12 @@ If you omit the `<snapshot>` request path parameter, the request retrieves infor
This usage is preferred.
If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
Loading the stats from the repository is an expensive operation (see the WARNING below).
Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency.
A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid.
Consequently, the total stats for the index will be less than expected due to the missing values from these shards.
WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
The API requires a read from the repository for each shard in each snapshot.
For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
@ -15310,6 +15284,7 @@ When Elasticsearch security features are enabled on your cluster, watches are ru
If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.
When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.
Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch)

View File

@ -71,6 +71,7 @@ export default class Esql {
body: [],
query: [
'drop_null_columns',
'format',
'keep_alive',
'wait_for_completion_timeout'
]

View File

@ -733,6 +733,14 @@ export default class Indices {
'resource'
]
},
'indices.remove_block': {
path: [
'index',
'block'
],
body: [],
query: []
},
'indices.resolve_cluster': {
path: [
'name'
@ -3720,6 +3728,52 @@ export default class Indices {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
 * Remove an index block. Removes a block (for example a `write` or `metadata`
 * block) from an index, re-enabling the operations the block disallowed.
 * Both `index` and `block` are required path parameters.
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation}
 */
async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
  const {
    path: acceptedPath
  } = this.acceptedParams['indices.remove_block']

  // Copy any caller-supplied raw querystring so the input is never mutated.
  const userQuery = params?.querystring
  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

  // A string body is passed through as-is; an object body is shallow-copied.
  let body: Record<string, any> | string | undefined
  const userBody = params?.body
  if (userBody != null) {
    if (typeof userBody === 'string') {
      body = userBody
    } else {
      body = { ...userBody }
    }
  }

  params = params ?? {}

  // `index` and `block` are required path parts. Fail fast with a clear
  // message instead of letting `undefined.toString()` throw a TypeError
  // when the path template below is built.
  if (params.index == null || params.block == null) {
    throw new Error('Missing required path parameters: "index" and "block" must be provided')
  }

  // Any remaining parameter that is not a path part, `body`, or
  // `querystring` is forwarded as a query parameter.
  for (const key in params) {
    if (acceptedPath.includes(key)) {
      continue
    } else if (key !== 'body' && key !== 'querystring') {
      querystring[key] = params[key]
    }
  }

  const method = 'DELETE'
  const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}`
  const meta: TransportRequestMetadata = {
    name: 'indices.remove_block',
    pathParts: {
      index: params.index,
      block: params.block
    }
  }
  return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. 
Thus, if an error occurs, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. 
If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster | Elasticsearch API documentation}

File diff suppressed because one or more lines are too long

View File

@ -3180,7 +3180,7 @@ export default class Security {
}
/**
* Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
* Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. Refer to the linked documentation for examples of how to find API keys:
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys | Elasticsearch API documentation}
*/
async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityQueryApiKeysResponse>
@ -3773,7 +3773,7 @@ export default class Security {
}
/**
* Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
* Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. To learn more about how to use this API, refer to the [Update cross cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation}
*/
async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityUpdateCrossClusterApiKeyResponse>

View File

@ -773,7 +773,7 @@ export default class Snapshot {
}
/**
* Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
* Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster. Loading the stats from the repository is an expensive operation (see the WARNING below). Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency. A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid. Consequently, the total stats for the index will be less than expected due to the missing values from these shards. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status | Elasticsearch API documentation}
*/
async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotStatusResponse>

179
src/api/api/streams.ts Normal file
View File

@ -0,0 +1,179 @@
/*
* Copyright Elasticsearch B.V. and contributors
* SPDX-License-Identifier: Apache-2.0
*/
/* eslint-disable import/export */
/* eslint-disable @typescript-eslint/no-misused-new */
/* eslint-disable @typescript-eslint/no-extraneous-class */
/* eslint-disable @typescript-eslint/no-unused-vars */
// This file was automatically generated by elastic/elastic-client-generator-js
// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
// and elastic/elastic-client-generator-js to regenerate this file again.
import {
Transport,
TransportRequestMetadata,
TransportRequestOptions,
TransportRequestOptionsWithMeta,
TransportRequestOptionsWithOutMeta,
TransportResult
} from '@elastic/transport'
import * as T from '../types'
// Shape each API method expects for `this`: the transport used to issue
// requests plus the per-endpoint map of accepted path/body/query parameters.
interface That {
transport: Transport
acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
}
export default class Streams {
  transport: Transport
  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>

  constructor (transport: Transport) {
    this.transport = transport
    // None of the streams endpoints take path, body, or query parameters.
    this.acceptedParams = {
      'streams.logs_disable': {
        path: [],
        body: [],
        query: []
      },
      'streams.logs_enable': {
        path: [],
        body: [],
        query: []
      },
      'streams.status': {
        path: [],
        body: [],
        query: []
      }
    }
  }

  /**
   * Disable the Logs Streams feature for this cluster
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-disable.html | Elasticsearch API documentation}
   */
  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
  async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
    const { path: acceptedPath } = this.acceptedParams['streams.logs_disable']
    const request: Record<string, any> = params ?? {}

    // Seed the querystring from any caller-supplied raw `querystring`,
    // copied so the input object is never mutated.
    const querystring: Record<string, any> = request.querystring != null ? { ...request.querystring } : {}

    // A string body is sent as-is; an object body is shallow-copied.
    let body: Record<string, any> | string | undefined
    if (request.body != null) {
      body = typeof request.body === 'string' ? request.body : { ...request.body }
    }

    // Everything that is not a path part, `body`, or `querystring`
    // is forwarded as a query parameter.
    for (const key in request) {
      if (!acceptedPath.includes(key) && key !== 'body' && key !== 'querystring') {
        querystring[key] = request[key]
      }
    }

    const meta: TransportRequestMetadata = {
      name: 'streams.logs_disable'
    }
    return await this.transport.request({ path: '/_streams/logs/_disable', method: 'POST', querystring, body, meta }, options)
  }

  /**
   * Enable the Logs Streams feature for this cluster
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-enable.html | Elasticsearch API documentation}
   */
  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
  async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
    const { path: acceptedPath } = this.acceptedParams['streams.logs_enable']
    const request: Record<string, any> = params ?? {}

    // Seed the querystring from any caller-supplied raw `querystring`,
    // copied so the input object is never mutated.
    const querystring: Record<string, any> = request.querystring != null ? { ...request.querystring } : {}

    // A string body is sent as-is; an object body is shallow-copied.
    let body: Record<string, any> | string | undefined
    if (request.body != null) {
      body = typeof request.body === 'string' ? request.body : { ...request.body }
    }

    // Everything that is not a path part, `body`, or `querystring`
    // is forwarded as a query parameter.
    for (const key in request) {
      if (!acceptedPath.includes(key) && key !== 'body' && key !== 'querystring') {
        querystring[key] = request[key]
      }
    }

    const meta: TransportRequestMetadata = {
      name: 'streams.logs_enable'
    }
    return await this.transport.request({ path: '/_streams/logs/_enable', method: 'POST', querystring, body, meta }, options)
  }

  /**
   * Return the current status of the streams feature for each streams type
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-status.html | Elasticsearch API documentation}
   */
  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
    const { path: acceptedPath } = this.acceptedParams['streams.status']
    const request: Record<string, any> = params ?? {}

    // Seed the querystring from any caller-supplied raw `querystring`,
    // copied so the input object is never mutated.
    const querystring: Record<string, any> = request.querystring != null ? { ...request.querystring } : {}

    // A string body is sent as-is; an object body is shallow-copied.
    let body: Record<string, any> | string | undefined
    if (request.body != null) {
      body = typeof request.body === 'string' ? request.body : { ...request.body }
    }

    // Everything that is not a path part, `body`, or `querystring`
    // is forwarded as a query parameter.
    for (const key in request) {
      if (!acceptedPath.includes(key) && key !== 'body' && key !== 'querystring') {
        querystring[key] = request[key]
      }
    }

    const meta: TransportRequestMetadata = {
      name: 'streams.status'
    }
    return await this.transport.request({ path: '/_streams/status', method: 'GET', querystring, body, meta }, options)
  }
}

View File

@ -354,7 +354,7 @@ export default class Watcher {
}
/**
* Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.
* Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch | Elasticsearch API documentation}
*/
async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherExecuteWatchResponse>

View File

@ -87,6 +87,7 @@ import SlmApi from './api/slm'
import SnapshotApi from './api/snapshot'
import SqlApi from './api/sql'
import SslApi from './api/ssl'
import StreamsApi from './api/streams'
import SynonymsApi from './api/synonyms'
import TasksApi from './api/tasks'
import termsEnumApi from './api/terms_enum'
@ -176,6 +177,7 @@ export default interface API {
snapshot: SnapshotApi
sql: SqlApi
ssl: SslApi
streams: StreamsApi
synonyms: SynonymsApi
tasks: TasksApi
termsEnum: typeof termsEnumApi
@ -224,6 +226,7 @@ const kSlm = Symbol('Slm')
const kSnapshot = Symbol('Snapshot')
const kSql = Symbol('Sql')
const kSsl = Symbol('Ssl')
const kStreams = Symbol('Streams')
const kSynonyms = Symbol('Synonyms')
const kTasks = Symbol('Tasks')
const kTextStructure = Symbol('TextStructure')
@ -267,6 +270,7 @@ export default class API {
[kSnapshot]: symbol | null
[kSql]: symbol | null
[kSsl]: symbol | null
[kStreams]: symbol | null
[kSynonyms]: symbol | null
[kTasks]: symbol | null
[kTextStructure]: symbol | null
@ -309,6 +313,7 @@ export default class API {
this[kSnapshot] = null
this[kSql] = null
this[kSsl] = null
this[kStreams] = null
this[kSynonyms] = null
this[kTasks] = null
this[kTextStructure] = null
@ -470,6 +475,9 @@ Object.defineProperties(API.prototype, {
ssl: {
get () { return this[kSsl] === null ? (this[kSsl] = new SslApi(this.transport)) : this[kSsl] }
},
streams: {
get () { return this[kStreams] === null ? (this[kStreams] = new StreamsApi(this.transport)) : this[kStreams] }
},
synonyms: {
get () { return this[kSynonyms] === null ? (this[kSynonyms] = new SynonymsApi(this.transport)) : this[kSynonyms] }
},

View File

@ -4149,7 +4149,7 @@ export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
export interface LinearRetriever extends RetrieverBase {
/** Inner retrievers. */
retrievers?: InnerRetriever[]
rank_window_size: integer
rank_window_size?: integer
}
export type MapboxVectorTiles = ArrayBuffer
@ -4253,7 +4253,7 @@ export interface PinnedRetriever extends RetrieverBase {
retriever: RetrieverContainer
ids?: string[]
docs?: SpecifiedDocument[]
rank_window_size: integer
rank_window_size?: integer
}
export type PipelineName = string
@ -4678,9 +4678,9 @@ export interface TextSimilarityReranker extends RetrieverBase {
/** Unique identifier of the inference endpoint created using the inference API. */
inference_id?: string
/** The text snippet used as the basis for similarity comparison */
inference_text?: string
inference_text: string
/** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text */
field?: string
field: string
}
export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem'
@ -8377,6 +8377,10 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
type: 'search_as_you_type'
}
export interface MappingSemanticTextIndexOptions {
dense_vector?: MappingDenseVectorIndexOptions
}
export interface MappingSemanticTextProperty {
type: 'semantic_text'
meta?: Record<string, string>
@ -8388,6 +8392,9 @@ export interface MappingSemanticTextProperty {
* You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint.
* If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */
search_inference_id?: Id
/** Settings for index_options that override any defaults used by semantic_text, for example
* specific quantization settings. */
index_options?: MappingSemanticTextIndexOptions
/** Settings for chunking text into smaller passages. If specified, these will override the
* chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated,
* they will not be applied to existing documents until they are reindexed. */
@ -9729,14 +9736,12 @@ export interface QueryDslSparseVectorQuery extends QueryDslQueryBase {
query?: string
/** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance.
* If prune is true but the pruning_config is not specified, pruning will occur but default values will be used.
* Default: false
* @experimental */
* Default: false */
prune?: boolean
/** Optional pruning configuration.
* If enabled, this will omit non-significant tokens from the query in order to improve query performance.
* This is only used if prune is set to true.
* If prune is set to true but pruning_config is not specified, default values will be used.
* @experimental */
* If prune is set to true but pruning_config is not specified, default values will be used. */
pruning_config?: QueryDslTokenPruningConfig
}
@ -17837,6 +17842,8 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase {
/** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
* If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */
drop_null_columns?: boolean
/** A short version of the Accept header, for example `json` or `yaml`. */
format?: EsqlEsqlFormat
/** The period for which the query and its results are stored in the cluster.
* When this period expires, the query and its results are deleted, even if the query is still ongoing. */
keep_alive?: Duration
@ -17846,9 +17853,9 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase {
* Otherwise, the response returns an `is_running` value of `true` and no results. */
wait_for_completion_timeout?: Duration
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never }
}
export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult
@ -20190,10 +20197,6 @@ export interface IndicesGetRequest extends RequestBase {
export type IndicesGetResponse = Record<IndexName, IndicesIndexState>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasRequest extends RequestBase {
/** Comma-separated list of aliases to retrieve.
* Supports wildcards (`*`).
@ -20223,6 +20226,17 @@ export interface IndicesGetAliasRequest extends RequestBase {
export type IndicesGetAliasResponse = Record<IndexName, IndicesGetAliasIndexAliases>
export interface IndicesGetAliasIndexAliases {
aliases: Record<string, IndicesAliasDefinition>
}
export interface IndicesGetAliasNotFoundAliasesKeys {
error: string
status: number
}
export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys
& { [property: string]: IndicesGetAliasIndexAliases | string | number }
export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
name: DataStreamName
lifecycle?: IndicesDataStreamLifecycleWithRollover