Auto-generated API code (#2865)

Elastic Machine
2025-06-09 17:29:01 +02:00
committed by GitHub
parent c485567c51
commit e5c10d80e2
4 changed files with 656 additions and 92 deletions

View File

@ -1651,7 +1651,7 @@ client.search({ ... })
- **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL.
- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
- **`script_fields` (Optional, Record<string, { script, ignore_failure }>)**: Retrieve a script evaluation (based on different fields) for each hit.
- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page.
- **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
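For example, a hedged sketch of the newly listed `pinned` retriever option described above; the index name, document IDs, query, and connection details are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

// Pin two documents ahead of the organic results produced by the inner
// `standard` retriever (index, IDs, and query are illustrative only).
const response = await client.search({
  index: 'products',
  retriever: {
    pinned: {
      ids: ['doc-1', 'doc-2'],
      rank_window_size: 50,
      retriever: {
        standard: {
          query: { match: { description: 'wireless headphones' } }
        }
      }
    }
  }
})
console.log(response.hits.hits)
```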
@ -5818,14 +5818,22 @@ client.indices.deleteDataStream({ name })
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options]
Deletes the data stream options of the selected data streams.
Delete data stream options.
Removes the data stream options from a data stream.
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
```ts
client.indices.deleteDataStreamOptions()
client.indices.deleteDataStreamOptions({ name })
```
### Arguments [_arguments_indices.delete_data_stream_options]
#### Request (object) [_request_indices.delete_data_stream_options]
- **`name` (string \| string[])**: A list of data streams whose data stream options will be deleted; use `*` to target all data streams
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
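A minimal usage sketch for this endpoint; the data stream name and timeouts are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

// Remove any data stream options previously set on the stream.
const response = await client.indices.deleteDataStreamOptions({
  name: 'my-data-stream',
  master_timeout: '30s',
  timeout: '30s'
})
console.log(response.acknowledged)
```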
## client.indices.deleteIndexTemplate [_indices.delete_index_template]
Delete an index template.
@ -6286,14 +6294,45 @@ Supports a list of values, such as `open,hidden`.
- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned.
## client.indices.getDataStreamOptions [_indices.get_data_stream_options]
Returns the data stream options of the selected data streams.
Get data stream options.
Get the data stream options configuration of one or more data streams.
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
```ts
client.indices.getDataStreamOptions()
client.indices.getDataStreamOptions({ name })
```
### Arguments [_arguments_indices.get_data_stream_options]
#### Request (object) [_request_indices.get_data_stream_options]
- **`name` (string \| string[])**: List of data streams to limit the request.
Supports wildcards (`*`).
To target all data streams, omit this parameter or use `*` or `_all`.
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
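A hedged usage sketch; the stream name is a placeholder and the loop follows the `data_streams` array returned by this API:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

const response = await client.indices.getDataStreamOptions({
  name: 'my-data-stream',
  expand_wildcards: 'open'
})
// Each entry carries the stream name and, if configured, its options.
for (const ds of response.data_streams) {
  console.log(ds.name, ds.options?.failure_store)
}
```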
## client.indices.getDataStreamSettings [_indices.get_data_stream_settings]
Get data stream settings.
Get setting information for one or more data streams.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings)
```ts
client.indices.getDataStreamSettings({ name })
```
### Arguments [_arguments_indices.get_data_stream_settings]
#### Request (object) [_request_indices.get_data_stream_settings]
- **`name` (string \| string[])**: A list of data streams or data stream patterns. Supports wildcards (`*`).
- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.
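A minimal sketch, assuming a data stream named `my-data-stream` exists; the response exposes both the stream-specific settings and the effective settings merged from the matching template:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

const response = await client.indices.getDataStreamSettings({ name: 'my-data-stream' })
for (const ds of response.data_streams) {
  // `settings` are the overrides on the stream; `effective_settings` include the template.
  console.log(ds.name, ds.settings, ds.effective_settings)
}
```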
## client.indices.getFieldMapping [_indices.get_field_mapping]
Get mapping definitions.
@ -6651,14 +6690,58 @@ error.
If no response is received before the timeout expires, the request fails and returns an error.
## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
Updates the data stream options of the selected data streams.
Update data stream options.
Update the data stream options of the specified data streams.
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
```ts
client.indices.putDataStreamOptions()
client.indices.putDataStreamOptions({ name })
```
### Arguments [_arguments_indices.put_data_stream_options]
#### Request (object) [_request_indices.put_data_stream_options]
- **`name` (string \| string[])**: List of data streams used to limit the request.
Supports wildcards (`*`).
To target all data streams use `*` or `_all`.
- **`failure_store` (Optional, { enabled, lifecycle })**: If defined, it will update the failure store configuration of every data stream resolved by the name expression.
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.
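A hedged sketch of enabling the failure store through this endpoint; the stream name and retention period are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

// Turn the failure store on and keep failed documents for 30 days.
const response = await client.indices.putDataStreamOptions({
  name: 'my-data-stream',
  failure_store: {
    enabled: true,
    lifecycle: { data_retention: '30d' }
  }
})
console.log(response.acknowledged)
```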
## client.indices.putDataStreamSettings [_indices.put_data_stream_settings]
Update data stream settings.
This API can be used to override settings on specific data streams. These overrides will take precedence over what
is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,
only certain settings are allowed. If possible, the setting change is applied to all
backing indices. Otherwise, it will be applied when the data stream is next rolled over.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings)
```ts
client.indices.putDataStreamSettings({ name })
```
### Arguments [_arguments_indices.put_data_stream_settings]
#### Request (object) [_request_indices.put_data_stream_settings]
- **`name` (string \| string[])**: A list of data streams or data stream patterns.
- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
- **`dry_run` (Optional, boolean)**: If `true`, the request does not actually change the settings on any data streams or indices. Instead, it
simulates changing the settings and reports back to the user what would have happened had these settings
actually been applied.
- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
received before the timeout expires, the request fails and returns an
error.
- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the
timeout expires, the request fails and returns an error.
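A hedged sketch of a dry run that overrides a single setting; the stream name and value are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

const response = await client.indices.putDataStreamSettings({
  name: 'my-data-stream',
  dry_run: true, // simulate only; nothing is changed on the cluster
  settings: {
    index: { number_of_replicas: 1 }
  }
})
// Reports whether the override was (or would be) applied to the stream and its backing indices.
console.log(response.data_streams[0]?.index_settings_results)
```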
## client.indices.putIndexTemplate [_indices.put_index_template]
Create or update an index template.
@ -6812,9 +6895,45 @@ Changes dynamic index settings in real time.
For data streams, index setting changes are applied to all backing indices by default.
To revert a setting to the default value, use a null value.
The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
```
{
"number_of_replicas": 1
}
```
Or you can use an `index` setting object:
```
{
"index": {
"number_of_replicas": 1
}
}
```
Or you can use dot notation:
```
{
"index.number_of_replicas": 1
}
```
Or you can embed any of the aforementioned options in a `settings` object. For example:
```
{
"settings": {
"index": {
"number_of_replicas": 1
}
}
}
```
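With the client, any of the body shapes above can be passed via the `settings` property; a minimal sketch with a placeholder index name:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

// Equivalent to the `index` object form shown above.
await client.indices.putSettings({
  index: 'my-index',
  settings: {
    index: { number_of_replicas: 1 }
  }
})
```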
NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream.
@ -7527,12 +7646,9 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
## client.inference.chatCompletionUnified [_inference.chat_completion_unified]
Perform chat completion inference.
The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `chat_completion` task type for `openai` and `elastic` inference services.
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
@ -7904,7 +8020,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser
### Arguments [_arguments_inference.put_googlevertexai]
#### Request (object) [_request_inference.put_googlevertexai]
- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
- **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
- **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`.
- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service.
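A hedged sketch of creating an endpoint for the newly supported `completion` task type; the inference ID, location, model, project, and service account JSON are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

await client.inference.putGooglevertexai({
  task_type: 'completion',
  googlevertexai_inference_id: 'my-vertexai-completion',
  service: 'googlevertexai',
  service_settings: {
    location: 'us-central1',                        // placeholder region
    model_id: 'my-model',                           // placeholder model
    project_id: 'my-gcp-project',                   // placeholder project
    service_account_json: '<service-account-json>'  // placeholder credentials
  }
})
```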

View File

@ -214,7 +214,11 @@ export default class Indices {
'name'
],
body: [],
query: []
query: [
'expand_wildcards',
'master_timeout',
'timeout'
]
},
'indices.delete_index_template': {
path: [
@ -420,14 +424,19 @@ export default class Indices {
'name'
],
body: [],
query: []
query: [
'expand_wildcards',
'master_timeout'
]
},
'indices.get_data_stream_settings': {
path: [
'name'
],
body: [],
query: []
query: [
'master_timeout'
]
},
'indices.get_field_mapping': {
path: [
@ -584,15 +593,27 @@ export default class Indices {
path: [
'name'
],
body: [],
query: []
body: [
'failure_store'
],
query: [
'expand_wildcards',
'master_timeout',
'timeout'
]
},
'indices.put_data_stream_settings': {
path: [
'name'
],
body: [],
query: []
body: [
'settings'
],
query: [
'dry_run',
'master_timeout',
'timeout'
]
},
'indices.put_index_template': {
path: [
@ -1600,13 +1621,13 @@ export default class Indices {
}
/**
* Deletes the data stream options of the selected data streams.
* Delete data stream options. Removes the data stream options from a data stream.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteDataStreamOptionsResponse>
async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteDataStreamOptionsResponse, unknown>>
async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteDataStreamOptionsResponse>
async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.delete_data_stream_options']
@ -1624,11 +1645,11 @@ export default class Indices {
}
}
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -2458,13 +2479,13 @@ export default class Indices {
}
/**
* Returns the data stream options of the selected data streams.
* Get data stream options. Get the data stream options configuration of one or more data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataStreamOptionsResponse>
async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataStreamOptionsResponse, unknown>>
async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<T.IndicesGetDataStreamOptionsResponse>
async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.get_data_stream_options']
@ -2482,11 +2503,11 @@ export default class Indices {
}
}
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -2503,13 +2524,13 @@ export default class Indices {
}
/**
* Gets a data stream's settings
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
* Get data stream settings. Get setting information for one or more data streams.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings | Elasticsearch API documentation}
*/
async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetDataStreamSettingsResponse>
async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetDataStreamSettingsResponse, unknown>>
async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesGetDataStreamSettingsResponse>
async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.get_data_stream_settings']
@ -2527,11 +2548,11 @@ export default class Indices {
}
}
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -3220,15 +3241,17 @@ export default class Indices {
}
/**
* Updates the data stream options of the selected data streams.
* Update data stream options. Update the data stream options of the specified data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutDataStreamOptionsResponse>
async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutDataStreamOptionsResponse, unknown>>
async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutDataStreamOptionsResponse>
async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['indices.put_data_stream_options']
const userQuery = params?.querystring
@ -3244,12 +3267,22 @@ export default class Indices {
}
}
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
querystring[key] = params[key]
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
}
}
@ -3265,36 +3298,38 @@ export default class Indices {
}
/**
* Updates a data stream's settings
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation}
* Update data stream settings. This API can be used to override settings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, only certain settings are allowed. If possible, the setting change is applied to all backing indices. Otherwise, it will be applied when the data stream is next rolled over.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings | Elasticsearch API documentation}
*/
async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutDataStreamSettingsResponse>
async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutDataStreamSettingsResponse, unknown>>
async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutDataStreamSettingsResponse>
async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['indices.put_data_stream_settings']
const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}
params = params ?? {}
let body: any = params.body ?? undefined
for (const key in params) {
if (acceptedPath.includes(key)) {
if (acceptedBody.includes(key)) {
// @ts-expect-error
body = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
querystring[key] = params[key]
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
}
}
@ -3424,7 +3459,7 @@ export default class Indices {
}
/**
* Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
* Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot notation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation}
*/
async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>

View File

@ -364,7 +364,7 @@ export default class Inference {
}
/**
* Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
* Perform chat completion inference. The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation}
*/
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>

View File

@ -2490,7 +2490,7 @@ export interface SearchFieldSuggester {
export interface SearchHighlight extends SearchHighlightBase {
encoder?: SearchHighlighterEncoder
fields: Record<Field, SearchHighlightField>
fields: Partial<Record<Field, SearchHighlightField>> | Partial<Record<Field, SearchHighlightField>>[]
}
export interface SearchHighlightBase {
@ -3868,7 +3868,7 @@ export interface ErrorCauseKeys {
/** The type of error */
type: string
/** A human-readable explanation of the error, in English. */
reason?: string
reason?: string | null
/** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */
stack_trace?: string
caused_by?: ErrorCause
@ -4062,6 +4062,12 @@ export interface InlineGetKeys<TDocument = unknown> {
export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any }
export interface InnerRetriever {
retriever: RetrieverContainer
weight: float
normalizer: ScoreNormalizer
}
export type Ip = string
export interface KnnQuery extends QueryDslQueryBase {
@ -4134,6 +4140,12 @@ export type Level = 'cluster' | 'indices' | 'shards'
export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
export interface LinearRetriever extends RetrieverBase {
/** Inner retrievers. */
retrievers?: InnerRetriever[]
rank_window_size: integer
}
export type MapboxVectorTiles = ArrayBuffer
export interface MergesStats {
@ -4230,6 +4242,14 @@ export type Password = string
export type Percentage = string | float
export interface PinnedRetriever extends RetrieverBase {
/** Inner retriever. */
retriever: RetrieverContainer
ids?: string[]
docs?: SpecifiedDocument[]
rank_window_size: integer
}
export type PipelineName = string
export interface PluginStats {
@ -4329,6 +4349,12 @@ export interface RescoreVector {
oversample: float
}
export interface RescorerRetriever extends RetrieverBase {
/** Inner retriever. */
retriever: RetrieverContainer
rescore: SearchRescore | SearchRescore[]
}
export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
export interface Retries {
@ -4343,6 +4369,8 @@ export interface RetrieverBase {
filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
/** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */
min_score?: float
/** Retriever name. */
_name?: string
}
export interface RetrieverContainer {
@ -4356,6 +4384,13 @@ export interface RetrieverContainer {
text_similarity_reranker?: TextSimilarityReranker
/** A retriever that replaces the functionality of a rule query. */
rule?: RuleRetriever
/** A retriever that re-scores only the results produced by its child retriever. */
rescorer?: RescorerRetriever
/** A retriever that supports the combination of different retrievers through a weighted linear combination. */
linear?: LinearRetriever
/** A pinned retriever applies pinned documents to the underlying retriever.
* This retriever will rewrite to a PinnedQueryBuilder. */
pinned?: PinnedRetriever
}
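As a hedged illustration of the new `linear` entry in the retriever container above, a weighted combination of two `standard` retrievers typed with the exported `estypes`; index, field names, and weights are placeholders:
```ts
import { Client, estypes } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

// Combine two standard retrievers with a min-max normalized weighted sum.
const retriever: estypes.RetrieverContainer = {
  linear: {
    rank_window_size: 100,
    retrievers: [
      {
        retriever: { standard: { query: { match: { title: 'elasticsearch' } } } },
        weight: 2.0,
        normalizer: 'minmax'
      },
      {
        retriever: { standard: { query: { match: { body: 'data streams' } } } },
        weight: 1.0,
        normalizer: 'minmax'
      }
    ]
  }
}

const response = await client.search({ index: 'articles', retriever })
console.log(response.hits.hits)
```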
export type Routing = string
@ -4369,7 +4404,7 @@ export interface RrfRank {
export interface RuleRetriever extends RetrieverBase {
/** The ruleset IDs containing the rules this retriever is evaluating against. */
ruleset_ids: Id[]
ruleset_ids: Id | Id[]
/** The match criteria that will determine if a rule in the provided rulesets should be applied. */
match_criteria: any
/** The retriever whose results rules should be applied to. */
@ -4380,6 +4415,8 @@ export interface RuleRetriever extends RetrieverBase {
export type ScalarValue = long | double | string | boolean | null
export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm'
export interface ScoreSort {
order?: SortOrder
}
@ -4562,6 +4599,11 @@ export type SortOrder = 'asc' | 'desc'
export type SortResults = FieldValue[]
export interface SpecifiedDocument {
index?: IndexName
id: Id
}
export interface StandardRetriever extends RetrieverBase {
/** Defines a query to retrieve a set of top documents. */
query?: QueryDslQueryContainer
@ -8702,7 +8744,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile
geotile?: GeoTile
geohash?: GeoHash
geohex?: GeoHexCell
}
@ -8806,6 +8848,8 @@ export interface QueryDslIntervalsContainer {
match?: QueryDslIntervalsMatch
/** Matches terms that start with a specified set of characters. */
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
/** Matches terms using a wildcard pattern. */
wildcard?: QueryDslIntervalsWildcard
}
@ -8886,10 +8930,38 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
match?: QueryDslIntervalsMatch
/** Matches terms that start with a specified set of characters. */
prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
/** Matches terms using a wildcard pattern. */
wildcard?: QueryDslIntervalsWildcard
}
export interface QueryDslIntervalsRange {
/** Analyzer used to analyze the range terms. */
analyzer?: string
/** Lower term, either gte or gt must be provided. */
gte?: string
/** Lower term, either gte or gt must be provided. */
gt?: string
/** Upper term, either lte or lt must be provided. */
lte?: string
/** Upper term, either lte or lt must be provided. */
lt?: string
/** If specified, match intervals from this field rather than the top-level field.
* The range terms are normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: Field
}
export interface QueryDslIntervalsRegexp {
/** Analyzer used to analyze the `pattern`. */
analyzer?: string
/** Regex pattern. */
pattern: string
/** If specified, match intervals from this field rather than the top-level field.
* The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: Field
}
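A hedged search sketch combining the new `range` and `regexp` interval rules in an `all_of` block; the index, field, pattern, and bounds are placeholders:
```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder connection

const response = await client.search({
  index: 'logs',
  query: {
    intervals: {
      message: {
        all_of: {
          ordered: true,
          max_gaps: 2,
          intervals: [
            { regexp: { pattern: 'err.*' } },          // new regexp rule
            { range: { gte: 'timeout', lte: 'timeouts' } } // new range rule
          ]
        }
      }
    }
  }
})
console.log(response.hits.hits)
```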
export interface QueryDslIntervalsWildcard {
/** Analyzer used to analyze the `pattern`.
* Defaults to the top-level field's analyzer. */
@ -9444,7 +9516,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
export interface QueryDslRuleQuery extends QueryDslQueryBase {
organic: QueryDslQueryContainer
ruleset_ids: Id[]
ruleset_ids?: Id | Id[]
ruleset_id?: string
match_criteria: any
}
@ -9728,7 +9801,7 @@ export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase<any> {
export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase {
/** The tokens representing this query */
tokens: Record<string, float>
tokens: Partial<Record<string, float>>[]
/** Token pruning configurations */
pruning_config?: QueryDslTokenPruningConfig
}
@ -15431,6 +15504,7 @@ export interface ClusterComponentTemplateSummary {
mappings?: MappingTypeMapping
aliases?: Record<string, IndicesAliasDefinition>
lifecycle?: IndicesDataStreamLifecycleWithRollover
data_stream_options?: IndicesDataStreamOptionsTemplate | null
}
export interface ClusterAllocationExplainAllocationDecision {
@ -18605,6 +18679,9 @@ export interface IndicesDataStream {
replicated?: boolean
/** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */
rollover_on_write: boolean
/** The settings specific to this data stream that will take precedence over the settings in the matching index
* template. */
settings: IndicesIndexSettings
/** Health status of the data stream.
* This health status is based on the state of the primary and replica shards of the streams backing indices. */
status: HealthStatus
@ -18619,6 +18696,24 @@ export interface IndicesDataStream {
index_mode?: IndicesIndexMode
}
export interface IndicesDataStreamFailureStore {
/** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
* that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
* not remove any existing data from the failure store. */
enabled?: boolean
/** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
lifecycle?: IndicesFailureStoreLifecycle
}
export interface IndicesDataStreamFailureStoreTemplate {
/** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
* that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
* not remove any existing data from the failure store. */
enabled?: boolean | null
/** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
lifecycle?: IndicesFailureStoreLifecycleTemplate | null
}
export interface IndicesDataStreamIndex {
/** Name of the backing index. */
index_name: IndexName
@ -18671,6 +18766,15 @@ export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStrea
rollover?: IndicesDataStreamLifecycleRolloverConditions
}
export interface IndicesDataStreamOptions {
/** If defined, it specifies configuration for the failure store of this data stream. */
failure_store?: IndicesDataStreamFailureStore
}
export interface IndicesDataStreamOptionsTemplate {
failure_store?: IndicesDataStreamFailureStoreTemplate | null
}
export interface IndicesDataStreamTimestampField {
/** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */
name: Field
@ -18699,6 +18803,26 @@ export interface IndicesFailureStore {
rollover_on_write: boolean
}
export interface IndicesFailureStoreLifecycle {
/** If defined, every document added to this data stream will be stored at least for this time frame.
* Any time after this duration the document could be deleted.
* When empty, every document in this data stream will be stored indefinitely. */
data_retention?: Duration
/** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
* that's disabled (enabled: `false`) will have no effect on the data stream. */
enabled?: boolean
}
export interface IndicesFailureStoreLifecycleTemplate {
/** If defined, every document added to this data stream will be stored at least for this time frame.
* Any time after this duration the document could be deleted.
* When empty, every document in this data stream will be stored indefinitely. */
data_retention?: Duration | null
/** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
* that's disabled (enabled: `false`) will have no effect on the data stream. */
enabled?: boolean
}
export interface IndicesFielddataFrequencyFilter {
max: double
min: double
@ -18932,6 +19056,7 @@ export interface IndicesIndexTemplateSummary {
/** Configuration options for the index. */
settings?: IndicesIndexSettings
lifecycle?: IndicesDataStreamLifecycleWithRollover
data_stream_options?: IndicesDataStreamOptionsTemplate | null
}
export interface IndicesIndexVersioning {
@ -19674,6 +19799,23 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase {
export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase
export interface IndicesDeleteDataStreamOptionsRequest extends RequestBase {
/** A comma-separated list of data streams whose data stream options will be deleted; use `*` to target all data streams */
name: DataStreamNames
/** Whether wildcard expressions should get expanded to open or closed indices (default: open) */
expand_wildcards?: ExpandWildcards
/** Specify timeout for connection to master */
master_timeout?: Duration
/** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
timeout?: Duration
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
}
export type IndicesDeleteDataStreamOptionsResponse = AcknowledgedResponseBase
export interface IndicesDeleteIndexTemplateRequest extends RequestBase {
/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */
name: Names
@ -20143,6 +20285,59 @@ export interface IndicesGetDataStreamResponse {
data_streams: IndicesDataStream[]
}
export interface IndicesGetDataStreamOptionsDataStreamWithOptions {
name: DataStreamName
options?: IndicesDataStreamOptions
}
export interface IndicesGetDataStreamOptionsRequest extends RequestBase {
/** Comma-separated list of data streams to limit the request.
* Supports wildcards (`*`).
* To target all data streams, omit this parameter or use `*` or `_all`. */
name: DataStreamNames
/** Type of data stream that wildcard patterns can match.
* Supports comma-separated values, such as `open,hidden`.
* Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */
expand_wildcards?: ExpandWildcards
/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
master_timeout?: Duration
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never }
}
export interface IndicesGetDataStreamOptionsResponse {
data_streams: IndicesGetDataStreamOptionsDataStreamWithOptions[]
}
export interface IndicesGetDataStreamSettingsDataStreamSettings {
/** The name of the data stream. */
name: string
/** The settings specific to this data stream */
settings: IndicesIndexSettings
/** The settings specific to this data stream merged with the settings from its template. These `effective_settings`
* are the settings that will be used when a new index is created for this data stream. */
effective_settings: IndicesIndexSettings
}
export interface IndicesGetDataStreamSettingsRequest extends RequestBase {
/** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */
name: Indices
/** The period to wait for a connection to the master node. If no response is
* received before the timeout expires, the request fails and returns an
* error. */
master_timeout?: Duration
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
}
export interface IndicesGetDataStreamSettingsResponse {
data_streams: IndicesGetDataStreamSettingsDataStreamSettings[]
}
export interface IndicesGetFieldMappingRequest extends RequestBase {
/** Comma-separated list or wildcard expression of fields used to limit returned information.
* Supports wildcards (`*`). */
@ -20517,6 +20712,90 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase {
export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase
export interface IndicesPutDataStreamOptionsRequest extends RequestBase {
/** Comma-separated list of data streams used to limit the request.
* Supports wildcards (`*`).
* To target all data streams use `*` or `_all`. */
name: DataStreamNames
/** Type of data stream that wildcard patterns can match.
* Supports comma-separated values, such as `open,hidden`.
* Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */
expand_wildcards?: ExpandWildcards
/** Period to wait for a connection to the master node. If no response is
* received before the timeout expires, the request fails and returns an
* error. */
master_timeout?: Duration
/** Period to wait for a response.
* If no response is received before the timeout expires, the request fails and returns an error. */
timeout?: Duration
/** If defined, it will update the failure store configuration of every data stream resolved by the name expression. */
failure_store?: IndicesDataStreamFailureStore
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never }
}
export type IndicesPutDataStreamOptionsResponse = AcknowledgedResponseBase
export interface IndicesPutDataStreamSettingsDataStreamSettingsError {
index: IndexName
/** A message explaining why the settings could not be applied to specific indices. */
error: string
}
export interface IndicesPutDataStreamSettingsIndexSettingResults {
/** The list of settings that were applied to the data stream but not to backing indices. These will be applied to
* the write index the next time the data stream is rolled over. */
applied_to_data_stream_only: string[]
/** The list of settings that were applied to the data stream and to all of its backing indices. These settings will
* also be applied to the write index the next time the data stream is rolled over. */
applied_to_data_stream_and_backing_indices: string[]
errors?: IndicesPutDataStreamSettingsDataStreamSettingsError[]
}
export interface IndicesPutDataStreamSettingsRequest extends RequestBase {
/** A comma-separated list of data streams or data stream patterns. */
name: Indices
/** If `true`, the request does not actually change the settings on any data streams or indices. Instead, it
* simulates changing the settings and reports back to the user what would have happened had these settings
* actually been applied. */
dry_run?: boolean
/** The period to wait for a connection to the master node. If no response is
* received before the timeout expires, the request fails and returns an
* error. */
master_timeout?: Duration
/** The period to wait for a response. If no response is received before the
* timeout expires, the request fails and returns an error. */
timeout?: Duration
settings?: IndicesIndexSettings
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never }
}
export interface IndicesPutDataStreamSettingsResponse {
data_streams: IndicesPutDataStreamSettingsUpdatedDataStreamSettings[]
}
export interface IndicesPutDataStreamSettingsUpdatedDataStreamSettings {
/** The data stream name. */
name: IndexName
/** If the settings were successfully applied to the data stream (or would have been, if running in `dry_run`
* mode), it is `true`. If an error occurred, it is `false`. */
applied_to_data_stream: boolean
/** A message explaining why the settings could not be applied to the data stream. */
error?: string
/** The settings specific to this data stream that will override any settings from the matching index template. */
settings: IndicesIndexSettings
/** The settings that are effective on this data stream, taking into account the settings from the matching index
* template and the settings specific to this data stream. */
effective_settings: IndicesIndexSettings
/** Information about whether and where each setting was applied. */
index_settings_results: IndicesPutDataStreamSettingsIndexSettingResults
}
export interface IndicesPutIndexTemplateIndexTemplateMapping {
/** Aliases to add.
* If the index template includes a `data_stream` object, these are data stream aliases.
@ -22135,7 +22414,7 @@ export interface InferenceGoogleVertexAITaskSettings {
top_n?: integer
}
export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding'
export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion'
export interface InferenceHuggingFaceServiceSettings {
/** A valid access token for your HuggingFace account.
@ -22198,6 +22477,76 @@ export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceI
task_type: InferenceTaskTypeAlibabaCloudAI
}
export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeAmazonBedrock
}
export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeAnthropic
}
export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeAzureAIStudio
}
export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeAzureOpenAI
}
export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeCohere
}
export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeELSER
}
export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeElasticsearch
}
export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeGoogleAIStudio
}
export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeGoogleVertexAI
}
export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeHuggingFace
}
export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
@ -22205,6 +22554,34 @@ export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInference
task_type: InferenceTaskTypeJinaAi
}
export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeMistral
}
export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeOpenAI
}
export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeVoyageAI
}
export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint {
/** The inference Id */
inference_id: string
/** The task type */
task_type: InferenceTaskTypeWatsonx
}
export interface InferenceInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding_bits?: InferenceTextEmbeddingByteResult[]
@ -22389,8 +22766,36 @@ export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank'
export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding'
export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion'
export type InferenceTaskTypeAnthropic = 'completion'
export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'
export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'
export type InferenceTaskTypeELSER = 'sparse_embedding'
export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank'
export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'
export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeHuggingFace = 'text_embedding'
export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'
export type InferenceTaskTypeMistral = 'text_embedding'
export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'
export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'
export type InferenceTaskTypeWatsonx = 'text_embedding'
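// --- Illustrative sketch, not part of the generated API surface ---
// The task-type unions above make provider capabilities checkable at compile
// time. A minimal sketch, assuming the declarations above are in scope;
// `describeOpenAITask` is a hypothetical helper, not a generated function:
function describeOpenAITask (task: InferenceTaskTypeOpenAI): string {
  switch (task) {
    case 'text_embedding':
      return 'dense vector embeddings'
    case 'completion':
    case 'chat_completion':
      return 'text generation'
    default: {
      // Exhaustiveness check: if a new task type is added to the union above,
      // this assignment becomes a compile-time error.
      const unreachable: never = task
      return unreachable
    }
  }
}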
export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
}
@ -22637,7 +23042,7 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo
export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock
export interface InferencePutAnthropicRequest extends RequestBase {
/** The task type.
@ -22660,7 +23065,7 @@ export interface InferencePutAnthropicRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo
export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic
export interface InferencePutAzureaistudioRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22682,7 +23087,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio
export interface InferencePutAzureopenaiRequest extends RequestBase {
/** The type of the inference task that the model will perform.
@ -22705,7 +23110,7 @@ export interface InferencePutAzureopenaiRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI
export interface InferencePutCohereRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22728,7 +23133,7 @@ export interface InferencePutCohereRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
export interface InferencePutElasticsearchRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22751,7 +23156,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo
export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
export interface InferencePutElserRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22770,7 +23175,7 @@ export interface InferencePutElserRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
}
export type InferencePutElserResponse = InferenceInferenceEndpointInfo
export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
export interface InferencePutGoogleaistudioRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22789,7 +23194,7 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
}
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo
export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
export interface InferencePutGooglevertexaiRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22811,7 +23216,7 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo
export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI
export interface InferencePutHuggingFaceRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22830,7 +23235,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
}
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo
export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
export interface InferencePutJinaaiRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22872,7 +23277,7 @@ export interface InferencePutMistralRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
}
export type InferencePutMistralResponse = InferenceInferenceEndpointInfo
export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
export interface InferencePutOpenaiRequest extends RequestBase {
/** The type of the inference task that the model will perform.
@ -22895,7 +23300,7 @@ export interface InferencePutOpenaiRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
export interface InferencePutVoyageaiRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
@ -22917,7 +23322,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
}
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI
export interface InferencePutWatsonxRequest extends RequestBase {
/** The task type.
@ -22935,7 +23340,7 @@ export interface InferencePutWatsonxRequest extends RequestBase {
querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never }
}
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
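// --- Illustrative sketch, not part of the generated API surface ---
// The provider-specific PUT responses above now resolve to the matching
// InferenceInferenceEndpointInfo* interface instead of the generic
// InferenceInferenceEndpointInfo, so `task_type` stays narrowed at the call
// site. A minimal usage sketch, assuming the generated client exposes the
// endpoint as `client.inference.putOpenai` (mirroring the request/response
// type names); the inference id and service settings shown are illustrative:
import { Client } from '@elastic/elasticsearch'

async function createOpenAIEmbeddingEndpoint (client: Client): Promise<InferencePutOpenaiResponse> {
  const endpoint = await client.inference.putOpenai({
    task_type: 'text_embedding',
    openai_inference_id: 'my-openai-embeddings',
    service: 'openai',
    service_settings: { api_key: 'REDACTED', model_id: 'text-embedding-3-small' }
  })
  // `endpoint.task_type` is typed as InferenceTaskTypeOpenAI rather than a
  // broad string, so it can be handled exhaustively downstream.
  return endpoint
}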
export interface InferenceRerankRequest extends RequestBase {
/** The unique identifier for the inference endpoint. */
@ -33720,6 +34125,14 @@ export interface SlmSnapshotLifecycle {
stats: SlmStatistics
}
export interface SlmSnapshotPolicyStats {
policy: string
snapshots_taken: long
snapshots_failed: long
snapshots_deleted: long
snapshot_deletion_failures: long
}
export interface SlmStatistics {
retention_deletion_time?: Duration
retention_deletion_time_millis?: DurationValue<UnitMillis>
@ -33830,7 +34243,7 @@ export interface SlmGetStatsResponse {
total_snapshot_deletion_failures: long
total_snapshots_failed: long
total_snapshots_taken: long
policy_stats: string[]
policy_stats: SlmSnapshotPolicyStats[]
}
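// --- Illustrative sketch, not part of the generated API surface ---
// With `policy_stats` corrected from string[] to SlmSnapshotPolicyStats[],
// per-policy counters are directly accessible. A minimal sketch, reusing the
// Client import from the sketch above; `reportSlmFailures` is a hypothetical
// helper, not a generated function:
async function reportSlmFailures (client: Client): Promise<void> {
  const stats: SlmGetStatsResponse = await client.slm.getStats()
  for (const policy of stats.policy_stats) {
    if (policy.snapshots_failed > 0 || policy.snapshot_deletion_failures > 0) {
      console.warn(`SLM policy ${policy.policy}: ${policy.snapshots_failed} failed snapshots, ${policy.snapshot_deletion_failures} failed deletions`)
    }
  }
}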
export interface SlmGetStatusRequest extends RequestBase {