Auto-generated API code (#2611)

Elastic Machine
2025-02-10 20:19:42 +01:00
committed by GitHub
parent 976282f95b
commit b62e2427fb
4 changed files with 101 additions and 40 deletions

View File

@@ -149,6 +149,7 @@ client.bulk({ ... })
* *Request (object):*
** *`index` (Optional, string)*: The name of the data stream, index, or index alias to perform bulk actions on.
** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])*
** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message when a parsing error occurs (see the sketch below).
** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were run for each index or create.
** *`pipeline` (Optional, string)*: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
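A minimal sketch of the new `include_source_on_error` flag, assuming a configured `client` and a hypothetical `my-index`:

[source,ts]
----
const response = await client.bulk({
  index: 'my-index',
  include_source_on_error: false, // keep document source out of parsing-error messages
  operations: [
    { create: { _id: '1' } },
    { title: 'hello' } // document body for the create action above
  ]
})
console.log(response.errors) // true if any item failed
----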
@@ -323,6 +324,7 @@ client.create({ id, index })
** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
** *`document` (Optional, object)*: A document.
** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message when a parsing error occurs (see the sketch below).
** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
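The same flag applies to single-document creation; a sketch with hypothetical values:

[source,ts]
----
// Fails with a conflict if a document with this `_id` already exists
const response = await client.create({
  id: '1',
  index: 'my-index',
  include_source_on_error: false,
  document: { title: 'hello' }
})
console.log(response.result) // 'created'
----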
@@ -1031,6 +1033,7 @@ client.index({ index })
** *`document` (Optional, object)*: A document.
** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message when a parsing error occurs (see the sketch below).
** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `<index>/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
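A sketch combining `op_type` and the new flag, again with hypothetical names:

[source,ts]
----
// `op_type: 'create'` is put-if-absent: the call fails if `_id: '1'` exists
const response = await client.index({
  index: 'my-index',
  id: '1',
  op_type: 'create',
  include_source_on_error: false,
  document: { title: 'hello' }
})
----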
@@ -2141,6 +2144,7 @@ client.update({ id, index })
** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message when a parsing error occurs (see the sketch below).
** *`lang` (Optional, string)*: The script language.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.
** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
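A sketch of the scripted-upsert flow described by the `upsert` parameter, with hypothetical values:

[source,ts]
----
// If `_id: '1'` exists the script runs; otherwise `upsert` is indexed as a new document
const response = await client.update({
  index: 'my-index',
  id: '1',
  script: { source: 'ctx._source.count += 1', lang: 'painless' },
  upsert: { count: 0 },
  include_source_on_error: false
})
----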
@@ -2342,7 +2346,7 @@ client.asyncSearch.get({ id })
* *Request (object):*
** *`id` (string)*: A unique identifier for the async search.
** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search should be available in the cluster.
** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search should be available in the cluster.
When not specified, the `keep_alive` set with the corresponding submit async request will be used.
Otherwise, it is possible to override the value and extend the validity of the request.
When this period expires, the search, if still running, is cancelled.
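A sketch of extending an async search's validity while polling, using a hypothetical identifier:

[source,ts]
----
const response = await client.asyncSearch.get({
  id: 'my-async-search-id',         // returned by a prior asyncSearch.submit call
  keep_alive: '5m',                 // extend how long the search stays available
  wait_for_completion_timeout: '2s' // block briefly for fresher results
})
console.log(response.is_running)
----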
@@ -2357,7 +2361,10 @@ By default no timeout is set meaning that the currently available results will b
Get the async search status.
Get the status of a previously submitted async search request given its identifier, without retrieving search results.
If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.
If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:
* The user or API key that submitted the original async search request.
* Users that have the `monitor` cluster privilege or greater privileges.
{ref}/async-search.html[Endpoint documentation]
[source,ts]
@@ -2370,7 +2377,7 @@ client.asyncSearch.status({ id })
* *Request (object):*
** *`id` (string)*: A unique identifier for the async search.
** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available.
** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search needs to be available.
Ongoing async searches and any saved search results are deleted after this period.
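A status-polling sketch with the same hypothetical identifier:

[source,ts]
----
const status = await client.asyncSearch.status({
  id: 'my-async-search-id',
  keep_alive: '1m'
})
// `completion_status` carries the HTTP status code once the search has finished
console.log(status.is_running, status.completion_status)
----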
[discrete]
@@ -3286,6 +3293,7 @@ If `false`, the request returns a 404 status code when there are no matches or o
[discrete]
==== delete_auto_follow_pattern
Delete auto-follow patterns.
Delete a collection of cross-cluster replication auto-follow patterns.
{ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation]
@@ -3298,8 +3306,10 @@ client.ccr.deleteAutoFollowPattern({ name })
==== Arguments
* *Request (object):*
** *`name` (string)*: The name of the auto follow pattern.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
** *`name` (string)*: The auto-follow pattern collection to delete.
** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never time out.
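A sketch with a hypothetical pattern collection name:

[source,ts]
----
const response = await client.ccr.deleteAutoFollowPattern({
  name: 'my-auto-follow-pattern',
  master_timeout: '30s' // fail if no master connection within 30 seconds
})
console.log(response.acknowledged)
----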
[discrete]
==== follow
@@ -3346,6 +3356,7 @@ remote Lucene segment files to the follower index.
[discrete]
==== follow_info
Get follower information.
Get information about all cross-cluster replication follower indices.
For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
@@ -3359,8 +3370,10 @@ client.ccr.followInfo({ index })
==== Arguments
* *Request (object):*
** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
** *`index` (string | string[])*: A comma-delimited list of follower index patterns.
** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
If the master node is not available before the timeout expires, the request fails and returns an error.
It can also be set to `-1` to indicate that the request should never time out.
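A sketch that inspects every follower index, assuming the response carries the usual `follower_indices` array:

[source,ts]
----
const response = await client.ccr.followInfo({
  index: '_all',
  master_timeout: '30s'
})
for (const f of response.follower_indices) {
  console.log(f.follower_index, f.remote_cluster, f.status) // 'active' or 'paused'
}
----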
[discrete]
==== follow_stats

View File

@@ -109,7 +109,7 @@ export default class AsyncSearch {
}
/**
* Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.
* Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to: * The user or API key that submitted the original async search request. * Users that have the `monitor` cluster privilege or greater privileges.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/async-search.html | Elasticsearch API documentation}
*/
async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.AsyncSearchStatusResponse>

View File

@@ -34,6 +34,8 @@ export interface BulkCreateOperation extends BulkWriteOperation {
export interface BulkDeleteOperation extends BulkOperationBase {
}
export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed'
export interface BulkIndexOperation extends BulkWriteOperation {
}
@@ -58,6 +60,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
index?: IndexName
include_source_on_error?: boolean
list_executed_pipelines?: boolean
pipeline?: string
refresh?: Refresh
@@ -83,6 +86,7 @@ export interface BulkResponseItem {
_id?: string | null
_index: string
status: integer
failure_store?: BulkFailureStoreStatus
error?: ErrorCause
_primary_term?: long
result?: string
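A consumption sketch for the new `failure_store` field, assuming a hypothetical data stream with a failure store enabled:

[source,ts]
----
const response = await client.bulk({
  index: 'my-data-stream',
  operations: [
    { create: {} },
    { '@timestamp': '2025-02-10T00:00:00Z', message: 'hello' }
  ]
})
for (const item of response.items) {
  // each item is keyed by its operation type; data streams use `create`
  if (item.create?.failure_store === 'used') {
    console.log(`${item.create._id} was redirected to the failure store`)
  }
}
----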
@@ -159,6 +163,7 @@ export interface CountResponse {
export interface CreateRequest<TDocument = unknown> extends RequestBase {
id: Id
index: IndexName
include_source_on_error?: boolean
pipeline?: string
refresh?: Refresh
routing?: Routing
@@ -639,6 +644,7 @@ export interface IndexRequest<TDocument = unknown> extends RequestBase {
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
op_type?: OpType
pipeline?: string
refresh?: Refresh
@@ -1989,6 +1995,7 @@ export interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown>
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
lang?: string
refresh?: Refresh
require_alias?: boolean
@@ -13884,20 +13891,21 @@ export interface MlAnomaly {
}
export interface MlAnomalyCause {
actual: double[]
by_field_name: Name
by_field_value: string
correlated_by_field_value: string
field_name: Field
function: string
function_description: string
influencers: MlInfluence[]
over_field_name: Name
over_field_value: string
partition_field_name: string
partition_field_value: string
actual?: double[]
by_field_name?: Name
by_field_value?: string
correlated_by_field_value?: string
field_name?: Field
function?: string
function_description?: string
geo_results?: MlGeoResults
influencers?: MlInfluence[]
over_field_name?: Name
over_field_value?: string
partition_field_name?: string
partition_field_value?: string
probability: double
typical: double[]
typical?: double[]
}
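Because most `MlAnomalyCause` fields are now optional (and `geo_results` is new), downstream code should narrow before use; a sketch, assuming the client's generated types module as the import path:

[source,ts]
----
import type { MlAnomalyCause } from '@elastic/elasticsearch/lib/api/types'

function describeCause (cause: MlAnomalyCause): string {
  // `probability` is still required; most other fields may be absent
  const fn = cause.function ?? 'unknown'
  const point = cause.geo_results?.actual_point ?? 'n/a'
  return `${fn} (p=${cause.probability}, actual_point=${point})`
}
----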
export interface MlAnomalyExplanation {
@@ -14466,8 +14474,8 @@ export interface MlFilterRef {
export type MlFilterType = 'include' | 'exclude'
export interface MlGeoResults {
actual_point: string
typical_point: string
actual_point?: string
typical_point?: string
}
export interface MlHyperparameter {
@@ -19031,6 +19039,18 @@ export interface ShutdownPutNodeRequest extends RequestBase {
export type ShutdownPutNodeResponse = AcknowledgedResponseBase
export interface SimulateIngestIngestDocumentSimulationKeys {
_id: Id
_index: IndexName
_source: Record<string, any>
_version: SpecUtilsStringified<VersionNumber>
executed_pipelines: string[]
ignored_fields?: Record<string, string>[]
error?: ErrorCause
}
export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys
& { [property: string]: string | Id | IndexName | Record<string, any> | SpecUtilsStringified<VersionNumber> | string[] | Record<string, string>[] | ErrorCause }
export interface SimulateIngestRequest extends RequestBase {
index?: IndexName
pipeline?: PipelineName
@@ -19042,7 +19062,11 @@ export interface SimulateIngestRequest extends RequestBase {
}
export interface SimulateIngestResponse {
docs: IngestSimulateDocumentResult[]
docs: SimulateIngestSimulateIngestDocumentResult[]
}
export interface SimulateIngestSimulateIngestDocumentResult {
doc?: SimulateIngestIngestDocumentSimulation
}
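A sketch of consuming the retyped response, assuming this client version exposes the `simulate.ingest` API:

[source,ts]
----
const response = await client.simulate.ingest({
  index: 'my-index',
  docs: [{ _source: { title: 'hello' } }]
})
for (const result of response.docs) {
  // each `doc` now surfaces the executed pipelines and any per-document error
  console.log(result.doc?.executed_pipelines, result.doc?.error)
}
----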
export interface SlmConfiguration {

View File

@@ -34,6 +34,8 @@ export interface BulkCreateOperation extends BulkWriteOperation {
export interface BulkDeleteOperation extends BulkOperationBase {
}
export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed'
export interface BulkIndexOperation extends BulkWriteOperation {
}
@@ -58,6 +60,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
index?: IndexName
include_source_on_error?: boolean
list_executed_pipelines?: boolean
pipeline?: string
refresh?: Refresh
@@ -84,6 +87,7 @@ export interface BulkResponseItem {
_id?: string | null
_index: string
status: integer
failure_store?: BulkFailureStoreStatus
error?: ErrorCause
_primary_term?: long
result?: string
@@ -170,6 +174,7 @@ export interface CountResponse {
export interface CreateRequest<TDocument = unknown> extends RequestBase {
id: Id
index: IndexName
include_source_on_error?: boolean
pipeline?: string
refresh?: Refresh
routing?: Routing
@@ -660,6 +665,7 @@ export interface IndexRequest<TDocument = unknown> extends RequestBase {
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
op_type?: OpType
pipeline?: string
refresh?: Refresh
@@ -2060,6 +2066,7 @@ export interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown>
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
lang?: string
refresh?: Refresh
require_alias?: boolean
@@ -14149,20 +14156,21 @@ export interface MlAnomaly {
}
export interface MlAnomalyCause {
actual: double[]
by_field_name: Name
by_field_value: string
correlated_by_field_value: string
field_name: Field
function: string
function_description: string
influencers: MlInfluence[]
over_field_name: Name
over_field_value: string
partition_field_name: string
partition_field_value: string
actual?: double[]
by_field_name?: Name
by_field_value?: string
correlated_by_field_value?: string
field_name?: Field
function?: string
function_description?: string
geo_results?: MlGeoResults
influencers?: MlInfluence[]
over_field_name?: Name
over_field_value?: string
partition_field_name?: string
partition_field_value?: string
probability: double
typical: double[]
typical?: double[]
}
export interface MlAnomalyExplanation {
@@ -14731,8 +14739,8 @@ export interface MlFilterRef {
export type MlFilterType = 'include' | 'exclude'
export interface MlGeoResults {
actual_point: string
typical_point: string
actual_point?: string
typical_point?: string
}
export interface MlHyperparameter {
@@ -19544,6 +19552,18 @@ export interface ShutdownPutNodeRequest extends RequestBase {
export type ShutdownPutNodeResponse = AcknowledgedResponseBase
export interface SimulateIngestIngestDocumentSimulationKeys {
_id: Id
_index: IndexName
_source: Record<string, any>
_version: SpecUtilsStringified<VersionNumber>
executed_pipelines: string[]
ignored_fields?: Record<string, string>[]
error?: ErrorCause
}
export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys
& { [property: string]: string | Id | IndexName | Record<string, any> | SpecUtilsStringified<VersionNumber> | string[] | Record<string, string>[] | ErrorCause }
export interface SimulateIngestRequest extends RequestBase {
index?: IndexName
pipeline?: PipelineName
@@ -19558,7 +19578,11 @@ export interface SimulateIngestRequest extends RequestBase {
}
export interface SimulateIngestResponse {
docs: IngestSimulateDocumentResult[]
docs: SimulateIngestSimulateIngestDocumentResult[]
}
export interface SimulateIngestSimulateIngestDocumentResult {
doc?: SimulateIngestIngestDocumentSimulation
}
export interface SlmConfiguration {