Auto-generated API code (#2715)

Elastic Machine
2025-04-07 21:42:06 +02:00
committed by GitHub
parent c988c44f66
commit 73ef18836e
3 changed files with 18 additions and 206 deletions


@@ -7552,23 +7552,6 @@ client.inference.get({ ... })
- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type
- **`inference_id` (Optional, string)**: The inference Id
## client.inference.postEisChatCompletion [_inference.post_eis_chat_completion]
Perform a chat completion task through the Elastic Inference Service (EIS).
Perform a chat completion inference task with the `elastic` service.
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion)
```ts
client.inference.postEisChatCompletion({ eis_inference_id })
```
### Arguments [_arguments_inference.post_eis_chat_completion]
#### Request (object) [_request_inference.post_eis_chat_completion]
- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })**
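Based on the request object above, a minimal sketch of how this (now removed) endpoint was invoked; the endpoint ID, model name, and message shape are illustrative assumptions, not values from this commit:

```ts
// Hypothetical call against the removed EIS chat completion endpoint.
// The chat_completion task only supports streaming, so the result is a stream.
const response = await client.inference.postEisChatCompletion({
  eis_inference_id: 'my-eis-endpoint',
  chat_completion_request: {
    model: 'my-model-id',
    messages: [{ role: 'user', content: 'Say hello' }],
    temperature: 0.7
  }
})
```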
## client.inference.put [_inference.put]
Create an inference endpoint.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
@@ -7775,26 +7758,6 @@ These settings are specific to the `cohere` service.
- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
These settings are specific to the task type you specified.
## client.inference.putEis [_inference.put_eis]
Create an Elastic Inference Service (EIS) inference endpoint.
Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis)
```ts
client.inference.putEis({ task_type, eis_inference_id, service, service_settings })
```
### Arguments [_arguments_inference.put_eis]
#### Request (object) [_request_inference.put_eis]
- **`task_type` (Enum("chat_completion"))**: The type of the inference task that the model will perform.
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`.
- **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service.
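For reference, a minimal sketch of how the removed endpoint was created, following the argument list above; the endpoint and model IDs are placeholders, not values from this commit:

```ts
// Hypothetical creation of an EIS inference endpoint via the removed API.
await client.inference.putEis({
  task_type: 'chat_completion',
  eis_inference_id: 'my-eis-endpoint',
  service: 'elastic',
  service_settings: { model_id: 'my-model-id' }
})
```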
## client.inference.putElasticsearch [_inference.put_elasticsearch]
Create an Elasticsearch inference endpoint.


@@ -77,15 +77,6 @@ export default class Inference {
body: [],
query: []
},
'inference.post_eis_chat_completion': {
path: [
'eis_inference_id'
],
body: [
'chat_completion_request'
],
query: []
},
'inference.put': {
path: [
'task_type',
@@ -174,17 +165,6 @@ export default class Inference {
],
query: []
},
'inference.put_eis': {
path: [
'task_type',
'eis_inference_id'
],
body: [
'service',
'service_settings'
],
query: []
},
'inference.put_elasticsearch': {
path: [
'task_type',
@@ -583,53 +563,6 @@ export default class Inference {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion | Elasticsearch API documentation}
*/
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePostEisChatCompletionResponse>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePostEisChatCompletionResponse, unknown>>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<T.InferencePostEisChatCompletionResponse>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['inference.post_eis_chat_completion']
const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
let body: any = params.body ?? undefined
for (const key in params) {
if (acceptedBody.includes(key)) {
// @ts-expect-error
body = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
}
}
const method = 'POST'
const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream`
const meta: TransportRequestMetadata = {
name: 'inference.post_eis_chat_completion',
pathParts: {
eis_inference_id: params.eis_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
@@ -1033,64 +966,6 @@ export default class Inference {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis | Elasticsearch API documentation}
*/
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['inference.put_eis']
const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
}
}
const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.put_eis',
pathParts: {
task_type: params.task_type,
eis_inference_id: params.eis_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation}


@@ -15503,8 +15503,10 @@ export interface ClusterHealthHealthResponseBody {
active_primary_shards: integer
/** The total number of active primary and replica shards. */
active_shards: integer
/** The ratio of active shards in the cluster expressed as a string formatted percentage. */
active_shards_percent?: string
/** The ratio of active shards in the cluster expressed as a percentage. */
active_shards_percent_as_number: Percentage
active_shards_percent_as_number: double
/** The name of the cluster. */
cluster_name: Name
/** The number of shards whose allocation has been delayed by the timeout settings. */
@@ -15566,7 +15568,7 @@ export interface ClusterHealthRequest extends RequestBase {
/** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */
wait_for_events?: WaitForEvents
/** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */
wait_for_nodes?: string | integer
wait_for_nodes?: ClusterHealthWaitForNodes
/** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */
wait_for_no_initializing_shards?: boolean
/** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */
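The hunks above add an optional string-formatted `active_shards_percent` to the health response and widen `wait_for_nodes` to the new `ClusterHealthWaitForNodes` alias. A minimal sketch of how a caller might exercise both; the node count, timeout, and logged values are illustrative assumptions:

```ts
// Hypothetical health check: wait until at least two nodes are available.
const health = await client.cluster.health({ wait_for_nodes: '>=2', timeout: '30s' })
console.log(health.active_shards_percent_as_number) // numeric ratio, e.g. 100
console.log(health.active_shards_percent)            // optional string form, e.g. '100.0%'
```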
@@ -15591,6 +15593,8 @@ export interface ClusterHealthShardHealthStats {
unassigned_primary_shards: integer
}
export type ClusterHealthWaitForNodes = string | integer
export interface ClusterInfoRequest extends RequestBase {
/** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */
target: ClusterInfoTargets
@@ -21652,18 +21656,6 @@ export type InferenceDenseByteVector = byte[]
export type InferenceDenseVector = float[]
export interface InferenceEisServiceSettings {
/** The name of the model to use for the inference task. */
model_id: string
/** This setting helps to minimize the number of rate limit errors returned.
* By default, the `elastic` service sets the number of requests allowed per minute to `240` in case of `chat_completion`. */
rate_limit?: InferenceRateLimitSetting
}
export type InferenceEisServiceType = 'elastic'
export type InferenceEisTaskType = 'chat_completion'
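For context, the removed `InferenceEisServiceSettings` shape corresponded to a payload like the following sketch; the `requests_per_minute` field inside the rate-limit object is an assumption based on the default described above, and the model ID is a placeholder:

```ts
// Hypothetical service_settings payload for the removed `elastic` service.
// 240 requests per minute mirrors the documented chat_completion default.
const serviceSettings = {
  model_id: 'my-model-id',
  rate_limit: { requests_per_minute: 240 }
}
```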
export interface InferenceElasticsearchServiceSettings {
/** Adaptive allocations configuration details.
* If `enabled` is true, the number of allocations of the model is set based on the current load the process gets.
@@ -22154,18 +22146,6 @@ export interface InferenceGetResponse {
endpoints: InferenceInferenceEndpointInfo[]
}
export interface InferencePostEisChatCompletionRequest extends RequestBase {
/** The unique identifier of the inference endpoint. */
eis_inference_id: Id
chat_completion_request?: InferenceRequestChatCompletion
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { eis_inference_id?: never, chat_completion_request?: never }
}
export type InferencePostEisChatCompletionResponse = StreamResult
export interface InferencePutRequest extends RequestBase {
/** The task type */
task_type?: InferenceTaskType
@@ -22315,24 +22295,6 @@ export interface InferencePutCohereRequest extends RequestBase {
export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
export interface InferencePutEisRequest extends RequestBase {
/** The type of the inference task that the model will perform.
* NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */
task_type: InferenceEisTaskType
/** The unique identifier of the inference endpoint. */
eis_inference_id: Id
/** The type of service supported for the specified task type. In this case, `elastic`. */
service: InferenceEisServiceType
/** Settings used to install the inference model. These settings are specific to the `elastic` service. */
service_settings: InferenceEisServiceSettings
/** All values in `body` will be added to the request body. */
body?: string | { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never }
/** All values in `querystring` will be added to the request querystring. */
querystring?: { [key: string]: any } & { task_type?: never, eis_inference_id?: never, service?: never, service_settings?: never }
}
export type InferencePutEisResponse = InferenceInferenceEndpointInfo
export interface InferencePutElasticsearchRequest extends RequestBase {
/** The type of the inference task that the model will perform. */
task_type: InferenceElasticsearchTaskType
@@ -23044,6 +23006,13 @@ export interface IngestInferenceProcessor extends IngestProcessorBase {
field_map?: Record<Field, any>
/** Contains the inference type and its options. */
inference_config?: IngestInferenceConfig
/** Input fields for inference and output (destination) fields for the inference results.
* This option is incompatible with the target_field and field_map options. */
input_output?: IngestInputConfig | IngestInputConfig[]
/** If true and any of the input fields defined in input_output are missing,
* then those missing fields are quietly ignored; otherwise a missing field causes a failure.
* Only applies when using input_output configurations to explicitly list the input fields. */
ignore_missing?: boolean
}
export interface IngestIngest {
@@ -23052,6 +23021,11 @@ export interface IngestIngest {
pipeline?: Name
}
export interface IngestInputConfig {
input_field: string
output_field: string
}
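The additions above give the inference processor a per-field `input_output` mapping and an `ignore_missing` flag. A minimal sketch of a pipeline using them; the pipeline ID, field names, and model ID are placeholders, not values from this commit:

```ts
// Hypothetical ingest pipeline with an inference processor using the new options.
// ignore_missing lets documents without `body_text` pass through without failing.
await client.ingest.putPipeline({
  id: 'my-inference-pipeline',
  processors: [{
    inference: {
      model_id: 'my-model-id',
      input_output: [{ input_field: 'body_text', output_field: 'ml.tokens' }],
      ignore_missing: true
    }
  }]
})
```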
export interface IngestIpLocationProcessor extends IngestProcessorBase {
/** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
database_file?: string