[[reference-inference]]
== client.inference
////////
===========================================================================================================================
This file is autogenerated. DO NOT send pull requests that change this file directly.
You should update the script that does the generation, which can be found in:
https://github.com/elastic/elastic-client-generator-js

You can run the script with the following command:
npm run elasticsearch -- --version <version>
===========================================================================================================================
////////
++++
<style>
.lang-ts a.xref {
  text-decoration: underline !important;
}
</style>
++++
[discrete]
[[client.inference.delete]]
== `client.inference.delete()`
Delete an inference endpoint
{ref}/delete-inference-api.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: InferenceDeleteRequest, options?: TransportRequestOptions) => Promise<InferenceDeleteResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface InferenceDeleteRequest extends <<RequestBase>> {
  task_type?: <<InferenceTaskType>>
  inference_id: <<Id>>
  dry_run?: boolean
  force?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type InferenceDeleteResponse = <<InferenceDeleteInferenceEndpointResult>>
----
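[discrete]
=== Example
A minimal usage sketch. The endpoint ID `my-inference-endpoint` is a hypothetical example; `dry_run` and `force` are the request options listed above.
[source,ts]
----
// Delete an inference endpoint by ID. Setting `force: true` deletes the
// endpoint even if it is still in use (for example, by a semantic_text field).
const response = await client.inference.delete({
  inference_id: 'my-inference-endpoint', // hypothetical endpoint ID
  force: true
})
console.log(response)
----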
[discrete]
[[client.inference.get]]
== `client.inference.get()`
Get an inference endpoint
{ref}/get-inference-api.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: InferenceGetRequest, options?: TransportRequestOptions) => Promise<InferenceGetResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface InferenceGetRequest extends <<RequestBase>> {
  task_type?: <<InferenceTaskType>>
  inference_id?: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface InferenceGetResponse {
  endpoints: <<InferenceInferenceEndpointInfo>>[]
}
----
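[discrete]
=== Example
A minimal usage sketch. Because both request fields are optional, an empty request lists every endpoint; this assumes each returned endpoint object carries an `inference_id` field.
[source,ts]
----
// List all inference endpoints. Passing task_type and/or inference_id
// instead would narrow the result to matching endpoints.
const response = await client.inference.get({})
for (const endpoint of response.endpoints) {
  console.log(endpoint.inference_id)
}
----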
[discrete]
[[client.inference.inference]]
== `client.inference.inference()`
Perform inference on the service
{ref}/post-inference-api.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: InferenceInferenceRequest, options?: TransportRequestOptions) => Promise<InferenceInferenceResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface InferenceInferenceRequest extends <<RequestBase>> {
  task_type?: <<InferenceTaskType>>
  inference_id: <<Id>>
  timeout?: <<Duration>>
  query?: string
  input: string | string[]
  task_settings?: <<InferenceTaskSettings>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type InferenceInferenceResponse = <<InferenceInferenceResult>>
----
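[discrete]
=== Example
A sketch of a sparse embedding request. The endpoint ID and task type are hypothetical; `input` may be a single string or an array of strings, per the request interface above.
[source,ts]
----
// Run inference against an existing endpoint. When task_type is given,
// it should match the task type the endpoint was created with.
const response = await client.inference.inference({
  inference_id: 'my-inference-endpoint', // hypothetical endpoint ID
  task_type: 'sparse_embedding',
  input: ['The quick brown fox jumps over the lazy dog']
})
console.log(response)
----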
[discrete]
[[client.inference.put]]
== `client.inference.put()`
Create an inference endpoint
{ref}/put-inference-api.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: InferencePutRequest, options?: TransportRequestOptions) => Promise<InferencePutResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface InferencePutRequest extends <<RequestBase>> {
  task_type?: <<InferenceTaskType>>
  inference_id: <<Id>>
  inference_config?: <<InferenceInferenceEndpoint>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type InferencePutResponse = <<InferenceInferenceEndpointInfo>>
----
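[discrete]
=== Example
A sketch of creating a text embedding endpoint. The endpoint ID, service name, model ID, and service settings shown here are illustrative assumptions; consult the {ref}/put-inference-api.html[{es} documentation] for the services and settings your deployment supports.
[source,ts]
----
// Create an inference endpoint. `inference_config` carries the service name
// plus service-specific settings (the values below are assumptions).
const response = await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-embedding-endpoint', // hypothetical endpoint ID
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.multilingual-e5-small',
      num_allocations: 1,
      num_threads: 1
    }
  }
})
console.log(response)
----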
[discrete]
[[client.inference.streamInference]]
== `client.inference.streamInference()`
Perform streaming inference
[discrete]
=== Function signature
[source,ts]
----
(request: InferenceStreamInferenceRequest, options?: TransportRequestOptions) => Promise<InferenceStreamInferenceResponse>
----
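[discrete]
=== Example
A hedged sketch only: the request fields shown (`inference_id`, `task_type`, `input`) are assumed to mirror `client.inference.inference()`, and `asStream` is a standard transport request option for receiving the raw response body as a stream rather than a parsed object.
[source,ts]
----
// Stream completion results as they are generated. With `asStream: true`
// the client returns the raw response body (server-sent events) as a stream.
const stream = await client.inference.streamInference({
  inference_id: 'my-completion-endpoint', // hypothetical endpoint ID
  task_type: 'completion',
  input: 'Write a haiku about Elasticsearch'
}, { asStream: true })
----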