Auto-generated API code (#2646)

Elastic Machine
2025-03-20 03:00:58 +02:00
committed by GitHub
parent 38cc36656c
commit ccd69195dd
61 changed files with 1789 additions and 831 deletions

@@ -3,17 +3,11 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
querystring: {
format: "json",
},
body: {
query:
"\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",
include_ccs_metadata: true,
},
const response = await client.esql.asyncQuery({
format: "json",
query:
"\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",
include_ccs_metadata: true,
});
console.log(response);
----
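
The new `esql.asyncQuery` helper returns the async-query metadata directly, so a follow-up fetch can reuse the returned ID. A minimal sketch of that flow, assuming the response exposes `id` and `is_running` fields while the query is still running:

[source, js]
----
const started = await client.esql.asyncQuery({
  query: "\n    FROM my-index-000001\n    | LIMIT 2\n  ",
});
if (started.is_running) {
  // Assumed fields: `id` and `is_running` from the async query response.
  const result = await client.esql.asyncQueryGet({
    id: started.id,
    wait_for_completion_timeout: "30s",
  });
  console.log(result);
}
----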

@@ -3,23 +3,20 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_application/search_application/my-app/_render_query",
body: {
params: {
query_string: "my first query",
text_fields: [
{
name: "title",
boost: 5,
},
{
name: "description",
boost: 1,
},
],
},
const response = await client.searchApplication.renderQuery({
name: "my-app",
params: {
query_string: "my first query",
text_fields: [
{
name: "title",
boost: 5,
},
{
name: "description",
boost: 1,
},
],
},
});
console.log(response);

@@ -3,18 +3,16 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
},
const response = await client.inference.streamInference({
task_type: "chat_completion",
inference_id: "openai-completion",
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
});
console.log(response);
----

@@ -0,0 +1,25 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.createFrom({
source: "my-index",
dest: "my-new-index",
create_from: {
settings_override: {
index: {
number_of_shards: 5,
},
},
mappings_override: {
properties: {
field2: {
type: "boolean",
},
},
},
},
});
console.log(response);
----

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.inference.put({
task_type: "my-inference-endpoint",
inference_id: "_update",
const response = await client.inference.update({
inference_id: "my-inference-endpoint",
inference_config: {
service_settings: {
api_key: "<API_KEY>",

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.cancelMigrateReindex({
index: "my-data-stream",
});
console.log(response);
----

@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.delegatePki({
x509_certificate_chain: [
"MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==",
],
});
console.log(response);
----

@@ -3,27 +3,23 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_ingest/_simulate",
  body: {
    docs: [
      {
        _index: "my-index",
        _id: "123",
        _source: {
          foo: "bar",
        },
      },
      {
        _index: "my-index",
        _id: "456",
        _source: {
          foo: "rab",
        },
      },
    ],
  },
const response = await client.simulate.ingest({
  docs: [
    {
      _index: "my-index",
      _id: "123",
      _source: {
        foo: "bar",
      },
    },
    {
      _index: "my-index",
      _id: "456",
      _source: {
        foo: "rab",
      },
    },
  ],
});
console.log(response);
----

@@ -0,0 +1,15 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.migrateReindex({
reindex: {
source: {
index: "my-data-stream",
},
mode: "upgrade",
},
});
console.log(response);
----

@@ -3,14 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/logout",
body: {
token:
"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
refresh_token: "vLBPvmAB6KvwvJZr27cS",
},
const response = await client.security.oidcLogout({
token:
"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
refresh_token: "vLBPvmAB6KvwvJZr27cS",
});
console.log(response);
----

@@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
  path: "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
querystring: {
wait_for_completion_timeout: "30s",
},
const response = await client.esql.asyncQueryGet({
id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
wait_for_completion_timeout: "30s",
});
console.log(response);
----

@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.createFrom({
source: "my-index",
dest: "my-new-index",
create_from: {
settings_override: {
index: {
"blocks.write": null,
"blocks.read": null,
"blocks.read_only": null,
"blocks.read_only_allow_delete": null,
"blocks.metadata": null,
},
},
},
});
console.log(response);
----

@@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "semantic-embeddings",
mappings: {
properties: {
semantic_text: {
type: "semantic_text",
},
content: {
type: "text",
copy_to: "semantic_text",
},
},
},
});
console.log(response);
----
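
With the mapping above, `copy_to` routes the `content` value into the `semantic_text` field at index time. A minimal follow-up sketch, with a hypothetical document body:

[source, js]
----
const response = await client.index({
  index: "semantic-embeddings",
  document: {
    // Hypothetical content; `copy_to` feeds it into `semantic_text`.
    content: "Elasticsearch is a distributed search and analytics engine.",
  },
});
console.log(response);
----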

@@ -3,14 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
body: {
query:
"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ",
wait_for_completion_timeout: "2s",
},
const response = await client.esql.asyncQuery({
query:
"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ",
wait_for_completion_timeout: "2s",
});
console.log(response);
----

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
  path: "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
const response = await client.esql.asyncQueryGet({
id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
});
console.log(response);
----

@@ -3,43 +3,41 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_inference/chat_completion/openai-completion/_stream",
  body: {
    messages: [
      {
        role: "user",
        content: [
          {
            type: "text",
            text: "What's the price of a scarf?",
          },
        ],
      },
    ],
    tools: [
      {
        type: "function",
        function: {
          name: "get_current_price",
          description: "Get the current price of a item",
          parameters: {
            type: "object",
            properties: {
              item: {
                id: "123",
              },
            },
          },
        },
      },
    ],
    tool_choice: {
      type: "function",
      function: {
        name: "get_current_price",
      },
    },
  },
const response = await client.inference.streamInference({
  task_type: "chat_completion",
  inference_id: "openai-completion",
  messages: [
    {
      role: "user",
      content: [
        {
          type: "text",
          text: "What's the price of a scarf?",
        },
      ],
    },
  ],
  tools: [
    {
      type: "function",
      function: {
        name: "get_current_price",
        description: "Get the current price of a item",
        parameters: {
          type: "object",
          properties: {
            item: {
              id: "123",
            },
          },
        },
      },
    },
  ],
  tool_choice: {
    type: "function",
    function: {
      name: "get_current_price",
    },
  },
});

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.getMigrateReindexStatus({
index: "my-data-stream",
});
console.log(response);
----

@@ -6,6 +6,11 @@
const response = await client.ml.startTrainedModelDeployment({
model_id: "my_model",
deployment_id: "my_model_for_search",
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
});
console.log(response);
----

@@ -3,12 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/completion/openai-completion/_stream",
body: {
input: "What is Elastic?",
},
const response = await client.inference.streamInference({
task_type: "completion",
inference_id: "openai-completion",
input: "What is Elastic?",
});
console.log(response);
----

@@ -3,14 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/prepare",
body: {
realm: "oidc1",
state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",
nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5",
},
const response = await client.security.oidcPrepareAuthentication({
realm: "oidc1",
state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",
nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5",
});
console.log(response);
----

@@ -3,17 +3,11 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
querystring: {
format: "json",
},
body: {
query:
"\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",
include_ccs_metadata: true,
},
const response = await client.esql.asyncQuery({
format: "json",
query:
"\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",
include_ccs_metadata: true,
});
console.log(response);
----

@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.getDataLifecycleStats({
human: "true",
pretty: "true",
});
console.log(response);
----

@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.createFrom({
source: "my-index",
dest: "my-new-index",
create_from: null,
});
console.log(response);
----

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.esql.asyncQueryDelete({
id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=",
});
console.log(response);
----

@@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/api_key/_bulk_update",
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
},
const response = await client.security.bulkUpdateApiKeys({
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
});
console.log(response);
----

@@ -3,35 +3,31 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_text_structure/find_message_structure",
body: {
messages: [
"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128",
"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]",
"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]",
"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]",
"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled",
"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled",
"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled",
"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]",
"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]",
"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized",
"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...",
],
},
const response = await client.textStructure.findMessageStructure({
messages: [
"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128",
"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]",
"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]",
"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]",
"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled",
"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled",
"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled",
"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]",
"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]",
"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized",
"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...",
],
});
console.log(response);
----

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "DELETE",
path: "/_ingest/ip_location/database/my-database-id",
const response = await client.ingest.deleteIpLocationDatabase({
id: "my-database-id",
});
console.log(response);
----

@@ -3,30 +3,26 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_security/api_key/_bulk_update",
  body: {
    ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
    role_descriptors: {
      "role-a": {
        indices: [
          {
            names: ["*"],
            privileges: ["write"],
          },
        ],
      },
    },
    metadata: {
      environment: {
        level: 2,
        trusted: true,
        tags: ["production"],
      },
    },
    expiration: "30d",
  },
const response = await client.security.bulkUpdateApiKeys({
  ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
  role_descriptors: {
    "role-a": {
      indices: [
        {
          names: ["*"],
          privileges: ["write"],
        },
      ],
    },
  },
  metadata: {
    environment: {
      level: 2,
      trusted: true,
      tags: ["production"],
    },
  },
  expiration: "30d",
});
console.log(response);
----

@@ -3,32 +3,30 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_inference/chat_completion/openai-completion/_stream",
  body: {
    messages: [
      {
        role: "assistant",
        content: "Let's find out what the weather is",
        tool_calls: [
          {
            id: "call_KcAjWtAww20AihPHphUh46Gd",
            type: "function",
            function: {
              name: "get_current_weather",
              arguments: '{"location":"Boston, MA"}',
            },
          },
        ],
      },
      {
        role: "tool",
        content: "The weather is cold",
        tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
      },
    ],
  },
const response = await client.inference.streamInference({
  task_type: "chat_completion",
  inference_id: "openai-completion",
  messages: [
    {
      role: "assistant",
      content: "Let's find out what the weather is",
      tool_calls: [
        {
          id: "call_KcAjWtAww20AihPHphUh46Gd",
          type: "function",
          function: {
            name: "get_current_weather",
            arguments: '{"location":"Boston, MA"}',
          },
        },
      ],
    },
    {
      role: "tool",
      content: "The weather is cold",
      tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
    },
  ],
});
console.log(response);
----

@@ -3,10 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_ingest/ip_location/database/my-database-1",
body: {
const response = await client.ingest.putIpLocationDatabase({
id: "my-database-1",
configuration: {
name: "GeoIP2-Domain",
maxmind: {
account_id: "1234567",

@@ -0,0 +1,8 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.resolveCluster();
console.log(response);
----

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "DELETE",
path: "/_ingest/ip_location/database/example-database-id",
const response = await client.ingest.deleteIpLocationDatabase({
id: "example-database-id",
});
console.log(response);
----

@@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_application/analytics/my_analytics_collection/event/search_click",
body: {
const response = await client.searchApplication.postBehavioralAnalyticsEvent({
collection_name: "my_analytics_collection",
event_type: "search_click",
payload: {
session: {
id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9",
},

@@ -3,16 +3,12 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/authenticate",
body: {
redirect_uri:
"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM",
realm: "oidc1",
},
const response = await client.security.oidcAuthenticate({
redirect_uri:
"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM",
realm: "oidc1",
});
console.log(response);
----

@@ -3,13 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_application/search_application/my_search_application/_render_query",
body: {
params: {
query_string: "rock climbing",
},
const response = await client.searchApplication.renderQuery({
name: "my_search_application",
params: {
query_string: "rock climbing",
},
});
console.log(response);

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.cancelMigrateReindex({
index: "my-data-stream",
});
console.log(response);
----

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.getMigrateReindexStatus({
index: "my-data-stream",
});
console.log(response);
----

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_application/search_application/my_search_application/_render_query",
const response = await client.searchApplication.renderQuery({
name: "my_search_application",
});
console.log(response);
----

@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.esql.asyncQueryStop({
id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
});
console.log(response);
----

@@ -208,13 +208,9 @@ const response = await client.bulk({
});
console.log(response);
const response1 = await client.transport.request({
method: "GET",
path: "/_text_structure/find_field_structure",
querystring: {
index: "test-logs",
field: "message",
},
const response1 = await client.textStructure.findFieldStructure({
index: "test-logs",
field: "message",
});
console.log(response1);
----

@@ -3,36 +3,32 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_ingest/_simulate",
  body: {
    docs: [
      {
        _index: "my-index",
        _id: "123",
        _source: {
          foo: "bar",
        },
      },
      {
        _index: "my-index",
        _id: "456",
        _source: {
          foo: "rab",
        },
      },
    ],
    pipeline_substitutions: {
      "my-pipeline": {
        processors: [
          {
            uppercase: {
              field: "foo",
            },
          },
        ],
      },
    },
  },
const response = await client.simulate.ingest({
  docs: [
    {
      _index: "my-index",
      _id: "123",
      _source: {
        foo: "bar",
      },
    },
    {
      _index: "my-index",
      _id: "456",
      _source: {
        foo: "rab",
      },
    },
  ],
  pipeline_substitutions: {
    "my-pipeline": {
      processors: [
        {
          uppercase: {
            field: "foo",
          },
        },
      ],
    },
  },
});

@@ -3,13 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/api_key/_bulk_update",
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
role_descriptors: {},
},
const response = await client.security.bulkUpdateApiKeys({
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
role_descriptors: {},
});
console.log(response);
----

@@ -3,69 +3,65 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_ingest/_simulate",
  body: {
    docs: [
      {
        _index: "my-index",
        _id: "id",
        _source: {
          foo: "bar",
        },
      },
      {
        _index: "my-index",
        _id: "id",
        _source: {
          foo: "rab",
        },
      },
    ],
    pipeline_substitutions: {
      "my-pipeline": {
        processors: [
          {
            set: {
              field: "field3",
              value: "value3",
            },
          },
        ],
      },
    },
    component_template_substitutions: {
      "my-component-template": {
        template: {
          mappings: {
            dynamic: "true",
            properties: {
              field3: {
                type: "keyword",
              },
            },
          },
          settings: {
            index: {
              default_pipeline: "my-pipeline",
            },
          },
        },
      },
    },
    index_template_substitutions: {
      "my-index-template": {
        index_patterns: ["my-index-*"],
        composed_of: ["component_template_1", "component_template_2"],
      },
    },
    mapping_addition: {
      dynamic: "strict",
      properties: {
        foo: {
          type: "keyword",
        },
      },
    },
  },
const response = await client.simulate.ingest({
  docs: [
    {
      _index: "my-index",
      _id: "id",
      _source: {
        foo: "bar",
      },
    },
    {
      _index: "my-index",
      _id: "id",
      _source: {
        foo: "rab",
      },
    },
  ],
  pipeline_substitutions: {
    "my-pipeline": {
      processors: [
        {
          set: {
            field: "field3",
            value: "value3",
          },
        },
      ],
    },
  },
  component_template_substitutions: {
    "my-component-template": {
      template: {
        mappings: {
          dynamic: "true",
          properties: {
            field3: {
              type: "keyword",
            },
          },
        },
        settings: {
          index: {
            default_pipeline: "my-pipeline",
          },
        },
      },
    },
  },
  index_template_substitutions: {
    "my-index-template": {
      index_patterns: ["my-index-*"],
      composed_of: ["component_template_1", "component_template_2"],
    },
  },
  mapping_addition: {
    dynamic: "strict",
    properties: {
      foo: {
        type: "keyword",
      },
    },
  },
});

@@ -3,13 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/prepare",
body: {
iss: "http://127.0.0.1:8080",
login_hint: "this_is_an_opaque_string",
},
const response = await client.security.oidcPrepareAuthentication({
iss: "http://127.0.0.1:8080",
login_hint: "this_is_an_opaque_string",
});
console.log(response);
----

@@ -3,10 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_ingest/ip_location/database/my-database-2",
body: {
const response = await client.ingest.putIpLocationDatabase({
id: "my-database-2",
configuration: {
name: "standard_location",
ipinfo: {},
},

@@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_ingest/ip_location/database/my-database-id",
const response = await client.ingest.getIpLocationDatabase({
id: "my-database-id",
});
console.log(response);
----

@@ -3,9 +3,6 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_security/settings",
});
const response = await client.security.getSettings();
console.log(response);
----

@@ -0,0 +1,14 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.createFrom({
source: "my-index",
dest: "my-new-index",
create_from: {
remove_index_blocks: false,
},
});
console.log(response);
----

@@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/prepare",
body: {
realm: "oidc1",
},
const response = await client.security.oidcPrepareAuthentication({
realm: "oidc1",
});
console.log(response);
----

@@ -3,38 +3,34 @@
[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_ingest/_simulate",
  body: {
    docs: [
      {
        _index: "my-index",
        _id: "123",
        _source: {
          foo: "foo",
        },
      },
      {
        _index: "my-index",
        _id: "456",
        _source: {
          bar: "rab",
        },
      },
    ],
    component_template_substitutions: {
      "my-mappings_template": {
        template: {
          mappings: {
            dynamic: "strict",
            properties: {
              foo: {
                type: "keyword",
              },
              bar: {
                type: "keyword",
              },
            },
          },
        },
      },
    },
  },
const response = await client.simulate.ingest({
  docs: [
    {
      _index: "my-index",
      _id: "123",
      _source: {
        foo: "foo",
      },
    },
    {
      _index: "my-index",
      _id: "456",
      _source: {
        bar: "rab",
      },
    },
  ],
  component_template_substitutions: {
    "my-mappings_template": {
      template: {
        mappings: {
          dynamic: "strict",
          properties: {
            foo: {
              type: "keyword",
            },
            bar: {
              type: "keyword",
            },
          },
        },
      },
    },
  },
});

@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.createFrom({
source: ".ml-anomalies-custom-example",
dest: ".reindexed-v9-ml-anomalies-custom-example",
create_from: null,
});
console.log(response);
----

@@ -221,7 +221,7 @@ client.count({ ... })
* *Request (object):*
** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
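
A minimal sketch tying the `index` and `query` parameters above together; the index name and field are hypothetical:

[source, js]
----
const response = await client.count({
  index: "my-index-000001",
  query: {
    match: { "user.id": "kimchy" },
  },
});
console.log(response.count);
----
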
@@ -324,9 +324,14 @@ client.create({ id, index })
** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
** *`document` (Optional, object)*: A document.
** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message if there are parsing errors.
** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `<index>/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias.
** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to be created).
** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
** *`version` (Optional, number)*: The explicit version number for concurrency control. It must be a non-negative long number.
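
A minimal sketch of `create` using the `document` and `refresh` parameters above; the index, ID, and document body are hypothetical:

[source, js]
----
const response = await client.create({
  id: "1",
  index: "my-index-000001",
  refresh: "wait_for", // block until the document is searchable
  document: {
    "user.id": "kimchy",
    message: "trying out Elasticsearch",
  },
});
console.log(response.result); // "created"
----
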
@@ -489,7 +494,7 @@ client.deleteByQuery({ index })
* *Request (object):*
** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
** *`max_docs` (Optional, number)*: The maximum number of documents to delete.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to delete specified with Query DSL.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to delete specified with Query DSL.
** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
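
A minimal sketch combining the `query` and `max_docs` parameters above; the index and field are hypothetical:

[source, js]
----
const response = await client.deleteByQuery({
  index: "my-index-000001",
  max_docs: 100,
  query: {
    term: { "user.id": "kimchy" },
  },
});
console.log(response.deleted);
----
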
@@ -652,7 +657,7 @@ client.explain({ id, index })
* *Request (object):*
** *`id` (string)*: The document identifier.
** *`index` (string)*: Index names that are used to limit the request. Only a single index name can be provided to this parameter.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
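
A minimal sketch of `explain` for a single hypothetical document:

[source, js]
----
const response = await client.explain({
  id: "1",
  index: "my-index-000001",
  query: {
    match: { message: "elasticsearch" },
  },
});
console.log(response.matched);
----
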
@@ -687,7 +692,7 @@ client.fieldCaps({ ... })
* *Request (object):*
** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.
** *`fields` (Optional, string | string[])*: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
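
A minimal sketch of `fieldCaps` with an `index_filter`, per the best-effort filtering note above; the index pattern and fields are hypothetical:

[source, js]
----
const response = await client.fieldCaps({
  index: "my-index-*",
  fields: ["user.id", "@timestamp"],
  index_filter: {
    // Indices whose shards all rewrite this to `match_none` may be skipped.
    range: {
      "@timestamp": { gte: "now-7d" },
    },
  },
});
console.log(response.fields);
----
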
@@ -1092,7 +1097,7 @@ client.knnSearch({ index, knn })
** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match.
** *`routing` (Optional, string)*: A list of specific routing values.
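
A minimal sketch of `knnSearch` with a `filter`, assuming a hypothetical index with a `dense_vector` field named `image-vector`:

[source, js]
----
const response = await client.knnSearch({
  index: "my-image-index",
  knn: {
    field: "image-vector",
    query_vector: [0.1, 0.2, 0.3], // must match the field's dimensions
    k: 10,
    num_candidates: 100,
  },
  filter: {
    term: { "file-type": "png" },
  },
});
console.log(response.hits.hits);
----
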
[discrete]
@@ -1307,7 +1312,7 @@ client.openPointInTime({ index, keep_alive })
* *Request (object):*
** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices
** *`keep_alive` (string | -1 | 0)*: Extend the length of time that the point in time persists.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random.
** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
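
A minimal sketch of the point-in-time lifecycle: open with `keep_alive`, search against the returned ID, then close; the index is hypothetical:

[source, js]
----
const pit = await client.openPointInTime({
  index: "my-index-000001",
  keep_alive: "1m",
});
const response = await client.search({
  pit: { id: pit.id, keep_alive: "1m" },
  query: { match_all: {} },
});
await client.closePointInTime({ id: pit.id });
console.log(response.hits.hits);
----
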
@@ -1724,9 +1729,9 @@ client.search({ ... })
** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: The approximate kNN search to run.
** *`rank` (Optional, { rrf })*: The Reciprocal Rank Fusion (RRF) to use.
** *`min_score` (Optional, number)*: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results.
** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit.
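
A minimal sketch pairing `query` with `post_filter`, so aggregations (if any) are computed before the post filter narrows the hits; the index and fields are hypothetical:

[source, js]
----
const response = await client.search({
  index: "my-index-000001",
  query: {
    match: { message: "elasticsearch" },
  },
  post_filter: {
    // Applied to hits only, after aggregations are calculated.
    term: { "user.id": "kimchy" },
  },
});
console.log(response.hits.total);
----
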
@@ -1938,7 +1943,7 @@ client.searchMvt({ index, field, zoom, x, y })
** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: The aggregation used to create a grid for the `field`.
** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
** *`size` (Optional, number)*: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
@ -2028,7 +2033,7 @@ client.termsEnum({ index, field })
** *`size` (Optional, number)*: The number of matching terms to return.
** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter an index shard if the provided query rewrites to `match_none`.
** *`string` (Optional, string)*: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered.
> info
> The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.
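As a minimal sketch, assuming an index `stackoverflow` with a keyword field `tags`:
[source, js]
----
const response = await client.termsEnum({
  index: "stackoverflow", // hypothetical index
  field: "tags",
  string: "kiba", // return indexed terms starting with "kiba"
});
console.log(response.terms);
----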
@ -2090,17 +2095,17 @@ client.termvectors({ index })
** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. This can be useful to find a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
** *`per_field_analyzer` (Optional, Record<string, string>)*: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
** *`fields` (Optional, string | string[])*: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
** *`field_statistics` (Optional, boolean)*: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field).
** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets.
** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads.
** *`positions` (Optional, boolean)*: If `true`, the response includes term positions.
** *`term_statistics` (Optional, boolean)*: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact.
** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit.
** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time.
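For example, a sketch that retrieves term and field statistics for a stored `text` field (the index and document ID are assumptions):
[source, js]
----
const response = await client.termvectors({
  index: "my-index-000001", // hypothetical index
  id: "1",
  fields: ["text"],
  term_statistics: true,
  field_statistics: true,
});
console.log(response);
----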
[discrete]
=== update
@ -2253,7 +2258,7 @@ client.updateByQuery({ index })
* *Request (object):*
** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
** *`max_docs` (Optional, number)*: The maximum number of documents to update.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to update using the Query DSL.
** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating.
** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices.
** *`conflicts` (Optional, Enum("abort" | "proceed"))*: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.
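A minimal sketch that increments a counter on every matching document and proceeds past version conflicts (the index, field, and script are assumptions):
[source, js]
----
const response = await client.updateByQuery({
  index: "my-index-000001", // hypothetical index
  conflicts: "proceed",
  query: { term: { "user.id": "kimchy" } },
  script: { source: "ctx._source.count++", lang: "painless" },
});
console.log(response.updated);
----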
@ -2420,9 +2425,9 @@ names matching these patterns in the hits.fields property of the response.
** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: Defines the approximate kNN search to run.
** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are
not included in the search results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
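For example, a hybrid sketch that combines a `query` clause with an approximate kNN clause, shown here with `client.search` (the index and the `dense_vector` field `my_vector` are assumptions; the vector length must match the mapping):
[source, js]
----
const response = await client.search({
  index: "my-index", // hypothetical index
  query: { match: { title: "mountain lake" } },
  knn: {
    field: "my_vector", // assumed dense_vector field
    query_vector: [0.04, 0.18, 0.95],
    k: 10,
    num_candidates: 100,
  },
});
console.log(response.hits.hits);
----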
@ -3866,6 +3871,7 @@ If no response is received before the timeout expires, the request fails and ret
[discrete]
==== health
Get the cluster health status.
You can also use the API to get the health status of only specified data streams and indices.
For data streams, the API retrieves the health status of the stream's backing indices.
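For example, a sketch that waits up to 30 seconds for an assumed index to reach at least `yellow` status:
[source, js]
----
const response = await client.cluster.health({
  index: "my-index-000001", // assumed index; omit to check the whole cluster
  wait_for_status: "yellow",
  timeout: "30s",
});
console.log(response.status);
----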
@ -4036,6 +4042,7 @@ If no response is received before the timeout expires, the request fails and ret
[discrete]
==== put_settings
Update the cluster settings.
Configure and update dynamic settings on a running cluster.
You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.
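A minimal sketch that persistently updates one dynamic setting (the value is an assumption):
[source, js]
----
const response = await client.cluster.putSettings({
  persistent: {
    "indices.recovery.max_bytes_per_sec": "50mb", // assumed value
  },
});
console.log(response.acknowledged);
----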
@ -5025,7 +5032,7 @@ client.eql.search({ index, query })
** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order
** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp"
** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs.
** *`keep_alive` (Optional, string | -1 | 0)*
** *`keep_on_completion` (Optional, boolean)*
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*
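For example, a sketch of an EQL event query against an assumed data stream:
[source, js]
----
const response = await client.eql.search({
  index: "my-data-stream", // hypothetical data stream
  query: 'process where process.name == "regsvr32.exe"',
});
console.log(response.hits);
----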
@ -5067,7 +5074,7 @@ client.esql.asyncQuery({ query })
* *Request (object):*
** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
** *`locale` (Optional, string)*
** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object
@ -5079,6 +5086,11 @@ name and the next level key is the column name.
** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
object with information about the clusters that participated in the search along with info such as shards
count.
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish.
By default, the request waits for 1 second for the query results.
If the query completes during this period, results are returned.
Otherwise, a query ID is returned that can later be used to retrieve the results.
** *`allow_partial_results` (Optional, boolean)*: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row.
It is valid only for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
@ -5090,10 +5102,6 @@ When this period expires, the query and its results are deleted, even if the que
If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
** *`keep_on_completion` (Optional, boolean)*: Indicates whether the query and its results are stored in the cluster.
If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
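As a sketch of the `wait_for_completion_timeout` flow (the index is an assumption): if the query is still running when the timeout expires, the response carries an ID that can be passed to `asyncQueryGet` later.
[source, js]
----
const response = await client.esql.asyncQuery({
  query: "FROM my-index | STATS COUNT(*) BY user.id | LIMIT 10",
  wait_for_completion_timeout: "2s",
});
if (response.id) {
  // The query did not finish within 2s; poll for the results by ID.
  const results = await client.esql.asyncQueryGet({ id: response.id });
  console.log(results);
}
----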
[discrete]
==== async_query_delete
@ -5188,7 +5196,7 @@ client.esql.query({ query })
* *Request (object):*
** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
** *`locale` (Optional, string)*
** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object
@ -5204,6 +5212,7 @@ count.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
** *`allow_partial_results` (Optional, boolean)*: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
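A minimal sketch that restricts the documents an ES|QL query runs on with a Query DSL `filter` (the index and timestamp field are assumptions):
[source, js]
----
const response = await client.esql.query({
  query: "FROM my-index | LIMIT 5",
  filter: { range: { "@timestamp": { gte: "now-1d" } } },
});
console.log(response.values);
----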
[discrete]
=== features
@ -5362,9 +5371,9 @@ Defaults to 10,000 hits.
names matching these patterns in the hits.fields property of the response.
** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are
not included in the search results.
** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*
** *`profile` (Optional, boolean)*
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*
@ -5458,7 +5467,7 @@ client.graph.explore({ index })
** *`index` (string | string[])*: Name of the index.
** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
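For example, a sketch seeded with a match query that extracts `product` vertices and connects them through related search terms (the index and fields are assumptions):
[source, js]
----
const response = await client.graph.explore({
  index: "clicklogs", // hypothetical index
  query: { match: { "query.raw": "midi" } },
  vertices: [{ field: "product" }],
  connections: { vertices: [{ field: "query.raw" }] },
});
console.log(response.vertices);
----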
@ -6331,6 +6340,8 @@ client.indices.existsIndexTemplate({ name })
* *Request (object):*
** *`name` (string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported.
** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
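A minimal sketch (the template name is an assumption); the response body is a boolean:
[source, js]
----
const exists = await client.indices.existsIndexTemplate({
  name: "my-template", // hypothetical template name
});
console.log(exists);
----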
[discrete]
@ -6407,8 +6418,6 @@ If the request can target data streams, this argument determines whether wildcar
Supports a list of values, such as `open,hidden`.
** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response.
** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics.
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
[discrete]
==== flush
@ -6838,7 +6847,7 @@ If successful, the request removes the alias and creates a data stream with the
The indices for the alias become hidden backing indices for the stream.
The write index for the alias becomes the write index for the stream.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream[Endpoint documentation]
[source,ts]
----
client.indices.migrateToDataStream({ name })
@ -6857,7 +6866,7 @@ client.indices.migrateToDataStream({ name })
Update data streams.
Performs one or more data stream modification actions in a single atomic operation.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream[Endpoint documentation]
[source,ts]
----
client.indices.modifyDataStream({ actions })
@ -6938,7 +6947,7 @@ NOTE: When promoting a data stream, ensure the local cluster has a data stream e
If this is missing, the data stream will not be able to roll over until a matching index template is created.
This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream[Endpoint documentation]
[source,ts]
----
client.indices.promoteDataStream({ name })
@ -6956,7 +6965,7 @@ client.indices.promoteDataStream({ name })
Create or update an alias.
Adds a data stream or index to an alias.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias[Endpoint documentation]
[source,ts]
----
client.indices.putAlias({ index, name })
@ -6972,7 +6981,7 @@ Wildcard patterns that match both data streams and indices return an error.
** *`name` (string)*: Alias to update.
If the alias doesn't exist, the request creates it.
Index alias names support date math.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access.
** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard.
If specified, this overwrites the `routing` value for indexing operations.
Data stream aliases don't support this parameter.
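For example, a sketch of a filtered alias (the index, alias, and field are assumptions):
[source, js]
----
const response = await client.indices.putAlias({
  index: "my-index-000001", // hypothetical index
  name: "my-filtered-alias",
  filter: { term: { "user.id": "kimchy" } },
});
console.log(response.acknowledged);
----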
@ -6995,7 +7004,7 @@ If no response is received before the timeout expires, the request fails and ret
Update data stream lifecycles.
Update the data stream lifecycle of the specified data streams.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle[Endpoint documentation]
[source,ts]
----
client.indices.putDataLifecycle({ name })
@ -7054,7 +7063,7 @@ This recursive merging strategy applies not only to field mappings, but also roo
If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
If an entry already exists with the same key, then it is overwritten by the new definition.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template[Endpoint documentation]
[source,ts]
----
client.indices.putIndexTemplate({ name })
@ -7146,13 +7155,13 @@ client.indices.putMapping({ index })
** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled, new string fields are checked
against 'dynamic_date_formats'; if a value matches, a new date
field is added instead of a string field.
** *`dynamic_templates` (Optional, Record<string, { mapping, runtime, match, path_match, unmatch, path_unmatch, match_mapping_type, unmatch_mapping_type, match_pattern }>[])*: Specify dynamic templates for the mapping.
** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index.
** *`_meta` (Optional, Record<string, User-defined value>)*: A mapping type can have custom metadata associated with it. This is
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, dims, element_type, index, index_options, similarity } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, enabled, priority, time_series_dimension } | { type, meta, inference_id, search_inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { type, index } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, index, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include:
- Field name
- Field data type
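A minimal sketch that adds one new field to an existing index (the names are assumptions):
[source, js]
----
const response = await client.indices.putMapping({
  index: "my-index-000001", // hypothetical index
  properties: {
    email: { type: "keyword" }, // hypothetical new field
  },
});
console.log(response.acknowledged);
----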
@ -7221,6 +7230,9 @@ hidden data streams. Supports a list of values, such as
received before the timeout expires, the request fails and returns an
error.
** *`preserve_existing` (Optional, boolean)*: If `true`, existing index settings remain unchanged.
** *`reopen` (Optional, boolean)*: Whether to close and reopen the index to apply non-dynamic settings.
If set to `true` the indices to which the settings are being applied
will be closed temporarily and then reopened in order to apply the changes.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the
timeout expires, the request fails and returns an error.
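For example, a sketch that updates a dynamic setting on an assumed index; for non-dynamic settings, passing `reopen: true` additionally closes and reopens the index:
[source, js]
----
const response = await client.indices.putSettings({
  index: "my-index-000001", // hypothetical index
  settings: { index: { number_of_replicas: 2 } }, // dynamic setting
});
console.log(response.acknowledged);
----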
@ -7388,6 +7400,7 @@ client.indices.reloadSearchAnalyzers({ index })
** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.)
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`resource` (Optional, string)*: The changed resource to reload analyzers from, if applicable.
[discrete]
==== resolve_cluster
@ -7579,6 +7592,8 @@ If no response is received before the timeout expires, the request fails and ret
If no response is received before the timeout expires, the request fails and returns an error.
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
** *`lazy` (Optional, boolean)*: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write.
Only allowed on data streams.
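A sketch of a lazy rollover on an assumed data stream; the stream is only marked to roll over at the next write:
[source, js]
----
const response = await client.indices.rollover({
  alias: "my-data-stream", // hypothetical data stream
  lazy: true,
});
console.log(response.rolled_over);
----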
[discrete]
==== segments
@ -7715,6 +7730,8 @@ client.indices.simulateIndexTemplate({ name })
* *Request (object):*
** *`name` (string)*: Name of the index to simulate
** *`create` (Optional, boolean)*: Whether the index template optionally defined in the body should only be dry-run added if new, or can also replace an existing one
** *`cause` (Optional, string)*: User-defined reason for dry-run creating the new template, for simulation purposes
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
@ -7760,6 +7777,7 @@ references a component template that might not exist
** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template
that uses deprecated components, Elasticsearch will emit a deprecation warning.
** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
** *`cause` (Optional, string)*: User-defined reason for dry-run creating the new template, for simulation purposes
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
@ -7893,7 +7911,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind
Create or update an alias.
Adds a data stream or index to an alias.
https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases[Endpoint documentation]
[source,ts]
----
client.indices.updateAliases({ ... })
@ -7927,7 +7945,7 @@ client.indices.validateQuery({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
Supports wildcards (`*`).
To search all data streams or indices, omit this parameter or use `*` or `_all`.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index.
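For example, a minimal sketch validating a term query against an assumed index:
[source, js]
----
const response = await client.indices.validateQuery({
  index: "my-index-000001", // hypothetical index
  query: { term: { "user.id": "kimchy" } },
});
console.log(response.valid);
----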
@ -7949,6 +7967,51 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
[discrete]
=== inference
[discrete]
==== chat_completion_unified
Perform chat completion inference
{ref}/chat-completion-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.chatCompletionUnified({ inference_id, messages })
----
[discrete]
==== Arguments
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`messages` ({ content, role, tool_call_id, tool_calls }[])*: A list of objects representing the conversation.
** *`model` (Optional, string)*: The ID of the model to use.
** *`max_completion_tokens` (Optional, number)*: The upper limit on the number of tokens that can be generated for a completion request.
** *`stop` (Optional, string[])*: A sequence of strings to control when the model should stop generating additional tokens.
** *`temperature` (Optional, float)*: The sampling temperature to use.
** *`tool_choice` (Optional, string | { type, function })*: Controls which tool is called by the model.
** *`tools` (Optional, { type, function }[])*: A list of tools that the model can call.
** *`top_p` (Optional, float)*: Nucleus sampling, an alternative to sampling with temperature.
** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete.
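A minimal sketch (the inference endpoint ID is an assumption; the endpoint must already exist, and this task type streams its response):
[source, js]
----
const response = await client.inference.chatCompletionUnified({
  inference_id: "openai-completion", // assumed existing endpoint
  messages: [{ role: "user", content: "What is Elastic?" }],
});
console.log(response);
----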
[discrete]
==== completion
Perform completion inference on the service
{ref}/post-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.completion({ inference_id, input })
----
[discrete]
==== Arguments
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`input` (string | string[])*: Inference input.
Either a string or an array of strings.
** *`task_settings` (Optional, User-defined value)*: Optional task settings
** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete.
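For example, a sketch against an assumed completion endpoint:
[source, js]
----
const response = await client.inference.completion({
  inference_id: "my-completion-endpoint", // assumed existing endpoint
  input: "What is Elastic?",
});
console.log(response);
----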
[discrete]
==== delete
Delete an inference endpoint
@ -7964,7 +8027,7 @@ client.inference.delete({ inference_id })
* *Request (object):*
** *`inference_id` (string)*: The inference identifier.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned.
** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.
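A minimal sketch of a dry run that lists what still references an assumed endpoint instead of deleting it:
[source, js]
----
const response = await client.inference.delete({
  inference_id: "my-inference-endpoint", // assumed existing endpoint
  dry_run: true,
});
console.log(response);
----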
@ -7982,43 +8045,9 @@ client.inference.get({ ... })
==== Arguments
* *Request (object):*
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
** *`inference_id` (Optional, string)*: The inference Id
[discrete]
==== inference
Perform inference on the service.
This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
It returns a response with the results of the tasks.
The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
> info
> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
{ref}/post-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.inference({ inference_id, input })
----
[discrete]
==== Arguments
* *Request (object):*
** *`inference_id` (string)*: The unique identifier for the inference endpoint.
** *`input` (string | string[])*: The text on which you want to perform the inference task.
It can be a single string or an array.
> info
> Inference endpoints for the `completion` task type currently only support a single string as input.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs.
** *`query` (Optional, string)*: The query input, which is required only for the `rerank` task.
It is not required for other tasks.
** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request.
These settings are specific to the task type you specified and override the task settings specified when initializing the service.
** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete.
[discrete]
==== put
Create an inference endpoint.
@ -8043,11 +8072,165 @@ client.inference.put({ inference_id })
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
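For example, a minimal request might look like this (a sketch; the endpoint id is a placeholder and the ELSER service settings are illustrative):
[source,ts]
----
const response = await client.inference.put({
  inference_id: "my-elser-endpoint",
  task_type: "sparse_embedding",
  inference_config: {
    service: "elser",
    service_settings: {
      num_allocations: 1,
      num_threads: 1,
    },
  },
});
console.log(response);
----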
[discrete]
==== stream_inference
==== put_eis
Create an Elastic Inference Service (EIS) inference endpoint.
Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
{ref}/infer-service-elastic.html[Endpoint documentation]
[source,ts]
----
client.inference.putEis({ task_type, eis_inference_id, service, service_settings })
----
[discrete]
==== Arguments
* *Request (object):*
** *`task_type` (Enum("chat_completion"))*: The type of the inference task that the model will perform.
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
** *`eis_inference_id` (string)*: The unique identifier of the inference endpoint.
** *`service` (Enum("elastic"))*: The type of service supported for the specified task type. In this case, `elastic`.
** *`service_settings` ({ model_id, rate_limit })*: Settings used to install the inference model. These settings are specific to the `elastic` service.
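A sketch of a request (the endpoint id is a placeholder; substitute a model id supported by EIS):
[source,ts]
----
const response = await client.inference.putEis({
  task_type: "chat_completion",
  eis_inference_id: "my-eis-endpoint",
  service: "elastic",
  service_settings: {
    model_id: "<MODEL_ID>",
  },
});
console.log(response);
----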
[discrete]
==== put_openai
Create an OpenAI inference endpoint.
Create an inference endpoint to perform an inference task with the `openai` service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-openai.html[Endpoint documentation]
[source,ts]
----
client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings })
----
[discrete]
==== Arguments
* *Request (object):*
** *`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))*: The type of the inference task that the model will perform.
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
** *`openai_inference_id` (string)*: The unique identifier of the inference endpoint.
** *`service` (Enum("openai"))*: The type of service supported for the specified task type. In this case, `openai`.
** *`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })*: Settings used to install the inference model. These settings are specific to the `openai` service.
** *`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })*: The chunking configuration object.
** *`task_settings` (Optional, { user })*: Settings to configure the inference task.
These settings are specific to the task type you specified.
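For example (a sketch; the endpoint id is a placeholder and the model id is illustrative):
[source,ts]
----
const response = await client.inference.putOpenai({
  task_type: "text_embedding",
  openai_inference_id: "my-openai-endpoint",
  service: "openai",
  service_settings: {
    api_key: "<API_KEY>",
    model_id: "text-embedding-3-small",
  },
});
console.log(response);
----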
[discrete]
==== put_voyageai
Create a VoyageAI inference endpoint.
Create an inference endpoint to perform an inference task with the `voyageai` service.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-voyageai.html[Endpoint documentation]
[source,ts]
----
client.inference.putVoyageai({ task_type, voyageai_inference_id, service, service_settings })
----
[discrete]
==== Arguments
* *Request (object):*
** *`task_type` (Enum("text_embedding" | "rerank"))*: The type of the inference task that the model will perform.
** *`voyageai_inference_id` (string)*: The unique identifier of the inference endpoint.
** *`service` (Enum("voyageai"))*: The type of service supported for the specified task type. In this case, `voyageai`.
** *`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })*: Settings used to install the inference model. These settings are specific to the `voyageai` service.
** *`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })*: The chunking configuration object.
** *`task_settings` (Optional, { input_type, return_documents, top_k, truncation })*: Settings to configure the inference task.
These settings are specific to the task type you specified.
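For example (a sketch; the endpoint id and model id are placeholders):
[source,ts]
----
const response = await client.inference.putVoyageai({
  task_type: "text_embedding",
  voyageai_inference_id: "my-voyageai-endpoint",
  service: "voyageai",
  service_settings: {
    model_id: "<MODEL_ID>",
  },
});
console.log(response);
----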
[discrete]
==== put_watsonx
Create a Watsonx inference endpoint.
Create an inference endpoint to perform an inference task with the `watsonxai` service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
After creating the endpoint, wait for the model deployment to complete before using it.
To verify the deployment status, use the get trained model statistics API.
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
{ref}/infer-service-watsonx-ai.html[Endpoint documentation]
[source,ts]
----
client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings })
----
[discrete]
==== Arguments
* *Request (object):*
** *`task_type` (Enum("text_embedding"))*: The task type.
The only valid task type for the model to perform is `text_embedding`.
** *`watsonx_inference_id` (string)*: The unique identifier of the inference endpoint.
** *`service` (Enum("watsonxai"))*: The type of service supported for the specified task type. In this case, `watsonxai`.
** *`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })*: Settings used to install the inference model. These settings are specific to the `watsonxai` service.
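For example (a sketch; the ids, key, URL, and API version are all placeholders):
[source,ts]
----
const response = await client.inference.putWatsonx({
  task_type: "text_embedding",
  watsonx_inference_id: "my-watsonx-endpoint",
  service: "watsonxai",
  service_settings: {
    api_key: "<API_KEY>",
    api_version: "<API_VERSION>",
    model_id: "<MODEL_ID>",
    project_id: "<PROJECT_ID>",
    url: "<URL>",
  },
});
console.log(response);
----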
[discrete]
==== rerank
Perform reranking inference on the service
{ref}/post-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.rerank({ inference_id, query, input })
----
[discrete]
==== Arguments
* *Request (object):*
** *`inference_id` (string)*: The unique identifier for the inference endpoint.
** *`query` (string)*: Query input.
** *`input` (string | string[])*: The text on which you want to perform the inference task.
It can be a single string or an array.
> info
> Inference endpoints for the `completion` task type currently only support a single string as input.
** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request.
These settings are specific to the task type you specified and override the task settings specified when initializing the service.
** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete.
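For example, reranking two candidate passages against a query (a sketch; the endpoint id is a placeholder):
[source,ts]
----
const response = await client.inference.rerank({
  inference_id: "my-rerank-endpoint",
  query: "What is the capital of France?",
  input: [
    "Paris is the capital of France.",
    "Berlin is the capital of Germany.",
  ],
});
console.log(response);
----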
[discrete]
==== sparse_embedding
Perform sparse embedding inference on the service
{ref}/post-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.sparseEmbedding({ inference_id, input })
----
[discrete]
==== Arguments
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`input` (string | string[])*: Inference input.
Either a string or an array of strings.
** *`task_settings` (Optional, User-defined value)*: Optional task settings
** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete.
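For example (a sketch; the endpoint id is a placeholder):
[source,ts]
----
const response = await client.inference.sparseEmbedding({
  inference_id: "my-elser-endpoint",
  input: "The quick brown fox jumps over the lazy dog",
});
console.log(response);
----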
[discrete]
==== stream_completion
Perform streaming inference.
Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
This API works only with the completion task type.
@ -8059,7 +8242,7 @@ This API requires the `monitor_inference` cluster privilege (the built-in `infer
{ref}/stream-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.streamInference({ inference_id, input })
client.inference.streamCompletion({ inference_id, input })
----
[discrete]
@ -8071,14 +8254,16 @@ client.inference.streamInference({ inference_id, input })
It can be a single string or an array.
NOTE: Inference endpoints for the completion task type currently only support a single string as input.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of task that the model performs.
** *`task_settings` (Optional, User-defined value)*: Optional task settings
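For example (a sketch; the endpoint id is a placeholder and the response arrives incrementally as a stream):
[source,ts]
----
const response = await client.inference.streamCompletion({
  inference_id: "my-completion-endpoint",
  input: "What is Elastic?",
});
console.log(response);
----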
[discrete]
==== unified_inference
Perform inference on the service using the Unified Schema
==== text_embedding
Perform text embedding inference on the service
{ref}/post-inference-api.html[Endpoint documentation]
[source,ts]
----
client.inference.unifiedInference({ inference_id, messages })
client.inference.textEmbedding({ inference_id, input })
----
[discrete]
@ -8086,15 +8271,9 @@ client.inference.unifiedInference({ inference_id, messages })
* *Request (object):*
** *`inference_id` (string)*: The inference Id
** *`messages` ({ content, role, tool_call_id, tool_calls }[])*: A list of objects representing the conversation.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
** *`model` (Optional, string)*: The ID of the model to use.
** *`max_completion_tokens` (Optional, number)*: The upper bound limit for the number of tokens that can be generated for a completion request.
** *`stop` (Optional, string[])*: A sequence of strings to control when the model should stop generating additional tokens.
** *`temperature` (Optional, float)*: The sampling temperature to use.
** *`tool_choice` (Optional, string | { type, function })*: Controls which tool is called by the model.
** *`tools` (Optional, { type, function }[])*: A list of tools that the model can call.
** *`top_p` (Optional, float)*: Nucleus sampling, an alternative to sampling with temperature.
** *`input` (string | string[])*: Inference input.
Either a string or an array of strings.
** *`task_settings` (Optional, User-defined value)*: Optional task settings
** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete.
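For example (a sketch; the endpoint id is a placeholder):
[source,ts]
----
const response = await client.inference.textEmbedding({
  inference_id: "my-embedding-endpoint",
  input: "The quick brown fox jumps over the lazy dog",
});
console.log(response);
----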
[discrete]
@ -8118,7 +8297,7 @@ client.inference.update({ inference_id })
* *Request (object):*
** *`inference_id` (string)*: The unique identifier of the inference endpoint.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs.
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The type of inference task that the model performs.
** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
[discrete]
@ -9003,7 +9182,7 @@ client.ml.evaluateDataFrame({ evaluation, index })
* *Request (object):*
** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform.
** *`index` (string)*: Defines the `index` in which the evaluation will be performed.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index.
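For example, evaluating a hypothetical outlier detection result set (the index and field names are placeholders):
[source,ts]
----
const response = await client.ml.evaluateDataFrame({
  index: "my-analytics-dest-index",
  evaluation: {
    outlier_detection: {
      actual_field: "is_outlier",
      predicted_probability_field: "ml.outlier_score",
    },
  },
});
console.log(response);
----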
[discrete]
==== explain_data_frame_analytics
@ -10002,15 +10181,15 @@ The default value is either the bucket span for short bucket spans, or, for long
fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last
(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses
aggregations, this value must be divisible by the interval of the date histogram aggregation.
** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine
learning nodes must have the `remote_cluster_client` role.
** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master
nodes and the machine learning nodes must have the `remote_cluster_client` role.
** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search
** *`job_id` (Optional, string)*: Identifier for the anomaly detection job.
** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically
stops and closes the associated job after this many real-time searches return no documents. In other words,
it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no
end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
object is passed verbatim to Elasticsearch.
** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
@ -10385,6 +10564,9 @@ client.ml.startTrainedModelDeployment({ model_id })
* *Request (object):*
** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported.
** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations
is set based on the current load.
If adaptive_allocations is enabled, do not set the number of allocations manually.
** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model.
The default value is the same size as the `model_size_bytes`. To disable the cache,
`0b` can be provided.
@ -10395,6 +10577,7 @@ a separate set of threads to evaluate the model.
Increasing this value generally increases the throughput.
If this setting is greater than the number of hardware threads
it will automatically be changed to a value less than the number of hardware threads.
If adaptive_allocations is enabled, do not set this value, because it is automatically set.
** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority.
** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds
this value, new requests are rejected with a 429 error.
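For example, starting a deployment with adaptive allocations (a sketch; the model id is a placeholder):
[source,ts]
----
const response = await client.ml.startTrainedModelDeployment({
  model_id: "my-pytorch-model",
  adaptive_allocations: {
    enabled: true,
    min_number_of_allocations: 1,
    max_number_of_allocations: 4,
  },
});
console.log(response);
----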
@ -10559,7 +10742,7 @@ learning nodes must have the `remote_cluster_client` role.
stops and closes the associated job after this many real-time searches return no documents. In other words,
it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no
end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also
changed. Therefore, the time required to learn might be long and the understandability of the results is
@ -10717,6 +10900,10 @@ a separate set of threads to evaluate the model.
Increasing this value generally increases the throughput.
If this setting is greater than the number of hardware threads
it will automatically be changed to a value less than the number of hardware threads.
If adaptive_allocations is enabled, do not set this value, because it is automatically set.
** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations
is set based on the current load.
If adaptive_allocations is enabled, do not set the number of allocations manually.
[discrete]
==== upgrade_job_snapshot
@ -11291,7 +11478,7 @@ This parameter has the following rules:
* Only one rollup index may be specified. If more than one is supplied, an exception occurs.
* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
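For example (a sketch; the rollup index and field names are placeholders):
[source,ts]
----
const response = await client.rollup.rollupSearch({
  index: "sensor_rollup",
  size: 0,
  aggregations: {
    max_temperature: {
      max: {
        field: "temperature",
      },
    },
  },
});
console.log(response);
----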
@ -12794,14 +12981,14 @@ These APIs are used internally by Kibana in order to provide OpenID Connect base
{ref}/security-api-oidc-logout.html[Endpoint documentation]
[source,ts]
----
client.security.oidcLogout({ access_token })
client.security.oidcLogout({ token })
----
[discrete]
==== Arguments
* *Request (object):*
** *`access_token` (string)*: The access token to be invalidated.
** *`token` (string)*: The access token to be invalidated.
** *`refresh_token` (Optional, string)*: The refresh token to be invalidated.
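For example (a sketch; both tokens are placeholders obtained from a prior OpenID Connect authentication):
[source,ts]
----
const response = await client.security.oidcLogout({
  token: "<ACCESS_TOKEN>",
  refresh_token: "<REFRESH_TOKEN>",
});
console.log(response);
----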
[discrete]
@ -13655,7 +13842,7 @@ client.simulate.ingest({ docs })
This value can be overridden by specifying an index on each document.
If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument.
** *`component_template_substitutions` (Optional, Record<string, { template, version, _meta, deprecated }>)*: A map of component template names to substitute component template definition objects.
** *`index_template_subtitutions` (Optional, Record<string, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream, deprecated, ignore_missing_component_templates }>)*: A map of index template names to substitute index template definition objects.
** *`index_template_substitutions` (Optional, Record<string, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream, deprecated, ignore_missing_component_templates }>)*: A map of index template names to substitute index template definition objects.
** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*
** *`pipeline_substitutions` (Optional, Record<string, { description, on_failure, processors, version, deprecated, _meta }>)*: Pipelines to test.
If you don't specify the `pipeline` request path parameter, this parameter is required.
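For example, simulating a document against a substituted pipeline (a sketch; the index, pipeline name, and fields are placeholders):
[source,ts]
----
const response = await client.simulate.ingest({
  docs: [
    {
      _index: "my-index",
      _source: {
        foo: "bar",
      },
    },
  ],
  pipeline_substitutions: {
    "my-pipeline": {
      processors: [
        {
          set: {
            field: "foo",
            value: "baz",
          },
        },
      ],
    },
  },
});
console.log(response);
----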
@ -14385,7 +14572,7 @@ It ignores other request body parameters.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
** *`field_multi_value_leniency` (Optional, boolean)*: If `false`, the API returns an exception when encountering multiple values for a field.
If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering.
** *`index_using_frozen` (Optional, boolean)*: If `true`, the search can run on frozen indices.
** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for an async or saved synchronous search.
** *`keep_on_completion` (Optional, boolean)*: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter.
@ -14426,7 +14613,7 @@ client.sql.translate({ query })
* *Request (object):*
** *`query` (string)*: The SQL query to run.
** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering.
** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering.
** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search.
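For example (a sketch; the table name is a placeholder):
[source,ts]
----
const response = await client.sql.translate({
  query: "SELECT * FROM library ORDER BY page_count DESC",
  fetch_size: 10,
});
console.log(response);
----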
[discrete]
@ -15728,7 +15915,7 @@ client.watcher.queryWatches({ ... })
It must be non-negative.
** *`size` (Optional, number)*: The number of hits to return.
It must be non-negative.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned.
** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Retrieve the next page of hits using a set of sort values from the previous page.
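For example, filtering watches on a hypothetical metadata field (a sketch):
[source,ts]
----
const response = await client.watcher.queryWatches({
  query: {
    term: {
      "metadata.tag": "prod",
    },
  },
  size: 10,
});
console.log(response);
----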

View File

@ -53,7 +53,7 @@ export default class Esql {
async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryResponse>
async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = []
const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata']
const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata', 'wait_for_completion_timeout']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body

View File

@ -1438,7 +1438,7 @@ export default class Indices {
/**
* Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream | Elasticsearch API documentation}
*/
async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesMigrateToDataStreamResponse>
async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesMigrateToDataStreamResponse, unknown>>
@ -1470,7 +1470,7 @@ export default class Indices {
/**
* Update data streams. Performs one or more data stream modification actions in a single atomic operation.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream | Elasticsearch API documentation}
*/
async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesModifyDataStreamResponse>
async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesModifyDataStreamResponse, unknown>>
@ -1543,7 +1543,7 @@ export default class Indices {
/**
* Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream | Elasticsearch API documentation}
*/
async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPromoteDataStreamResponse>
async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPromoteDataStreamResponse, unknown>>
@ -1575,7 +1575,7 @@ export default class Indices {
/**
* Create or update an alias. Adds a data stream or index to an alias.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-aliases.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias | Elasticsearch API documentation}
*/
async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutAliasResponse>
async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutAliasResponse, unknown>>
@ -1627,7 +1627,7 @@ export default class Indices {
/**
* Update data stream lifecycles. Update the data stream lifecycle of the specified data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams-put-lifecycle.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle | Elasticsearch API documentation}
*/
async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutDataLifecycleResponse>
async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutDataLifecycleResponse, unknown>>
@ -1671,7 +1671,7 @@ export default class Indices {
/**
* Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-put-template.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template | Elasticsearch API documentation}
*/
async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutIndexTemplateResponse>
async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutIndexTemplateResponse, unknown>>
@ -2416,7 +2416,7 @@ export default class Indices {
/**
* Create or update an alias. Adds a data stream or index to an alias.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-aliases.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases | Elasticsearch API documentation}
*/
async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUpdateAliasesResponse>
async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUpdateAliasesResponse, unknown>>

View File

@ -44,6 +44,94 @@ export default class Inference {
this.transport = transport
}
/**
* Perform chat completion inference
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/chat-completion-inference-api.html | Elasticsearch API documentation}
*/
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceChatCompletionUnifiedResponse, unknown>>
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise<T.InferenceChatCompletionUnifiedResponse>
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/chat_completion/${encodeURIComponent(params.inference_id.toString())}/_stream`
const meta: TransportRequestMetadata = {
name: 'inference.chat_completion_unified',
pathParts: {
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Perform completion inference on the service
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation}
*/
async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceCompletionResponse>
async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceCompletionResponse, unknown>>
async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptions): Promise<T.InferenceCompletionResponse>
async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.completion',
pathParts: {
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Delete an inference endpoint
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/delete-inference-api.html | Elasticsearch API documentation}
@ -128,58 +216,6 @@ export default class Inference {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation}
*/
async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>
async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>
async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>
async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'inference_id']
const acceptedBody: string[] = ['query', 'input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
let method = ''
let path = ''
if (params.task_type != null && params.inference_id != null) {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}`
} else {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.inference_id.toString())}`
}
const meta: TransportRequestMetadata = {
name: 'inference.inference',
pathParts: {
task_type: params.task_type,
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/put-inference-api.html | Elasticsearch API documentation}
@ -226,15 +262,15 @@ export default class Inference {
}
/**
* Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/stream-inference-api.html | Elasticsearch API documentation}
* Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/infer-service-elastic.html | Elasticsearch API documentation}
*/
async streamInference (this: That, params: T.InferenceStreamInferenceRequest | TB.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceStreamInferenceResponse>
async streamInference (this: That, params: T.InferenceStreamInferenceRequest | TB.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceStreamInferenceResponse, unknown>>
async streamInference (this: That, params: T.InferenceStreamInferenceRequest | TB.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceStreamInferenceResponse>
async streamInference (this: That, params: T.InferenceStreamInferenceRequest | TB.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id', 'task_type']
const acceptedBody: string[] = ['input']
async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'eis_inference_id']
const acceptedBody: string[] = ['service', 'service_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
@ -258,35 +294,28 @@ export default class Inference {
}
}
let method = ''
let path = ''
if (params.task_type != null && params.inference_id != null) {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_stream`
} else {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_stream`
}
const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.stream_inference',
name: 'inference.put_eis',
pathParts: {
inference_id: params.inference_id,
task_type: params.task_type
task_type: params.task_type,
eis_inference_id: params.eis_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Perform inference on the service using the Unified Schema
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/unified-inference-api.html | Elasticsearch API documentation}
* Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/infer-service-openai.html | Elasticsearch API documentation}
*/
async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest | TB.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceUnifiedInferenceResponse>
async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest | TB.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceUnifiedInferenceResponse, unknown>>
async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest | TB.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceUnifiedInferenceResponse>
async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest | TB.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'inference_id']
const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p']
async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutOpenaiResponse>
async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutOpenaiResponse, unknown>>
async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutOpenaiResponse>
async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'openai_inference_id']
const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
@ -310,19 +339,278 @@ export default class Inference {
}
}
let method = ''
let path = ''
if (params.task_type != null && params.inference_id != null) {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_unified`
} else {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_unified`
}
const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.openai_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.unified_inference',
name: 'inference.put_openai',
pathParts: {
task_type: params.task_type,
openai_inference_id: params.openai_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
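  // Illustrative usage sketch (comment only): `putOpenai` maps to
  // PUT /_inference/{task_type}/{openai_inference_id}; the id and key below
  // are placeholders.
  //
  //   const response = await client.inference.putOpenai({
  //     task_type: 'completion',
  //     openai_inference_id: 'my-openai-endpoint',
  //     service: 'openai',
  //     service_settings: {
  //       api_key: '<API_KEY>',
  //       model_id: 'gpt-4o'
  //     }
  //   })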
/**
* Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/infer-service-voyageai.html | Elasticsearch API documentation}
*/
async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutVoyageaiResponse>
async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutVoyageaiResponse, unknown>>
async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutVoyageaiResponse>
async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'voyageai_inference_id']
const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.voyageai_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.put_voyageai',
pathParts: {
task_type: params.task_type,
voyageai_inference_id: params.voyageai_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
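  // Illustrative usage sketch (comment only); the model id and dimensions are
  // placeholder values.
  //
  //   const response = await client.inference.putVoyageai({
  //     task_type: 'text_embedding',
  //     voyageai_inference_id: 'my-voyageai-endpoint',
  //     service: 'voyageai',
  //     service_settings: { model_id: '<MODEL_ID>', dimensions: 512 }
  //   })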
/**
* Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/infer-service-watsonx-ai.html | Elasticsearch API documentation}
*/
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutWatsonxResponse>
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutWatsonxResponse, unknown>>
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise<T.InferencePutWatsonxResponse>
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['task_type', 'watsonx_inference_id']
const acceptedBody: string[] = ['service', 'service_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.watsonx_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.put_watsonx',
pathParts: {
task_type: params.task_type,
watsonx_inference_id: params.watsonx_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
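  // Illustrative usage sketch (comment only): every service setting shown is
  // required by the request type; all values are placeholders.
  //
  //   const response = await client.inference.putWatsonx({
  //     task_type: 'text_embedding',
  //     watsonx_inference_id: 'my-watsonx-endpoint',
  //     service: 'watsonxai',
  //     service_settings: {
  //       api_key: '<API_KEY>',
  //       api_version: '<API_VERSION>',
  //       model_id: '<MODEL_ID>',
  //       project_id: '<PROJECT_ID>',
  //       url: '<URL>'
  //     }
  //   })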
/**
* Perform reranking inference on the service
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation}
*/
async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceRerankResponse>
async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceRerankResponse, unknown>>
async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptions): Promise<T.InferenceRerankResponse>
async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['query', 'input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/rerank/${encodeURIComponent(params.inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.rerank',
pathParts: {
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
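  // Illustrative usage sketch (comment only): rerank posts to
  // /_inference/rerank/{inference_id} with a required query and one or more
  // input documents; the endpoint id is a placeholder.
  //
  //   const response = await client.inference.rerank({
  //     inference_id: 'my-rerank-endpoint',
  //     query: 'What is Elastic?',
  //     input: ['first document', 'second document']
  //   })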
/**
* Perform sparse embedding inference on the service
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation}
*/
async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceSparseEmbeddingResponse>
async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceSparseEmbeddingResponse, unknown>>
async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise<T.InferenceSparseEmbeddingResponse>
async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/sparse_embedding/${encodeURIComponent(params.inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.sparse_embedding',
pathParts: {
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
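  // Illustrative usage sketch (comment only); the endpoint id is a placeholder.
  //
  //   const response = await client.inference.sparseEmbedding({
  //     inference_id: 'my-elser-endpoint',
  //     input: 'The quick brown fox'
  //   })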
/**
* Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/stream-inference-api.html | Elasticsearch API documentation}
*/
async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceStreamCompletionResponse>
async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceStreamCompletionResponse, unknown>>
async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise<T.InferenceStreamCompletionResponse>
async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}/_stream`
const meta: TransportRequestMetadata = {
name: 'inference.stream_completion',
pathParts: {
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
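  // Illustrative usage sketch (comment only): the response is a stream
  // (StreamResult), so the calling client must support streaming; the endpoint
  // id is a placeholder.
  //
  //   const response = await client.inference.streamCompletion({
  //     inference_id: 'my-completion-endpoint',
  //     input: 'What is Elastic?'
  //   })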
/**
* Perform text embedding inference on the service
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation}
*/
async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceTextEmbeddingResponse>
async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceTextEmbeddingResponse, unknown>>
async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise<T.InferenceTextEmbeddingResponse>
async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['inference_id']
const acceptedBody: string[] = ['input', 'task_settings']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_inference/text_embedding/${encodeURIComponent(params.inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.text_embedding',
pathParts: {
inference_id: params.inference_id
}
}
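  // Illustrative usage sketch (comment only); the endpoint id is a placeholder.
  //
  //   const response = await client.inference.textEmbedding({
  //     inference_id: 'my-embedding-endpoint',
  //     input: ['The quick brown fox']
  //   })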
@ -358,10 +646,10 @@ export default class Inference {
let method = ''
let path = ''
if (params.task_type != null && params.inference_id != null) {
method = 'POST'
method = 'PUT'
path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_update`
} else {
method = 'POST'
method = 'PUT'
path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_update`
}
const meta: TransportRequestMetadata = {


@ -2478,11 +2478,23 @@ export default class Ml {
async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlStartTrainedModelDeploymentResponse>
async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['model_id']
const acceptedBody: string[] = ['adaptive_allocations']
const querystring: Record<string, any> = {}
const body = undefined
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedPath.includes(key)) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
@ -2839,7 +2851,7 @@ export default class Ml {
async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlUpdateTrainedModelDeploymentResponse>
async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['model_id']
const acceptedBody: string[] = ['number_of_allocations']
const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body
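// Illustrative usage sketch (comment only) for the newly accepted
// `adaptive_allocations` body field; the settings shape is an assumption
// based on MlAdaptiveAllocationsSettings, and the model id is a placeholder.
//
//   const response = await client.ml.updateTrainedModelDeployment({
//     model_id: 'my-model',
//     adaptive_allocations: {
//       enabled: true,
//       min_number_of_allocations: 1,
//       max_number_of_allocations: 4
//     }
//   })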


@ -1671,7 +1671,7 @@ export default class Security {
async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise<T.SecurityOidcLogoutResponse>
async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = []
const acceptedBody: string[] = ['access_token', 'refresh_token']
const acceptedBody: string[] = ['token', 'refresh_token']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body


@ -53,7 +53,7 @@ export default class Simulate {
async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise<T.SimulateIngestResponse>
async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['index']
const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions']
const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_substitutions', 'mapping_addition', 'pipeline_substitutions']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body


@ -47,7 +47,7 @@ export default async function TermvectorsApi<TDocument = unknown> (this: That, p
export default async function TermvectorsApi<TDocument = unknown> (this: That, params: T.TermvectorsRequest<TDocument> | TB.TermvectorsRequest<TDocument>, options?: TransportRequestOptions): Promise<T.TermvectorsResponse>
export default async function TermvectorsApi<TDocument = unknown> (this: That, params: T.TermvectorsRequest<TDocument> | TB.TermvectorsRequest<TDocument>, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['index', 'id']
const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer']
const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer', 'fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'term_statistics', 'routing', 'version', 'version_type']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body


@ -163,9 +163,14 @@ export interface CountResponse {
export interface CreateRequest<TDocument = unknown> extends RequestBase {
id: Id
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
op_type?: OpType
pipeline?: string
refresh?: Refresh
require_alias?: boolean
require_data_stream?: boolean
routing?: Routing
timeout?: Duration
version?: VersionNumber
@ -1945,20 +1950,20 @@ export interface TermvectorsFilter {
export interface TermvectorsRequest<TDocument = unknown> extends RequestBase {
index: IndexName
id?: Id
preference?: string
realtime?: boolean
doc?: TDocument
filter?: TermvectorsFilter
per_field_analyzer?: Record<Field, string>
fields?: Fields
field_statistics?: boolean
offsets?: boolean
payloads?: boolean
positions?: boolean
preference?: string
realtime?: boolean
routing?: Routing
term_statistics?: boolean
routing?: Routing
version?: VersionNumber
version_type?: VersionType
doc?: TDocument
filter?: TermvectorsFilter
per_field_analyzer?: Record<Field, string>
}
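// Illustrative usage sketch (comment only): with this change, options such as
// `fields` and `term_statistics` are accepted as body parameters.
//
//   const response = await client.termvectors({
//     index: 'my-index-000001',
//     id: '1',
//     fields: ['text'],
//     term_statistics: true
//   })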
export interface TermvectorsResponse {
@ -2118,7 +2123,6 @@ export interface BulkIndexByScrollFailure {
id: Id
index: IndexName
status: integer
type: string
}
export interface BulkStats {
@ -4388,7 +4392,7 @@ export interface AggregationsWeightedAverageValue {
export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
}
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export interface AnalysisArabicAnalyzer {
type: 'arabic'
@ -4825,17 +4829,6 @@ export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
discard_compound_token?: boolean
}
export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
export interface AnalysisLanguageAnalyzer {
type: 'language'
version?: VersionString
language: AnalysisLanguage
stem_exclusion: string[]
stopwords?: AnalysisStopWords
stopwords_path?: string
}
export interface AnalysisLatvianAnalyzer {
type: 'latvian'
stopwords?: AnalysisStopWords
@ -5358,6 +5351,8 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
format?: string
ignore_malformed?: boolean
index?: boolean
script?: Script | string
on_script_error?: MappingOnScriptError
null_value?: DateTime
precision_step?: integer
type: 'date_nanos'
@ -5369,6 +5364,8 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase {
format?: string
ignore_malformed?: boolean
index?: boolean
script?: Script | string
on_script_error?: MappingOnScriptError
null_value?: DateTime
precision_step?: integer
locale?: string
@ -5445,7 +5442,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
export interface MappingDynamicTemplate {
mapping?: MappingProperty
runtime?: MappingProperty
runtime?: MappingRuntimeField
match?: string | string[]
path_match?: string | string[]
unmatch?: string | string[]
@ -5509,6 +5506,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase {
coerce?: boolean
ignore_malformed?: boolean
ignore_z_value?: boolean
index?: boolean
orientation?: MappingGeoOrientation
strategy?: MappingGeoStrategy
type: 'geo_shape'
@ -5640,7 +5638,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase
export interface MappingObjectProperty extends MappingCorePropertyBase {
enabled?: boolean
subobjects?: boolean
subobjects?: MappingSubobjects
type?: 'object'
}
@ -5711,7 +5709,7 @@ export interface MappingRuntimeFieldFetchFields {
format?: string
}
export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup'
export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup'
export type MappingRuntimeFields = Record<Field, MappingRuntimeField>
@ -5737,7 +5735,8 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
export interface MappingSemanticTextProperty {
type: 'semantic_text'
meta?: Record<string, string>
inference_id: Id
inference_id?: Id
search_inference_id?: Id
}
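// Illustrative mapping sketch (comment only): `inference_id` is now optional
// and `search_inference_id` can point searches at a separate endpoint; the
// index name and endpoint id are placeholders.
//
//   await client.indices.create({
//     index: 'semantic-index',
//     mappings: {
//       properties: {
//         content: {
//           type: 'semantic_text',
//           search_inference_id: 'my-search-endpoint'
//         }
//       }
//     }
//   })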
export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
@ -5772,6 +5771,8 @@ export interface MappingSparseVectorProperty extends MappingPropertyBase {
type: 'sparse_vector'
}
export type MappingSubobjects = boolean | 'true' | 'false' | 'auto'
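// Illustrative mapping sketch (comment only): `subobjects` now accepts 'auto'
// in addition to booleans; the index name is a placeholder.
//
//   await client.indices.create({
//     index: 'my-index',
//     mappings: { subobjects: 'auto' }
//   })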
export interface MappingSuggestContext {
name: Name
path?: Field
@ -5823,7 +5824,7 @@ export interface MappingTypeMapping {
date_detection?: boolean
dynamic?: MappingDynamicMapping
dynamic_date_formats?: string[]
dynamic_templates?: Record<string, MappingDynamicTemplate>[]
dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[]
_field_names?: MappingFieldNamesField
index_field?: MappingIndexField
_meta?: Metadata
@ -5834,7 +5835,7 @@ export interface MappingTypeMapping {
_source?: MappingSourceField
runtime?: Record<string, MappingRuntimeField>
enabled?: boolean
subobjects?: boolean
subobjects?: MappingSubobjects
_data_stream_timestamp?: MappingDataStreamTimestamp
}
@ -6019,6 +6020,12 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile
geohash?: GeoHash
geohex?: GeoHexCell
}
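// Illustrative query sketch (comment only), following the field names of this
// type as written; the target field and geotile cell are placeholders.
//
//   const response = await client.search({
//     index: 'my-locations',
//     query: {
//       geo_grid: {
//         location: { geogrid: '6/32/22' }
//       }
//     }
//   })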
export interface QueryDslGeoPolygonPoints {
points: GeoLocation[]
}
@ -6314,6 +6321,7 @@ export interface QueryDslQueryContainer {
fuzzy?: Partial<Record<Field, QueryDslFuzzyQuery | string | double | boolean>>
geo_bounding_box?: QueryDslGeoBoundingBoxQuery
geo_distance?: QueryDslGeoDistanceQuery
geo_grid?: Partial<Record<Field, QueryDslGeoGridQuery>>
geo_polygon?: QueryDslGeoPolygonQuery
geo_shape?: QueryDslGeoShapeQuery
has_child?: QueryDslHasChildQuery
@ -10414,12 +10422,12 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export interface EsqlAsyncQueryRequest extends RequestBase {
allow_partial_results?: boolean
delimiter?: string
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
keep_on_completion?: boolean
wait_for_completion_timeout?: Duration
columnar?: boolean
filter?: QueryDslQueryContainer
locale?: string
@ -10428,6 +10436,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
query: string
tables?: Record<string, Record<string, EsqlTableValuesContainer>>
include_ccs_metadata?: boolean
wait_for_completion_timeout?: Duration
}
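// Illustrative usage sketch (comment only): `wait_for_completion_timeout` now
// travels in the request body while `allow_partial_results` is a new query
// parameter.
//
//   const response = await client.esql.asyncQuery({
//     query: 'FROM my-index | LIMIT 10',
//     wait_for_completion_timeout: '2s',
//     allow_partial_results: true
//   })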
export type EsqlAsyncQueryResponse = EsqlResult
@ -10460,6 +10469,7 @@ export interface EsqlQueryRequest extends RequestBase {
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
allow_partial_results?: boolean
columnar?: boolean
filter?: QueryDslQueryContainer
locale?: string
@ -11093,7 +11103,7 @@ export interface IndicesIndexSettingsKeys {
routing_partition_size?: SpecUtilsStringified<integer>
load_fixed_bitset_filters_eagerly?: boolean
hidden?: boolean | string
auto_expand_replicas?: string
auto_expand_replicas?: SpecUtilsWithNullValue<string>
merge?: IndicesMerge
search?: IndicesSettingsSearch
refresh_interval?: Duration
@ -11390,7 +11400,7 @@ export interface IndicesSoftDeletes {
retention_lease?: IndicesRetentionLease
}
export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC'
export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic'
export interface IndicesStorage {
type: IndicesStorageType
@ -11732,6 +11742,8 @@ export type IndicesExistsAliasResponse = boolean
export interface IndicesExistsIndexTemplateRequest extends RequestBase {
name: Name
local?: boolean
flat_settings?: boolean
master_timeout?: Duration
}
@ -11801,7 +11813,6 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
fields?: Fields
wait_for_active_shards?: WaitForActiveShards
}
export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody
@ -12153,7 +12164,7 @@ export interface IndicesPutMappingRequest extends RequestBase {
date_detection?: boolean
dynamic?: MappingDynamicMapping
dynamic_date_formats?: string[]
dynamic_templates?: Record<string, MappingDynamicTemplate> | Record<string, MappingDynamicTemplate>[]
dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[]
_field_names?: MappingFieldNamesField
_meta?: Metadata
numeric_detection?: boolean
@ -12173,6 +12184,7 @@ export interface IndicesPutSettingsRequest extends RequestBase {
ignore_unavailable?: boolean
master_timeout?: Duration
preserve_existing?: boolean
reopen?: boolean
timeout?: Duration
settings?: IndicesIndexSettings
}
@ -12326,6 +12338,7 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
allow_no_indices?: boolean
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
resource?: string
}
export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult
@ -12387,6 +12400,7 @@ export interface IndicesRolloverRequest extends RequestBase {
master_timeout?: Duration
timeout?: Duration
wait_for_active_shards?: WaitForActiveShards
lazy?: boolean
aliases?: Record<IndexName, IndicesAlias>
conditions?: IndicesRolloverRolloverConditions
mappings?: MappingTypeMapping
@ -12527,6 +12541,8 @@ export interface IndicesShrinkResponse {
export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
name: Name
create?: boolean
cause?: string
master_timeout?: Duration
include_defaults?: boolean
}
@ -12544,6 +12560,7 @@ export interface IndicesSimulateTemplateOverlapping {
export interface IndicesSimulateTemplateRequest extends RequestBase {
name?: Name
create?: boolean
cause?: string
master_timeout?: Duration
include_defaults?: boolean
allow_auto_create?: boolean
@ -12825,6 +12842,10 @@ export interface IndicesValidateQueryResponse {
error?: string
}
export interface InferenceCompletionInferenceResult {
completion: InferenceCompletionResult[]
}
export interface InferenceCompletionResult {
result: string
}
@ -12856,22 +12877,26 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
task_type: InferenceTaskType
}
export interface InferenceInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding?: InferenceTextEmbeddingResult[]
sparse_embedding?: InferenceSparseEmbeddingResult[]
completion?: InferenceCompletionResult[]
rerank?: InferenceRankedDocument[]
}
export interface InferenceRankedDocument {
index: integer
relevance_score: float
text?: string
}
export interface InferenceRateLimitSetting {
requests_per_minute?: integer
}
export interface InferenceRerankedInferenceResult {
rerank: InferenceRankedDocument[]
}
export type InferenceServiceSettings = any
export interface InferenceSparseEmbeddingInferenceResult {
sparse_embedding: InferenceSparseEmbeddingResult[]
}
export interface InferenceSparseEmbeddingResult {
embedding: InferenceSparseVector
}
@ -12880,16 +12905,94 @@ export type InferenceSparseVector = Record<string, float>
export type InferenceTaskSettings = any
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion'
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'
export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
}
export interface InferenceTextEmbeddingInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding_bits?: InferenceTextEmbeddingByteResult[]
text_embedding?: InferenceTextEmbeddingResult[]
}
export interface InferenceTextEmbeddingResult {
embedding: InferenceDenseVector
}
export interface InferenceChatCompletionUnifiedCompletionTool {
type: string
function: InferenceChatCompletionUnifiedCompletionToolFunction
}
export interface InferenceChatCompletionUnifiedCompletionToolChoice {
type: string
function: InferenceChatCompletionUnifiedCompletionToolChoiceFunction
}
export interface InferenceChatCompletionUnifiedCompletionToolChoiceFunction {
name: string
}
export interface InferenceChatCompletionUnifiedCompletionToolFunction {
description?: string
name: string
parameters?: any
strict?: boolean
}
export type InferenceChatCompletionUnifiedCompletionToolType = string | InferenceChatCompletionUnifiedCompletionToolChoice
export interface InferenceChatCompletionUnifiedContentObject {
text: string
type: string
}
export interface InferenceChatCompletionUnifiedMessage {
content?: InferenceChatCompletionUnifiedMessageContent
role: string
tool_call_id?: Id
tool_calls?: InferenceChatCompletionUnifiedToolCall[]
}
export type InferenceChatCompletionUnifiedMessageContent = string | InferenceChatCompletionUnifiedContentObject[]
export interface InferenceChatCompletionUnifiedRequest extends RequestBase {
inference_id: Id
timeout?: Duration
messages: InferenceChatCompletionUnifiedMessage[]
model?: string
max_completion_tokens?: long
stop?: string[]
temperature?: float
tool_choice?: InferenceChatCompletionUnifiedCompletionToolType
tools?: InferenceChatCompletionUnifiedCompletionTool[]
top_p?: float
}
export type InferenceChatCompletionUnifiedResponse = StreamResult
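// Illustrative usage sketch (comment only); the method name
// `inference.chatCompletionUnified` is an assumption about how this request
// type is exposed on the client, and the endpoint id is a placeholder.
//
//   const response = await client.inference.chatCompletionUnified({
//     inference_id: 'openai-completion',
//     messages: [{ role: 'user', content: 'What is Elastic?' }]
//   })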
export interface InferenceChatCompletionUnifiedToolCall {
id: Id
function: InferenceChatCompletionUnifiedToolCallFunction
type: string
}
export interface InferenceChatCompletionUnifiedToolCallFunction {
arguments: string
name: string
}
export interface InferenceCompletionRequest extends RequestBase {
inference_id: Id
timeout?: Duration
input: string | string[]
task_settings?: InferenceTaskSettings
}
export type InferenceCompletionResponse = InferenceCompletionInferenceResult
export interface InferenceDeleteRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
@ -12908,17 +13011,6 @@ export interface InferenceGetResponse {
endpoints: InferenceInferenceEndpointInfo[]
}
export interface InferenceInferenceRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
timeout?: Duration
query?: string
input: string | string[]
task_settings?: InferenceTaskSettings
}
export type InferenceInferenceResponse = InferenceInferenceResult
export interface InferencePutRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
@ -12927,78 +13019,139 @@ export interface InferencePutRequest extends RequestBase {
export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface InferenceStreamInferenceRequest extends RequestBase {
inference_id: Id
task_type?: InferenceTaskType
input: string | string[]
export interface InferencePutEisEisServiceSettings {
model_id: string
rate_limit?: InferenceRateLimitSetting
}
export type InferenceStreamInferenceResponse = StreamResult
export type InferencePutEisEisTaskType = 'chat_completion'
export interface InferenceUnifiedInferenceCompletionTool {
type: string
function: InferenceUnifiedInferenceCompletionToolFunction
export interface InferencePutEisRequest extends RequestBase {
task_type: InferencePutEisEisTaskType
eis_inference_id: Id
service: InferencePutEisServiceType
service_settings: InferencePutEisEisServiceSettings
}
export interface InferenceUnifiedInferenceCompletionToolChoice {
type: string
function: InferenceUnifiedInferenceCompletionToolChoiceFunction
export type InferencePutEisResponse = InferenceInferenceEndpointInfo
export type InferencePutEisServiceType = 'elastic'
export interface InferencePutOpenaiOpenAIServiceSettings {
api_key: string
dimensions?: integer
model_id: string
organization_id?: string
rate_limit?: InferenceRateLimitSetting
url?: string
}
export interface InferenceUnifiedInferenceCompletionToolChoiceFunction {
name: string
export interface InferencePutOpenaiOpenAITaskSettings {
user?: string
}
export interface InferenceUnifiedInferenceCompletionToolFunction {
description?: string
name: string
parameters?: any
strict?: boolean
export type InferencePutOpenaiOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding'
export interface InferencePutOpenaiRequest extends RequestBase {
task_type: InferencePutOpenaiOpenAITaskType
openai_inference_id: Id
chunking_settings?: InferenceInferenceChunkingSettings
service: InferencePutOpenaiServiceType
service_settings: InferencePutOpenaiOpenAIServiceSettings
task_settings?: InferencePutOpenaiOpenAITaskSettings
}
export type InferenceUnifiedInferenceCompletionToolType = string | InferenceUnifiedInferenceCompletionToolChoice
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo
export interface InferenceUnifiedInferenceContentObject {
text: string
type: string
export type InferencePutOpenaiServiceType = 'openai'
export interface InferencePutVoyageaiRequest extends RequestBase {
task_type: InferencePutVoyageaiVoyageAITaskType
voyageai_inference_id: Id
chunking_settings?: InferenceInferenceChunkingSettings
service: InferencePutVoyageaiServiceType
service_settings: InferencePutVoyageaiVoyageAIServiceSettings
task_settings?: InferencePutVoyageaiVoyageAITaskSettings
}
export interface InferenceUnifiedInferenceMessage {
content?: InferenceUnifiedInferenceMessageContent
role: string
tool_call_id?: Id
tool_calls?: InferenceUnifiedInferenceToolCall[]
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo
export type InferencePutVoyageaiServiceType = 'voyageai'
export interface InferencePutVoyageaiVoyageAIServiceSettings {
dimensions?: integer
model_id: string
rate_limit?: InferenceRateLimitSetting
embedding_type?: float
}
export type InferenceUnifiedInferenceMessageContent = string | InferenceUnifiedInferenceContentObject[]
export interface InferencePutVoyageaiVoyageAITaskSettings {
input_type?: string
return_documents?: boolean
top_k?: integer
truncation?: boolean
}
export interface InferenceUnifiedInferenceRequest extends RequestBase {
task_type?: InferenceTaskType
export type InferencePutVoyageaiVoyageAITaskType = 'text_embedding' | 'rerank'
export interface InferencePutWatsonxRequest extends RequestBase {
task_type: InferencePutWatsonxWatsonxTaskType
watsonx_inference_id: Id
service: InferencePutWatsonxServiceType
service_settings: InferencePutWatsonxWatsonxServiceSettings
}
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo
export type InferencePutWatsonxServiceType = 'watsonxai'
export interface InferencePutWatsonxWatsonxServiceSettings {
api_key: string
api_version: string
model_id: string
project_id: string
rate_limit?: InferenceRateLimitSetting
url: string
}
export type InferencePutWatsonxWatsonxTaskType = 'text_embedding'
export interface InferenceRerankRequest extends RequestBase {
inference_id: Id
timeout?: Duration
messages: InferenceUnifiedInferenceMessage[]
model?: string
max_completion_tokens?: long
stop?: string[]
temperature?: float
tool_choice?: InferenceUnifiedInferenceCompletionToolType
tools?: InferenceUnifiedInferenceCompletionTool[]
top_p?: float
query: string
input: string | string[]
task_settings?: InferenceTaskSettings
}
export type InferenceUnifiedInferenceResponse = StreamResult
export type InferenceRerankResponse = InferenceRerankedInferenceResult
export interface InferenceUnifiedInferenceToolCall {
id: Id
function: InferenceUnifiedInferenceToolCallFunction
type: string
export interface InferenceSparseEmbeddingRequest extends RequestBase {
inference_id: Id
timeout?: Duration
input: string | string[]
task_settings?: InferenceTaskSettings
}
export interface InferenceUnifiedInferenceToolCallFunction {
arguments: string
name: string
export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult
export interface InferenceStreamCompletionRequest extends RequestBase {
inference_id: Id
input: string | string[]
task_settings?: InferenceTaskSettings
}
export type InferenceStreamCompletionResponse = StreamResult
export interface InferenceTextEmbeddingRequest extends RequestBase {
inference_id: Id
timeout?: Duration
input: string | string[]
task_settings?: InferenceTaskSettings
}
export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult
export interface InferenceUpdateRequest extends RequestBase {
inference_id: Id
task_type?: InferenceTaskType
@ -16196,6 +16349,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
threads_per_allocation?: integer
timeout?: Duration
wait_for?: MlDeploymentAllocationState
adaptive_allocations?: MlAdaptiveAllocationsSettings
}
export interface MlStartTrainedModelDeploymentResponse {
@ -16369,6 +16523,7 @@ export interface MlUpdateModelSnapshotResponse {
export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
number_of_allocations?: integer
adaptive_allocations?: MlAdaptiveAllocationsSettings
}
export interface MlUpdateTrainedModelDeploymentResponse {
@ -16719,13 +16874,11 @@ export interface NodesNodeBufferPool {
used_in_bytes?: long
}
export interface NodesNodeReloadError {
export interface NodesNodeReloadResult {
name: Name
reload_exception?: ErrorCause
}
export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError
export interface NodesNodesResponseBase {
_nodes?: NodeStatistics
}
@ -17943,6 +18096,15 @@ export interface SecurityRemoteIndicesPrivileges {
allow_restricted_indices?: boolean
}
export interface SecurityRemoteUserIndicesPrivileges {
field_security?: SecurityFieldSecurity[]
names: IndexName | IndexName[]
privileges: SecurityIndexPrivilege[]
query?: SecurityIndicesPrivilegesQuery[]
allow_restricted_indices: boolean
clusters: string[]
}
export interface SecurityReplicationAccess {
names: IndexName | IndexName[]
allow_restricted_indices?: boolean
@ -18440,7 +18602,8 @@ export interface SecurityGetRoleRole {
remote_indices?: SecurityRemoteIndicesPrivileges[]
remote_cluster?: SecurityRemoteClusterPrivileges[]
metadata: Metadata
run_as: string[]
description?: string
run_as?: string[]
transient_metadata?: Record<string, any>
applications: SecurityApplicationPrivileges[]
role_templates?: SecurityRoleTemplate[]
@ -18549,8 +18712,10 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase {
export interface SecurityGetUserPrivilegesResponse {
applications: SecurityApplicationPrivileges[]
cluster: string[]
remote_cluster?: SecurityRemoteClusterPrivileges[]
global: SecurityGlobalPrivilege[]
indices: SecurityUserIndicesPrivileges[]
remote_indices?: SecurityRemoteUserIndicesPrivileges[]
run_as: string[]
}
@ -18694,7 +18859,7 @@ export interface SecurityOidcAuthenticateResponse {
}
export interface SecurityOidcLogoutRequest extends RequestBase {
access_token: string
token: string
refresh_token?: string
}
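// Illustrative usage sketch (comment only): the body field is now `token`
// rather than `access_token`; both values are placeholders.
//
//   const response = await client.security.oidcLogout({
//     token: '<ACCESS_TOKEN>',
//     refresh_token: '<REFRESH_TOKEN>'
//   })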
@ -19114,7 +19279,7 @@ export interface SimulateIngestRequest extends RequestBase {
pipeline?: PipelineName
docs: IngestDocument[]
component_template_substitutions?: Record<string, ClusterComponentTemplateNode>
index_template_subtitutions?: Record<string, IndicesIndexTemplate>
index_template_substitutions?: Record<string, IndicesIndexTemplate>
mapping_addition?: MappingTypeMapping
pipeline_substitutions?: Record<string, IngestPipeline>
}
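// Illustrative usage sketch (comment only) using the corrected
// `index_template_substitutions` key; the document and template bodies are
// placeholders.
//
//   const response = await client.simulate.ingest({
//     docs: [{ _index: 'my-index', _source: { foo: 'bar' } }],
//     index_template_substitutions: {
//       'my-template': { index_patterns: ['my-index*'], composed_of: [] }
//     }
//   })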


@ -174,9 +174,14 @@ export interface CountResponse {
export interface CreateRequest<TDocument = unknown> extends RequestBase {
id: Id
index: IndexName
if_primary_term?: long
if_seq_no?: SequenceNumber
include_source_on_error?: boolean
op_type?: OpType
pipeline?: string
refresh?: Refresh
require_alias?: boolean
require_data_stream?: boolean
routing?: Routing
timeout?: Duration
version?: VersionNumber
@ -2013,22 +2018,22 @@ export interface TermvectorsFilter {
export interface TermvectorsRequest<TDocument = unknown> extends RequestBase {
index: IndexName
id?: Id
fields?: Fields
field_statistics?: boolean
offsets?: boolean
payloads?: boolean
positions?: boolean
preference?: string
realtime?: boolean
routing?: Routing
term_statistics?: boolean
version?: VersionNumber
version_type?: VersionType
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
doc?: TDocument
filter?: TermvectorsFilter
per_field_analyzer?: Record<Field, string>
fields?: Fields
field_statistics?: boolean
offsets?: boolean
payloads?: boolean
positions?: boolean
term_statistics?: boolean
routing?: Routing
version?: VersionNumber
version_type?: VersionType
}
}
@ -2195,7 +2200,6 @@ export interface BulkIndexByScrollFailure {
id: Id
index: IndexName
status: integer
type: string
}
export interface BulkStats {
@ -4465,7 +4469,7 @@ export interface AggregationsWeightedAverageValue {
export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
}
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
export interface AnalysisArabicAnalyzer {
type: 'arabic'
@ -4902,17 +4906,6 @@ export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
discard_compound_token?: boolean
}
export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
export interface AnalysisLanguageAnalyzer {
type: 'language'
version?: VersionString
language: AnalysisLanguage
stem_exclusion: string[]
stopwords?: AnalysisStopWords
stopwords_path?: string
}
export interface AnalysisLatvianAnalyzer {
type: 'latvian'
stopwords?: AnalysisStopWords
@ -5435,6 +5428,8 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase {
format?: string
ignore_malformed?: boolean
index?: boolean
script?: Script | string
on_script_error?: MappingOnScriptError
null_value?: DateTime
precision_step?: integer
type: 'date_nanos'
@ -5446,6 +5441,8 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase {
format?: string
ignore_malformed?: boolean
index?: boolean
script?: Script | string
on_script_error?: MappingOnScriptError
null_value?: DateTime
precision_step?: integer
locale?: string
@ -5522,7 +5519,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
export interface MappingDynamicTemplate {
mapping?: MappingProperty
runtime?: MappingProperty
runtime?: MappingRuntimeField
match?: string | string[]
path_match?: string | string[]
unmatch?: string | string[]
@ -5586,6 +5583,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase {
coerce?: boolean
ignore_malformed?: boolean
ignore_z_value?: boolean
index?: boolean
orientation?: MappingGeoOrientation
strategy?: MappingGeoStrategy
type: 'geo_shape'
@ -5717,7 +5715,7 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase
export interface MappingObjectProperty extends MappingCorePropertyBase {
enabled?: boolean
subobjects?: boolean
subobjects?: MappingSubobjects
type?: 'object'
}
@ -5788,7 +5786,7 @@ export interface MappingRuntimeFieldFetchFields {
format?: string
}
export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup'
export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup'
export type MappingRuntimeFields = Record<Field, MappingRuntimeField>
@ -5814,7 +5812,8 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
export interface MappingSemanticTextProperty {
type: 'semantic_text'
meta?: Record<string, string>
inference_id: Id
inference_id?: Id
search_inference_id?: Id
}
export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
@ -5849,6 +5848,8 @@ export interface MappingSparseVectorProperty extends MappingPropertyBase {
type: 'sparse_vector'
}
export type MappingSubobjects = boolean | 'true' | 'false' | 'auto'
export interface MappingSuggestContext {
name: Name
path?: Field
@ -5900,7 +5901,7 @@ export interface MappingTypeMapping {
date_detection?: boolean
dynamic?: MappingDynamicMapping
dynamic_date_formats?: string[]
dynamic_templates?: Record<string, MappingDynamicTemplate>[]
dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[]
_field_names?: MappingFieldNamesField
index_field?: MappingIndexField
_meta?: Metadata
@ -5911,7 +5912,7 @@ export interface MappingTypeMapping {
_source?: MappingSourceField
runtime?: Record<string, MappingRuntimeField>
enabled?: boolean
subobjects?: boolean
subobjects?: MappingSubobjects
_data_stream_timestamp?: MappingDataStreamTimestamp
}
@ -6096,6 +6097,12 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile
geohash?: GeoHash
geohex?: GeoHexCell
}
export interface QueryDslGeoPolygonPoints {
points: GeoLocation[]
}
@ -6391,6 +6398,7 @@ export interface QueryDslQueryContainer {
fuzzy?: Partial<Record<Field, QueryDslFuzzyQuery | string | double | boolean>>
geo_bounding_box?: QueryDslGeoBoundingBoxQuery
geo_distance?: QueryDslGeoDistanceQuery
geo_grid?: Partial<Record<Field, QueryDslGeoGridQuery>>
geo_polygon?: QueryDslGeoPolygonQuery
geo_shape?: QueryDslGeoShapeQuery
has_child?: QueryDslHasChildQuery
@ -10585,12 +10593,12 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export interface EsqlAsyncQueryRequest extends RequestBase {
allow_partial_results?: boolean
delimiter?: string
drop_null_columns?: boolean
format?: EsqlQueryEsqlFormat
keep_alive?: Duration
keep_on_completion?: boolean
wait_for_completion_timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
columnar?: boolean
@ -10601,6 +10609,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
query: string
tables?: Record<string, Record<string, EsqlTableValuesContainer>>
include_ccs_metadata?: boolean
wait_for_completion_timeout?: Duration
}
}
@ -10634,6 +10643,7 @@ export interface EsqlQueryRequest extends RequestBase {
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
allow_partial_results?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
columnar?: boolean
@@ -11286,7 +11296,7 @@ export interface IndicesIndexSettingsKeys {
routing_partition_size?: SpecUtilsStringified<integer>
load_fixed_bitset_filters_eagerly?: boolean
hidden?: boolean | string
auto_expand_replicas?: string
auto_expand_replicas?: SpecUtilsWithNullValue<string>
merge?: IndicesMerge
search?: IndicesSettingsSearch
refresh_interval?: Duration
@@ -11583,7 +11593,7 @@ export interface IndicesSoftDeletes {
retention_lease?: IndicesRetentionLease
}
export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC'
export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic'
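// Source-mode values are now lowercase ('disabled' | 'stored' | 'synthetic').
// A sketch enabling synthetic _source at index creation; the flattened
// settings key is an assumption (the generated settings type also accepts
// arbitrary dotted keys):
const syntheticSourceIndex = await client.indices.create({
  index: "logs-synthetic",
  settings: { "index.mapping.source.mode": "synthetic" },
});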
export interface IndicesStorage {
type: IndicesStorageType
@@ -11936,6 +11946,8 @@ export type IndicesExistsAliasResponse = boolean
export interface IndicesExistsIndexTemplateRequest extends RequestBase {
name: Name
local?: boolean
flat_settings?: boolean
master_timeout?: Duration
}
@@ -12005,7 +12017,6 @@ export interface IndicesFieldUsageStatsRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
fields?: Fields
wait_for_active_shards?: WaitForActiveShards
}
export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody
@@ -12372,7 +12383,7 @@ export interface IndicesPutMappingRequest extends RequestBase {
date_detection?: boolean
dynamic?: MappingDynamicMapping
dynamic_date_formats?: string[]
dynamic_templates?: Record<string, MappingDynamicTemplate> | Record<string, MappingDynamicTemplate>[]
dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[]
_field_names?: MappingFieldNamesField
_meta?: Metadata
numeric_detection?: boolean
@@ -12393,6 +12404,7 @@ export interface IndicesPutSettingsRequest extends RequestBase {
ignore_unavailable?: boolean
master_timeout?: Duration
preserve_existing?: boolean
reopen?: boolean
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, use 'settings' instead. */
body?: IndicesIndexSettings
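// The new `reopen` flag lets a static setting be applied by closing and
// reopening the index in one call. Sketch with hypothetical names:
const reopenedSettings = await client.indices.putSettings({
  index: "my-index",
  reopen: true,
  settings: { "index.codec": "best_compression" },
});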
@@ -12550,6 +12562,7 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
allow_no_indices?: boolean
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
resource?: string
}
export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult
@@ -12611,6 +12624,7 @@ export interface IndicesRolloverRequest extends RequestBase {
master_timeout?: Duration
timeout?: Duration
wait_for_active_shards?: WaitForActiveShards
lazy?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
aliases?: Record<IndexName, IndicesAlias>
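// Rollover now supports `lazy`, deferring the actual rollover of a data
// stream until the next document is indexed. Sketch with a hypothetical
// data stream name:
const lazyRollover = await client.indices.rollover({
  alias: "my-data-stream",
  lazy: true,
});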
@@ -12757,6 +12771,8 @@ export interface IndicesShrinkResponse {
export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
name: Name
create?: boolean
cause?: string
master_timeout?: Duration
include_defaults?: boolean
}
@@ -12774,6 +12790,7 @@ export interface IndicesSimulateTemplateOverlapping {
export interface IndicesSimulateTemplateRequest extends RequestBase {
name?: Name
create?: boolean
cause?: string
master_timeout?: Duration
include_defaults?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
@@ -13067,6 +13084,10 @@ export interface IndicesValidateQueryResponse {
error?: string
}
export interface InferenceCompletionInferenceResult {
completion: InferenceCompletionResult[]
}
export interface InferenceCompletionResult {
result: string
}
@@ -13098,22 +13119,26 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi
task_type: InferenceTaskType
}
export interface InferenceInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding?: InferenceTextEmbeddingResult[]
sparse_embedding?: InferenceSparseEmbeddingResult[]
completion?: InferenceCompletionResult[]
rerank?: InferenceRankedDocument[]
}
export interface InferenceRankedDocument {
index: integer
relevance_score: float
text?: string
}
export interface InferenceRateLimitSetting {
requests_per_minute?: integer
}
export interface InferenceRerankedInferenceResult {
rerank: InferenceRankedDocument[]
}
export type InferenceServiceSettings = any
export interface InferenceSparseEmbeddingInferenceResult {
sparse_embedding: InferenceSparseEmbeddingResult[]
}
export interface InferenceSparseEmbeddingResult {
embedding: InferenceSparseVector
}
@@ -13122,16 +13147,100 @@ export type InferenceSparseVector = Record<string, float>
export type InferenceTaskSettings = any
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion'
export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'
export interface InferenceTextEmbeddingByteResult {
embedding: InferenceDenseByteVector
}
export interface InferenceTextEmbeddingInferenceResult {
text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
text_embedding_bits?: InferenceTextEmbeddingByteResult[]
text_embedding?: InferenceTextEmbeddingResult[]
}
export interface InferenceTextEmbeddingResult {
embedding: InferenceDenseVector
}
export interface InferenceChatCompletionUnifiedCompletionTool {
type: string
function: InferenceChatCompletionUnifiedCompletionToolFunction
}
export interface InferenceChatCompletionUnifiedCompletionToolChoice {
type: string
function: InferenceChatCompletionUnifiedCompletionToolChoiceFunction
}
export interface InferenceChatCompletionUnifiedCompletionToolChoiceFunction {
name: string
}
export interface InferenceChatCompletionUnifiedCompletionToolFunction {
description?: string
name: string
parameters?: any
strict?: boolean
}
export type InferenceChatCompletionUnifiedCompletionToolType = string | InferenceChatCompletionUnifiedCompletionToolChoice
export interface InferenceChatCompletionUnifiedContentObject {
text: string
type: string
}
export interface InferenceChatCompletionUnifiedMessage {
content?: InferenceChatCompletionUnifiedMessageContent
role: string
tool_call_id?: Id
tool_calls?: InferenceChatCompletionUnifiedToolCall[]
}
export type InferenceChatCompletionUnifiedMessageContent = string | InferenceChatCompletionUnifiedContentObject[]
export interface InferenceChatCompletionUnifiedRequest extends RequestBase {
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
messages: InferenceChatCompletionUnifiedMessage[]
model?: string
max_completion_tokens?: long
stop?: string[]
temperature?: float
tool_choice?: InferenceChatCompletionUnifiedCompletionToolType
tools?: InferenceChatCompletionUnifiedCompletionTool[]
top_p?: float
}
}
export type InferenceChatCompletionUnifiedResponse = StreamResult
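// The unified chat completion surface gets its own request type and streams
// its response. A sketch assuming the generated method name
// `inference.chatCompletionUnified` and a hypothetical endpoint id:
const chatStream = await client.inference.chatCompletionUnified({
  inference_id: "openai-completion",
  messages: [{ role: "user", content: "Say hello in one word." }],
  temperature: 0.2,
});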
export interface InferenceChatCompletionUnifiedToolCall {
id: Id
function: InferenceChatCompletionUnifiedToolCallFunction
type: string
}
export interface InferenceChatCompletionUnifiedToolCallFunction {
arguments: string
name: string
}
export interface InferenceCompletionRequest extends RequestBase {
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export type InferenceCompletionResponse = InferenceCompletionInferenceResult
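// Completion likewise gets a dedicated request/response pair. Sketch
// assuming the generated method name `inference.completion` and a
// hypothetical endpoint id:
const completion = await client.inference.completion({
  inference_id: "openai-completion",
  input: "What is Elastic?",
});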
export interface InferenceDeleteRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
@@ -13150,20 +13259,6 @@ export interface InferenceGetResponse {
endpoints: InferenceInferenceEndpointInfo[]
}
export interface InferenceInferenceRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
query?: string
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export type InferenceInferenceResponse = InferenceInferenceResult
export interface InferencePutRequest extends RequestBase {
task_type?: InferenceTaskType
inference_id: Id
@@ -13173,84 +13268,163 @@ export interface InferencePutRequest extends RequestBase {
export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface InferenceStreamInferenceRequest extends RequestBase {
inference_id: Id
task_type?: InferenceTaskType
export interface InferencePutEisEisServiceSettings {
model_id: string
rate_limit?: InferenceRateLimitSetting
}
export type InferencePutEisEisTaskType = 'chat_completion'
export interface InferencePutEisRequest extends RequestBase {
task_type: InferencePutEisEisTaskType
eis_inference_id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
input: string | string[]
service: InferencePutEisServiceType
service_settings: InferencePutEisEisServiceSettings
}
}
export type InferenceStreamInferenceResponse = StreamResult
export type InferencePutEisResponse = InferenceInferenceEndpointInfo
export interface InferenceUnifiedInferenceCompletionTool {
type: string
function: InferenceUnifiedInferenceCompletionToolFunction
export type InferencePutEisServiceType = 'elastic'
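// Service-specific endpoint creation starts here with the Elastic Inference
// Service. Sketch assuming the generated method name `inference.putEis`;
// the model id is a placeholder:
const eisEndpoint = await client.inference.putEis({
  task_type: "chat_completion",
  eis_inference_id: "my-eis-endpoint",
  service: "elastic",
  service_settings: { model_id: "<MODEL_ID>" },
});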
export interface InferencePutOpenaiOpenAIServiceSettings {
api_key: string
dimensions?: integer
model_id: string
organization_id?: string
rate_limit?: InferenceRateLimitSetting
url?: string
}
export interface InferenceUnifiedInferenceCompletionToolChoice {
type: string
function: InferenceUnifiedInferenceCompletionToolChoiceFunction
export interface InferencePutOpenaiOpenAITaskSettings {
user?: string
}
export interface InferenceUnifiedInferenceCompletionToolChoiceFunction {
name: string
export type InferencePutOpenaiOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding'
export interface InferencePutOpenaiRequest extends RequestBase {
task_type: InferencePutOpenaiOpenAITaskType
openai_inference_id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
chunking_settings?: InferenceInferenceChunkingSettings
service: InferencePutOpenaiServiceType
service_settings: InferencePutOpenaiOpenAIServiceSettings
task_settings?: InferencePutOpenaiOpenAITaskSettings
}
}
export interface InferenceUnifiedInferenceCompletionToolFunction {
description?: string
name: string
parameters?: any
strict?: boolean
export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo
export type InferencePutOpenaiServiceType = 'openai'
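// OpenAI endpoints follow the same pattern. Sketch assuming the generated
// method name `inference.putOpenai`; key and model id are placeholders:
const openaiEndpoint = await client.inference.putOpenai({
  task_type: "text_embedding",
  openai_inference_id: "my-openai-endpoint",
  service: "openai",
  service_settings: {
    api_key: "<API_KEY>",
    model_id: "text-embedding-3-small",
  },
});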
export interface InferencePutVoyageaiRequest extends RequestBase {
task_type: InferencePutVoyageaiVoyageAITaskType
voyageai_inference_id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
chunking_settings?: InferenceInferenceChunkingSettings
service: InferencePutVoyageaiServiceType
service_settings: InferencePutVoyageaiVoyageAIServiceSettings
task_settings?: InferencePutVoyageaiVoyageAITaskSettings
}
}
export type InferenceUnifiedInferenceCompletionToolType = string | InferenceUnifiedInferenceCompletionToolChoice
export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo
export interface InferenceUnifiedInferenceContentObject {
text: string
type: string
export type InferencePutVoyageaiServiceType = 'voyageai'
export interface InferencePutVoyageaiVoyageAIServiceSettings {
dimensions?: integer
model_id: string
rate_limit?: InferenceRateLimitSetting
embedding_type?: float
}
export interface InferenceUnifiedInferenceMessage {
content?: InferenceUnifiedInferenceMessageContent
role: string
tool_call_id?: Id
tool_calls?: InferenceUnifiedInferenceToolCall[]
export interface InferencePutVoyageaiVoyageAITaskSettings {
input_type?: string
return_documents?: boolean
top_k?: integer
truncation?: boolean
}
export type InferenceUnifiedInferenceMessageContent = string | InferenceUnifiedInferenceContentObject[]
export type InferencePutVoyageaiVoyageAITaskType = 'text_embedding' | 'rerank'
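// VoyageAI endpoints support text_embedding and rerank. Sketch assuming the
// generated method name `inference.putVoyageai`; the model id is a
// placeholder:
const voyageaiEndpoint = await client.inference.putVoyageai({
  task_type: "rerank",
  voyageai_inference_id: "my-voyageai-endpoint",
  service: "voyageai",
  service_settings: { model_id: "<MODEL_ID>" },
});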
export interface InferenceUnifiedInferenceRequest extends RequestBase {
task_type?: InferenceTaskType
export interface InferencePutWatsonxRequest extends RequestBase {
task_type: InferencePutWatsonxWatsonxTaskType
watsonx_inference_id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
service: InferencePutWatsonxServiceType
service_settings: InferencePutWatsonxWatsonxServiceSettings
}
}
export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo
export type InferencePutWatsonxServiceType = 'watsonxai'
export interface InferencePutWatsonxWatsonxServiceSettings {
api_key: string
api_version: string
model_id: string
project_id: string
rate_limit?: InferenceRateLimitSetting
url: string
}
export type InferencePutWatsonxWatsonxTaskType = 'text_embedding'
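// Watsonx requires the full set of service settings shown above. Sketch
// assuming the generated method name `inference.putWatsonx`; all values are
// placeholders:
const watsonxEndpoint = await client.inference.putWatsonx({
  task_type: "text_embedding",
  watsonx_inference_id: "my-watsonx-endpoint",
  service: "watsonxai",
  service_settings: {
    api_key: "<API_KEY>",
    api_version: "2024-05-31",
    model_id: "<MODEL_ID>",
    project_id: "<PROJECT_ID>",
    url: "<URL>",
  },
});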
export interface InferenceRerankRequest extends RequestBase {
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
messages: InferenceUnifiedInferenceMessage[]
model?: string
max_completion_tokens?: long
stop?: string[]
temperature?: float
tool_choice?: InferenceUnifiedInferenceCompletionToolType
tools?: InferenceUnifiedInferenceCompletionTool[]
top_p?: float
query: string
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export type InferenceUnifiedInferenceResponse = StreamResult
export type InferenceRerankResponse = InferenceRerankedInferenceResult
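// Rerank replaces the generic inference call for ranking; note that `query`
// is required here. Sketch assuming the generated method name
// `inference.rerank` and a hypothetical endpoint id:
const reranked = await client.inference.rerank({
  inference_id: "my-rerank-endpoint",
  query: "where is the best pizza?",
  input: ["doc about pizza", "doc about pasta"],
});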
export interface InferenceUnifiedInferenceToolCall {
id: Id
function: InferenceUnifiedInferenceToolCallFunction
type: string
export interface InferenceSparseEmbeddingRequest extends RequestBase {
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export interface InferenceUnifiedInferenceToolCallFunction {
arguments: string
name: string
export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult
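// Sparse embedding sketch, assuming the generated method name
// `inference.sparseEmbedding` and a hypothetical ELSER endpoint id:
const sparse = await client.inference.sparseEmbedding({
  inference_id: "my-elser-endpoint",
  input: "The quick brown fox",
});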
export interface InferenceStreamCompletionRequest extends RequestBase {
inference_id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export type InferenceStreamCompletionResponse = StreamResult
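// Stream completion returns a StreamResult (server-sent events) rather than
// a parsed object. Sketch assuming the generated method name
// `inference.streamCompletion` and a hypothetical endpoint id:
const completionStream = await client.inference.streamCompletion({
  inference_id: "openai-completion",
  input: "Stream a one-line answer: what is Elastic?",
});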
export interface InferenceTextEmbeddingRequest extends RequestBase {
inference_id: Id
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
input: string | string[]
task_settings?: InferenceTaskSettings
}
}
export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult
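// Text embedding sketch, assuming the generated method name
// `inference.textEmbedding` and a hypothetical endpoint id:
const embeddings = await client.inference.textEmbedding({
  inference_id: "my-embedding-endpoint",
  input: ["first passage", "second passage"],
});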
export interface InferenceUpdateRequest extends RequestBase {
inference_id: Id
task_type?: InferenceTaskType
@@ -16552,6 +16726,10 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
threads_per_allocation?: integer
timeout?: Duration
wait_for?: MlDeploymentAllocationState
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
adaptive_allocations?: MlAdaptiveAllocationsSettings
}
}
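// Deployments can now be started with adaptive allocations instead of a
// fixed allocation count. Sketch with a hypothetical model id; the
// MlAdaptiveAllocationsSettings field names are assumed from the ML spec:
const deployment = await client.ml.startTrainedModelDeployment({
  model_id: "my-model",
  adaptive_allocations: {
    enabled: true,
    min_number_of_allocations: 1,
    max_number_of_allocations: 4,
  },
});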
export interface MlStartTrainedModelDeploymentResponse {
@@ -16745,6 +16923,7 @@ export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase {
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
number_of_allocations?: integer
adaptive_allocations?: MlAdaptiveAllocationsSettings
}
}
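// The same adaptive_allocations block can be applied to a running
// deployment. Sketch with a hypothetical model id:
const updatedDeployment = await client.ml.updateTrainedModelDeployment({
  model_id: "my-model",
  adaptive_allocations: { enabled: true, max_number_of_allocations: 8 },
});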
@@ -17101,13 +17280,11 @@ export interface NodesNodeBufferPool {
used_in_bytes?: long
}
export interface NodesNodeReloadError {
export interface NodesNodeReloadResult {
name: Name
reload_exception?: ErrorCause
}
export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError
export interface NodesNodesResponseBase {
_nodes?: NodeStatistics
}
@@ -18354,6 +18531,15 @@ export interface SecurityRemoteIndicesPrivileges {
allow_restricted_indices?: boolean
}
export interface SecurityRemoteUserIndicesPrivileges {
field_security?: SecurityFieldSecurity[]
names: IndexName | IndexName[]
privileges: SecurityIndexPrivilege[]
query?: SecurityIndicesPrivilegesQuery[]
allow_restricted_indices: boolean
clusters: string[]
}
export interface SecurityReplicationAccess {
names: IndexName | IndexName[]
allow_restricted_indices?: boolean
@@ -18875,7 +19061,8 @@ export interface SecurityGetRoleRole {
remote_indices?: SecurityRemoteIndicesPrivileges[]
remote_cluster?: SecurityRemoteClusterPrivileges[]
metadata: Metadata
run_as: string[]
description?: string
run_as?: string[]
transient_metadata?: Record<string, any>
applications: SecurityApplicationPrivileges[]
role_templates?: SecurityRoleTemplate[]
@@ -18987,8 +19174,10 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase {
export interface SecurityGetUserPrivilegesResponse {
applications: SecurityApplicationPrivileges[]
cluster: string[]
remote_cluster?: SecurityRemoteClusterPrivileges[]
global: SecurityGlobalPrivilege[]
indices: SecurityUserIndicesPrivileges[]
remote_indices?: SecurityRemoteUserIndicesPrivileges[]
run_as: string[]
}
@@ -19152,7 +19341,7 @@ export interface SecurityOidcAuthenticateResponse {
export interface SecurityOidcLogoutRequest extends RequestBase {
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
access_token: string
token: string
refresh_token?: string
}
}
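// The OIDC logout body field is renamed from access_token to token. Sketch
// with placeholder tokens, assuming an instantiated `client`:
const oidcLogout = await client.security.oidcLogout({
  token: "<ACCESS_TOKEN>",
  refresh_token: "<REFRESH_TOKEN>",
});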
@@ -19631,7 +19820,7 @@ export interface SimulateIngestRequest extends RequestBase {
body?: {
docs: IngestDocument[]
component_template_substitutions?: Record<string, ClusterComponentTemplateNode>
index_template_subtitutions?: Record<string, IndicesIndexTemplate>
index_template_substitutions?: Record<string, IndicesIndexTemplate>
mapping_addition?: MappingTypeMapping
pipeline_substitutions?: Record<string, IngestPipeline>
}