Auto-generated API code (#2344)

Co-authored-by: Josh Mock <joshua.mock@elastic.co>
Authored by Elastic Machine on 2024-08-20 03:32:21 +10:00; committed by GitHub
parent 1042a02733
commit 715292b501
60 changed files with 959 additions and 267 deletions


@ -5,6 +5,21 @@
----
const response = await client.searchApplication.renderQuery({
name: "my-app",
body: {
params: {
query_string: "my first query",
text_fields: [
{
name: "title",
boost: 5,
},
{
name: "description",
boost: 1,
},
],
},
},
});
console.log(response);
----


@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "elser-embeddings",
mappings: {
properties: {
content_embedding: {
type: "sparse_vector",
},
content: {
type: "text",
},
},
},
});
console.log(response);
----


@ -5,6 +5,11 @@
----
const response = await client.security.queryUser({
with_profile_uid: "true",
query: {
prefix: {
roles: "other",
},
},
});
console.log(response);
----


@ -3,6 +3,25 @@
[source, js]
----
const response = await client.simulate.ingest({});
const response = await client.simulate.ingest({
body: {
docs: [
{
_index: "my-index",
_id: "123",
_source: {
foo: "bar",
},
},
{
_index: "my-index",
_id: "456",
_source: {
foo: "rab",
},
},
],
},
});
console.log(response);
----


@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.deleteGeoipDatabase({
id: "my-database-id",
body: null,
});
console.log(response);
----


@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/my-elser-model",
body: {
service: "elser",
service_settings: {
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
},
},
});
console.log(response);
----


@ -3,6 +3,12 @@
[source, js]
----
const response = await client.security.oidcLogout({});
const response = await client.security.oidcLogout({
body: {
token:
"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
refresh_token: "vLBPvmAB6KvwvJZr27cS",
},
});
console.log(response);
----


@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.bulkDeleteRole({
names: ["my_admin_role", "superuser"],
});
console.log(response);
----


@ -6,6 +6,7 @@
const response = await client.esql.asyncQueryGet({
id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
wait_for_completion_timeout: "30s",
body: null,
});
console.log(response);
----


@ -0,0 +1,25 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "bad_example_index",
mappings: {
properties: {
field_1: {
type: "text",
copy_to: "field_2",
},
field_2: {
type: "text",
copy_to: "field_3",
},
field_3: {
type: "text",
},
},
},
});
console.log(response);
----


@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/anthropic_completion",
body: {
service: "anthropic",
service_settings: {
api_key: "<api_key>",
model_id: "<model_id>",
},
task_settings: {
max_tokens: 1024,
},
},
});
console.log(response);
----


@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.getGeoipDatabase({
id: "my-database-id",
body: null,
});
console.log(response);
----


@ -3,6 +3,12 @@
[source, js]
----
const response = await client.esql.asyncQuery({});
const response = await client.esql.asyncQuery({
body: {
query:
"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ",
wait_for_completion_timeout: "2s",
},
});
console.log(response);
----


@ -5,6 +5,7 @@
----
const response = await client.esql.asyncQueryGet({
id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
body: null,
});
console.log(response);
----


@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.bulkDeleteRole({
names: ["my_admin_role", "my_user_role"],
});
console.log(response);
----


@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ml.startTrainedModelDeployment({
model_id: "my_model",
deployment_id: "my_model_for_search",
});
console.log(response);
----


@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.putPipeline({
id: "elser_embeddings",
processors: [
{
inference: {
model_id: "elser_embeddings",
input_output: {
input_field: "content",
output_field: "content_embedding",
},
},
},
],
});
console.log(response);
----


@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.deleteGeoipDatabase({
id: "example-database-id",
body: null,
});
console.log(response);
----


@ -3,6 +3,12 @@
[source, js]
----
const response = await client.security.oidcPrepareAuthentication({});
const response = await client.security.oidcPrepareAuthentication({
body: {
realm: "oidc1",
state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",
nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5",
},
});
console.log(response);
----


@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "test_index",
mappings: {
dynamic: "strict",
properties: {
description: {
properties: {
notes: {
type: "text",
copy_to: ["description.notes_raw"],
analyzer: "standard",
search_analyzer: "standard",
},
notes_raw: {
type: "keyword",
},
},
},
},
},
});
console.log(response);
----


@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.bulkDeleteRole({
names: ["my_admin_role", "not_an_existing_role"],
});
console.log(response);
----


@ -1,47 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
query: {
intervals: {
my_text: {
all_of: {
intervals: [
{
match: {
query: "the",
},
},
{
any_of: {
intervals: [
{
match: {
query: "big",
},
},
{
match: {
query: "big bad",
},
},
],
},
},
{
match: {
query: "wolf",
},
},
],
max_gaps: 0,
ordered: true,
},
},
},
},
});
console.log(response);
----


@ -29,7 +29,7 @@ const response = await client.transport.request({
},
{
rule_id: "rule2",
type: "pinned",
type: "exclude",
criteria: [
{
type: "contains",


@ -0,0 +1,59 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.bulkPutRole({
roles: {
my_admin_role: {
cluster: ["all"],
indices: [
{
names: ["index1", "index2"],
privileges: ["all"],
field_security: {
grant: ["title", "body"],
},
query: '{"match": {"title": "foo"}}',
},
],
applications: [
{
application: "myapp",
privileges: ["admin", "read"],
resources: ["*"],
},
],
run_as: ["other_user"],
metadata: {
version: 1,
},
},
my_user_role: {
cluster: ["all"],
indices: [
{
names: ["index1"],
privileges: ["read"],
field_security: {
grant: ["title", "body"],
},
query: '{"match": {"title": "foo"}}',
},
],
applications: [
{
application: "myapp",
privileges: ["admin", "read"],
resources: ["*"],
},
],
run_as: ["other_user"],
metadata: {
version: 1,
},
},
},
});
console.log(response);
----


@ -29,7 +29,7 @@ const response = await client.transport.request({
},
{
rule_id: "my-rule2",
type: "pinned",
type: "exclude",
criteria: [
{
type: "fuzzy",


@ -3,6 +3,10 @@
[source, js]
----
const response = await client.security.bulkUpdateApiKeys({});
const response = await client.security.bulkUpdateApiKeys({
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
},
});
console.log(response);
----


@ -3,6 +3,33 @@
[source, js]
----
const response = await client.textStructure.findMessageStructure({});
const response = await client.textStructure.findMessageStructure({
body: {
messages: [
"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128",
"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]",
"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]",
"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]",
"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]",
"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]",
"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]",
"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled",
"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled",
"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled",
"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]",
"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]",
"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized",
"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...",
],
},
});
console.log(response);
----


@ -3,6 +3,37 @@
[source, js]
----
const response = await client.simulate.ingest({});
const response = await client.simulate.ingest({
body: {
docs: [
{
_index: "my-index",
_id: "id",
_source: {
foo: "bar",
},
},
{
_index: "my-index",
_id: "id",
_source: {
foo: "rab",
},
},
],
pipeline_substitutions: {
"my-pipeline": {
processors: [
{
set: {
field: "field3",
value: "value3",
},
},
],
},
},
},
});
console.log(response);
----


@ -19,6 +19,12 @@ const response = await client.security.putRole({
clusters: ["my_remote_cluster"],
},
],
remote_cluster: [
{
privileges: ["monitor_enrich"],
clusters: ["my_remote_cluster"],
},
],
});
console.log(response);
----


@ -3,6 +3,28 @@
[source, js]
----
const response = await client.security.bulkUpdateApiKeys({});
const response = await client.security.bulkUpdateApiKeys({
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
role_descriptors: {
"role-a": {
indices: [
{
names: ["*"],
privileges: ["write"],
},
],
},
},
metadata: {
environment: {
level: 2,
trusted: true,
tags: ["production"],
},
},
expiration: "30d",
},
});
console.log(response);
----


@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.cat.shards({
v: "true",
});
console.log(response);
const response1 = await client.cat.recovery({
v: "true",
active_only: "true",
});
console.log(response1);
----


@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/my-e5-model",
body: {
service: "elasticsearch",
service_settings: {
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
model_id: ".multilingual-e5-small",
},
},
});
console.log(response);
----


@ -3,8 +3,8 @@
[source, js]
----
const response = await client.indices.delete({
index: "my-index",
const response = await client.security.queryRole({
sort: ["name"],
});
console.log(response);
----


@ -0,0 +1,17 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.queryRole({
query: {
match: {
description: {
query: "user access",
},
},
},
size: 1,
});
console.log(response);
----


@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.putGeoipDatabase({
id: "my-database-id",
body: {
name: "GeoIP2-Domain",
maxmind: {
account_id: "1025402",
},
},
});
console.log(response);
----


@ -6,6 +6,36 @@
const response = await client.searchApplication.postBehavioralAnalyticsEvent({
collection_name: "my_analytics_collection",
event_type: "search_click",
body: {
session: {
id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9",
},
user: {
id: "5f26f01a-bbee-4202-9298-81261067abbd",
},
search: {
query: "search term",
results: {
items: [
{
document: {
id: "123",
index: "products",
},
},
],
total_results: 10,
},
sort: {
name: "relevance",
},
search_application: "website",
},
document: {
id: "123",
index: "products",
},
},
});
console.log(response);
----


@ -3,6 +3,14 @@
[source, js]
----
const response = await client.security.oidcAuthenticate({});
const response = await client.security.oidcAuthenticate({
body: {
redirect_uri:
"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM",
realm: "oidc1",
},
});
console.log(response);
----


@ -5,6 +5,11 @@
----
const response = await client.searchApplication.renderQuery({
name: "my_search_application",
body: {
params: {
query_string: "rock climbing",
},
},
});
console.log(response);
----


@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/elser_embeddings",
body: {
service: "elser",
service_settings: {
num_allocations: 1,
num_threads: 1,
},
},
});
console.log(response);
----


@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.snapshot.createRepository({
name: "my_backup",
repository: {
type: "azure",
settings: {
client: "secondary",
container: "my_container",
base_path: "snapshots_prefix",
},
},
});
console.log(response);
----


@ -5,6 +5,7 @@
----
const response = await client.searchApplication.renderQuery({
name: "my_search_application",
body: null,
});
console.log(response);
----


@ -211,6 +211,7 @@ console.log(response);
const response1 = await client.textStructure.findFieldStructure({
index: "test-logs",
field: "message",
body: null,
});
console.log(response1);
----


@ -3,6 +3,36 @@
[source, js]
----
const response = await client.simulate.ingest({});
const response = await client.simulate.ingest({
body: {
docs: [
{
_index: "my-index",
_id: "123",
_source: {
foo: "bar",
},
},
{
_index: "my-index",
_id: "456",
_source: {
foo: "rab",
},
},
],
pipeline_substitutions: {
"my-pipeline": {
processors: [
{
uppercase: {
field: "foo",
},
},
],
},
},
},
});
console.log(response);
----


@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "elser-embeddings",
query: {
sparse_vector: {
field: "content_embedding",
inference_id: "elser_embeddings",
query: "How to avoid muscle soreness after running?",
},
},
_source: ["id", "content"],
});
console.log(response);
----


@ -3,6 +3,11 @@
[source, js]
----
const response = await client.security.bulkUpdateApiKeys({});
const response = await client.security.bulkUpdateApiKeys({
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
role_descriptors: {},
},
});
console.log(response);
----


@ -1,50 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.snapshot.createRepository({
name: "my_backup1",
repository: {
type: "azure",
},
});
console.log(response);
const response1 = await client.snapshot.createRepository({
name: "my_backup2",
repository: {
type: "azure",
settings: {
container: "backup-container",
base_path: "backups",
chunk_size: "32MB",
compress: true,
},
},
});
console.log(response1);
const response2 = await client.snapshot.createRepository({
name: "my_backup3",
repository: {
type: "azure",
settings: {
client: "secondary",
},
},
});
console.log(response2);
const response3 = await client.snapshot.createRepository({
name: "my_backup4",
repository: {
type: "azure",
settings: {
client: "secondary",
location_mode: "primary_only",
},
},
});
console.log(response3);
----


@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.reindex({
wait_for_completion: "false",
source: {
index: "test-data",
size: 50,
},
dest: {
index: "elser-embeddings",
pipeline: "elser_embeddings",
},
});
console.log(response);
----


@ -0,0 +1,13 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.snapshot.createRepository({
name: "my_backup",
repository: {
type: "azure",
},
});
console.log(response);
----


@ -3,6 +3,11 @@
[source, js]
----
const response = await client.security.oidcPrepareAuthentication({});
const response = await client.security.oidcPrepareAuthentication({
body: {
iss: "http://127.0.0.1:8080",
login_hint: "this_is_an_opaque_string",
},
});
console.log(response);
----


@ -0,0 +1,15 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ml.updateTrainedModelDeployment({
model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english",
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
});
console.log(response);
----


@ -3,6 +3,10 @@
[source, js]
----
const response = await client.security.oidcPrepareAuthentication({});
const response = await client.security.oidcPrepareAuthentication({
body: {
realm: "oidc1",
},
});
console.log(response);
----


@ -3,6 +3,12 @@
[source, js]
----
const response = await client.security.queryUser({});
const response = await client.security.queryUser({
query: {
prefix: {
roles: "other",
},
},
});
console.log(response);
----


@ -1,33 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
query: {
intervals: {
my_text: {
any_of: {
intervals: [
{
match: {
query: "the big bad wolf",
ordered: true,
max_gaps: 0,
},
},
{
match: {
query: "the big wolf",
ordered: true,
max_gaps: 0,
},
},
],
},
},
},
},
});
console.log(response);
----


@ -0,0 +1,24 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "good_example_index",
mappings: {
properties: {
field_1: {
type: "text",
copy_to: ["field_2", "field_3"],
},
field_2: {
type: "text",
},
field_3: {
type: "text",
},
},
},
});
console.log(response);
----


@ -0,0 +1,59 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.bulkPutRole({
roles: {
my_admin_role: {
cluster: ["bad_cluster_privilege"],
indices: [
{
names: ["index1", "index2"],
privileges: ["all"],
field_security: {
grant: ["title", "body"],
},
query: '{"match": {"title": "foo"}}',
},
],
applications: [
{
application: "myapp",
privileges: ["admin", "read"],
resources: ["*"],
},
],
run_as: ["other_user"],
metadata: {
version: 1,
},
},
my_user_role: {
cluster: ["all"],
indices: [
{
names: ["index1"],
privileges: ["read"],
field_security: {
grant: ["title", "body"],
},
query: '{"match": {"title": "foo"}}',
},
],
applications: [
{
application: "myapp",
privileges: ["admin", "read"],
resources: ["*"],
},
],
run_as: ["other_user"],
metadata: {
version: 1,
},
},
},
});
console.log(response);
----


@ -661,6 +661,12 @@ client.msearch({ ... })
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen.
** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes
the name of the matched query associated with its score (true)
or as an array containing the name of the matched queries (false)
This functionality reruns each named query on every hit in a search response.
Typically, this adds a small overhead to a request.
However, using computationally expensive named queries on a large number of hits may add significant overhead.
** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute.
** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node.
** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
@ -1018,6 +1024,12 @@ If the request can target data streams, this argument determines whether wildcar
Supports a list of values, such as `open,hidden`.
** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes
the name of the matched query associated with its score (true)
or as an array containing the name of the matched queries (false)
This functionality reruns each named query on every hit in a search response.
Typically, this adds a small overhead to a request.
However, using computationally expensive named queries on a large number of hits may add significant overhead.
** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
This parameter can only be used when the `q` query string parameter is specified.
** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently.
@ -1682,7 +1694,7 @@ client.cat.componentTemplates({ ... })
[discrete]
==== count
Get a document count.
Provides quick access to a document count for a data stream, an index, or an entire cluster.n/
Provides quick access to a document count for a data stream, an index, or an entire cluster.
The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.
CAT APIs are only intended for human consumption using the command line or Kibana console.
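For illustration, a minimal sketch of a document-count call with the client (the index name `my-index-000001` is a placeholder):

[source,js]
----
// Count live documents in a single index.
const response = await client.cat.count({
  index: "my-index-000001",
});
console.log(response);
----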
@ -2682,7 +2694,7 @@ client.cluster.putComponentTemplate({ name, template })
* *Request (object):*
** *`name` (string)*: Name of the component template to create.
Elasticsearch includes the following built-in component templates: `logs-mappings`; 'logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`.
Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`.
Elastic Agent uses these templates to configure backing indices for its data streams.
If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
If you don't use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
@ -4155,6 +4167,8 @@ Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space
Cannot start with `-`, `_`, `+`, or `.ds-`;
Cannot be `.` or `..`;
Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== data_streams_stats
@ -4268,6 +4282,7 @@ client.indices.deleteDataStream({ name })
* *Request (object):*
** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
[discrete]
@ -4632,6 +4647,7 @@ To target all data streams, omit this parameter or use `*` or `_all`.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== get_data_stream
@ -4653,6 +4669,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== get_field_mapping
@ -4820,6 +4837,8 @@ client.indices.migrateToDataStream({ name })
* *Request (object):*
** *`name` (string)*: Name of the index alias to convert to a data stream.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== modify_data_stream
@ -4887,6 +4906,7 @@ client.indices.promoteDataStream({ name })
* *Request (object):*
** *`name` (string)*: The name of the data stream
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== put_alias
@ -6387,7 +6407,7 @@ learning node capacity for it to be immediately assigned to a node.
[discrete]
==== flush_job
Forces any buffered data to be processed by the job.
Force buffered data to be processed.
The flush jobs API is only applicable when sending data for analysis using
the post data API. Depending on the content of the buffer, then it might
additionally calculate new results. Both flush and close operations are
@ -6416,12 +6436,12 @@ client.ml.flushJob({ job_id })
[discrete]
==== forecast
Predicts the future behavior of a time series by using its historical
behavior.
Predict future behavior of a time series.
Forecasts are not supported for jobs that perform population analysis; an
error occurs if you try to create a forecast for a job that has an
`over_field_name` in its configuration.
`over_field_name` in its configuration. Forecasts predict future behavior
based on historical data.
{ref}/ml-forecast.html[Endpoint documentation]
[source,ts]
@ -6441,7 +6461,7 @@ create a forecast; otherwise, an error occurs.
[discrete]
==== get_buckets
Retrieves anomaly detection job results for one or more buckets.
Get anomaly detection job results for buckets.
The API presents a chronological view of the records, grouped by bucket.
{ref}/ml-get-bucket.html[Endpoint documentation]
@ -6470,7 +6490,7 @@ parameter, the API returns information about all buckets.
[discrete]
==== get_calendar_events
Retrieves information about the scheduled events in calendars.
Get info about events in calendars.
{ref}/ml-get-calendar-event.html[Endpoint documentation]
[source,ts]
@ -6491,7 +6511,7 @@ client.ml.getCalendarEvents({ calendar_id })
[discrete]
==== get_calendars
Retrieves configuration information for calendars.
Get calendar configuration info.
{ref}/ml-get-calendar.html[Endpoint documentation]
[source,ts]
@ -6510,7 +6530,7 @@ client.ml.getCalendars({ ... })
[discrete]
==== get_categories
Retrieves anomaly detection job results for one or more categories.
Get anomaly detection job results for categories.
{ref}/ml-get-category.html[Endpoint documentation]
[source,ts]
@ -6536,7 +6556,7 @@ This parameter has the `from` and `size` properties.
[discrete]
==== get_data_frame_analytics
Retrieves configuration information for data frame analytics jobs.
Get data frame analytics job configuration info.
You can get information for multiple data frame analytics jobs in a single
API request by using a comma-separated list of data frame analytics jobs or a
wildcard expression.
@ -6573,7 +6593,7 @@ be retrieved and then added to another cluster.
[discrete]
==== get_data_frame_analytics_stats
Retrieves usage information for data frame analytics jobs.
Get data frame analytics jobs usage info.
{ref}/get-dfanalytics-stats.html[Endpoint documentation]
[source,ts]
@ -6605,7 +6625,7 @@ there are no matches or only partial matches.
[discrete]
==== get_datafeed_stats
Retrieves usage information for datafeeds.
Get datafeeds usage info.
You can get statistics for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get statistics for all datafeeds by using `_all`, by specifying `*` as the
@ -6639,7 +6659,7 @@ partial matches. If this parameter is `false`, the request returns a
[discrete]
==== get_datafeeds
Retrieves configuration information for datafeeds.
Get datafeeds configuration info.
You can get information for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get information for all datafeeds by using `_all`, by specifying `*` as the
@ -6675,7 +6695,7 @@ be retrieved and then added to another cluster.
[discrete]
==== get_filters
Retrieves filters.
Get filters.
You can get a single filter or all filters.
{ref}/ml-get-filter.html[Endpoint documentation]
@ -6694,7 +6714,7 @@ client.ml.getFilters({ ... })
[discrete]
==== get_influencers
Retrieves anomaly detection job results for one or more influencers.
Get anomaly detection job results for influencers.
Influencers are the entities that have contributed to, or are to blame for,
the anomalies. Influencer results are available only if an
`influencer_field_name` is specified in the job configuration.
@ -6729,7 +6749,7 @@ means it is unset and results are not limited to specific timestamps.
[discrete]
==== get_job_stats
Retrieves usage information for anomaly detection jobs.
Get anomaly detection jobs usage info.
{ref}/ml-get-job-stats.html[Endpoint documentation]
[source,ts]
@ -6758,7 +6778,7 @@ code when there are no matches or only partial matches.
[discrete]
==== get_jobs
Retrieves configuration information for anomaly detection jobs.
Get anomaly detection jobs configuration info.
You can get information for multiple anomaly detection jobs in a single API
request by using a group name, a comma-separated list of jobs, or a wildcard
expression. You can get information for all anomaly detection jobs by using
@ -6793,6 +6813,7 @@ be retrieved and then added to another cluster.
[discrete]
==== get_memory_stats
Get machine learning memory usage info.
Get information about how machine learning jobs and trained models are using memory,
on each node, both within the JVM heap, and natively, outside of the JVM.
@ -6817,7 +6838,7 @@ fails and returns an error.
[discrete]
==== get_model_snapshot_upgrade_stats
Retrieves usage information for anomaly detection job model snapshot upgrades.
Get anomaly detection job model snapshot upgrade usage info.
{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation]
[source,ts]
@ -6845,7 +6866,7 @@ no matches or only partial matches.
[discrete]
==== get_model_snapshots
Retrieves information about model snapshots.
Get model snapshots info.
{ref}/ml-get-snapshot.html[Endpoint documentation]
[source,ts]
@ -6871,7 +6892,9 @@ by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
[discrete]
==== get_overall_buckets
Retrieves overall bucket results that summarize the bucket results of
Get overall bucket results.
Retrieves overall bucket results that summarize the bucket results of
multiple anomaly detection jobs.
The `overall_score` is calculated by combining the scores of all the
@ -6915,7 +6938,7 @@ using `_all` or by specifying `*` as the `<job_id>`.
[discrete]
==== get_records
Retrieves anomaly records for an anomaly detection job.
Get anomaly records for an anomaly detection job.
Records contain the detailed analytical results. They describe the anomalous
activity that has been identified in the input data based on the detector
configuration.
@ -6950,7 +6973,7 @@ client.ml.getRecords({ job_id })
[discrete]
==== get_trained_models
Retrieves configuration information for a trained model.
Get trained model configuration info.
{ref}/get-trained-models.html[Endpoint documentation]
[source,ts]
@ -6990,7 +7013,8 @@ tags are returned.
[discrete]
==== get_trained_models_stats
Retrieves usage information for trained models. You can get usage information for multiple trained
Get trained models usage info.
You can get usage information for multiple trained
models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
{ref}/get-trained-models-stats.html[Endpoint documentation]
@ -7018,7 +7042,7 @@ subset of results when there are partial matches.
[discrete]
==== infer_trained_model
Evaluates a trained model.
Evaluate a trained model.
{ref}/infer-trained-model.html[Endpoint documentation]
[source,ts]
@ -7039,6 +7063,7 @@ Currently, for NLP models, only a single value is allowed.
[discrete]
==== info
Return ML defaults and limits.
Returns defaults and limits used by machine learning.
This endpoint is designed to be used by a user interface that needs to fully
understand machine learning configurations where some options are not
@ -7057,9 +7082,8 @@ client.ml.info()
[discrete]
==== open_job
Open anomaly detection jobs.
An anomaly detection job must be opened in order for it to be ready to
receive and analyze data. It can be opened and closed multiple times
throughout its lifecycle.
An anomaly detection job must be opened to be ready to receive and analyze
data. It can be opened and closed multiple times throughout its lifecycle.
When you open a new job, it starts with an empty model.
When you open an existing job, the most recent model state is automatically
loaded. The job is ready to resume its analysis from where it left off, once
@ -7080,7 +7104,7 @@ client.ml.openJob({ job_id })
[discrete]
==== post_calendar_events
Adds scheduled events to a calendar.
Add scheduled events to the calendar.
{ref}/ml-post-calendar-event.html[Endpoint documentation]
[source,ts]
@ -7097,7 +7121,7 @@ client.ml.postCalendarEvents({ calendar_id, events })
[discrete]
==== post_data
Sends data to an anomaly detection job for analysis.
Send data to an anomaly detection job for analysis.
IMPORTANT: For each job, data can be accepted from only a single connection at a time.
It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
@ -7119,6 +7143,7 @@ client.ml.postData({ job_id })
[discrete]
==== preview_data_frame_analytics
Preview features used by data frame analytics.
Previews the extracted features used by a data frame analytics config.
{ref}/preview-dfanalytics.html[Endpoint documentation]
@ -7138,7 +7163,7 @@ this API.
[discrete]
==== preview_datafeed
Previews a datafeed.
Preview a datafeed.
This API returns the first "page" of search results from a datafeed.
You can preview an existing datafeed or provide configuration details for a datafeed
and anomaly detection job in the API. The preview shows the structure of the data
@ -7172,7 +7197,7 @@ used. You cannot specify a `job_config` object unless you also supply a `datafee
[discrete]
==== put_calendar
Creates a calendar.
Create a calendar.
{ref}/ml-put-calendar.html[Endpoint documentation]
[source,ts]
@ -7190,7 +7215,7 @@ client.ml.putCalendar({ calendar_id })
[discrete]
==== put_calendar_job
Adds an anomaly detection job to a calendar.
Add anomaly detection job to calendar.
{ref}/ml-put-calendar-job.html[Endpoint documentation]
[source,ts]
@ -7207,7 +7232,7 @@ client.ml.putCalendarJob({ calendar_id, job_id })
[discrete]
==== put_data_frame_analytics
Instantiates a data frame analytics job.
Create a data frame analytics job.
This API creates a data frame analytics job that performs an analysis on the
source indices and stores the outcome in a destination index.
@ -7280,7 +7305,7 @@ greater than that setting.
[discrete]
==== put_datafeed
Instantiates a datafeed.
Create a datafeed.
Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
You can associate only one datafeed with each anomaly detection job.
The datafeed contains a query that runs at a defined interval (`frequency`).
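As an illustration, a minimal sketch of creating a datafeed; the datafeed ID, job ID, index name, and `frequency` value are placeholders:

[source,js]
----
// Create a datafeed that feeds a hypothetical anomaly detection job
// from a hypothetical source index, querying every 150 seconds.
const response = await client.ml.putDatafeed({
  datafeed_id: "datafeed-my-job",
  job_id: "my-job",
  indices: ["my-data-index"],
  query: { match_all: {} },
  frequency: "150s",
});
console.log(response);
----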
@ -7350,7 +7375,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value
[discrete]
==== put_filter
Instantiates a filter.
Create a filter.
A filter contains a list of strings. It can be used by one or more anomaly detection jobs.
Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
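A minimal sketch of creating a filter; the filter ID, description, and items are placeholders:

[source,js]
----
// Create a filter whose items can later be referenced from detector custom_rules.
const response = await client.ml.putFilter({
  filter_id: "safe_domains",
  description: "A list of safe domains",
  items: ["*.google.com", "wikipedia.org"],
});
console.log(response);
----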
@ -7403,7 +7428,8 @@ client.ml.putJob({ job_id, analysis_config, data_description })
[discrete]
==== put_trained_model
Enables you to supply a trained model that is not created by data frame analytics.
Create a trained model.
Enables you to supply a trained model that is not created by data frame analytics.
{ref}/put-trained-models.html[Endpoint documentation]
[source,ts]
@ -7449,8 +7475,9 @@ to complete.
[discrete]
==== put_trained_model_alias
Creates or updates a trained model alias. A trained model alias is a logical
name used to reference a single trained model.
Create or update a trained model alias.
A trained model alias is a logical name used to reference a single trained
model.
You can use aliases instead of trained model identifiers to make it easier to
reference your models. For example, you can use aliases in inference
aggregations and processors.
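A minimal sketch of assigning an alias; both the alias and the model identifier are placeholders:

[source,js]
----
// Point the alias at a specific trained model so callers can reference the alias instead.
const response = await client.ml.putTrainedModelAlias({
  model_alias: "flight_delay_model",
  model_id: "flight-delay-prediction-1574775339910",
});
console.log(response);
----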
@ -7484,7 +7511,7 @@ already assigned and this parameter is false, the API returns an error.
[discrete]
==== put_trained_model_definition_part
Creates part of a trained model definition.
Create part of a trained model definition.
{ref}/put-trained-model-definition-part.html[Endpoint documentation]
[source,ts]
@ -7505,7 +7532,7 @@ order of their part number. The first part must be `0` and the final part must b
[discrete]
==== put_trained_model_vocabulary
Creates a trained model vocabulary.
Create a trained model vocabulary.
This API is supported only for natural language processing (NLP) models.
The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
@ -7526,7 +7553,7 @@ client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
[discrete]
==== reset_job
Resets an anomaly detection job.
Reset an anomaly detection job.
All model state and results are deleted. The job is ready to start over as if
it had just been created.
It is not currently possible to reset multiple jobs using wildcards or a
@ -7551,7 +7578,7 @@ reset.
[discrete]
==== revert_model_snapshot
Reverts to a specific snapshot.
Revert to a snapshot.
The machine learning features react quickly to anomalous input, learning new
behaviors in data. Highly anomalous input increases the variance in the
models whilst the system learns whether this is a new step-change in behavior
@ -7578,6 +7605,7 @@ scratch when it is started.
[discrete]
==== set_upgrade_mode
Set upgrade_mode for ML indices.
Sets a cluster wide upgrade_mode setting that prepares machine learning
indices for an upgrade.
When upgrading your cluster, in some circumstances you must restart your
@ -7608,7 +7636,7 @@ starting.
[discrete]
==== start_data_frame_analytics
Starts a data frame analytics job.
Start a data frame analytics job.
A data frame analytics job can be started and stopped multiple times
throughout its lifecycle.
If the destination index does not exist, it is created automatically the
@ -7639,7 +7667,7 @@ starts.
[discrete]
==== start_datafeed
Starts one or more datafeeds.
Start datafeeds.
A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped
multiple times throughout its lifecycle.
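A minimal sketch of starting a datafeed from a given timestamp; the datafeed ID and start time are placeholders:

[source,js]
----
// Start the datafeed and begin analyzing data from the given point in time.
const response = await client.ml.startDatafeed({
  datafeed_id: "datafeed-my-job",
  start: "2024-01-01T00:00:00Z",
});
console.log(response);
----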
@ -7672,7 +7700,8 @@ characters.
[discrete]
==== start_trained_model_deployment
Starts a trained model deployment, which allocates the model to every machine learning node.
Start a trained model deployment.
It allocates the model to every machine learning node.
{ref}/start-trained-model-deployment.html[Endpoint documentation]
[source,ts]
@ -7708,7 +7737,7 @@ it will automatically be changed to a value less than the number of hardware thr
[discrete]
==== stop_data_frame_analytics
Stops one or more data frame analytics jobs.
Stop data frame analytics jobs.
A data frame analytics job can be started and stopped multiple times
throughout its lifecycle.
@ -7742,7 +7771,7 @@ stops. Defaults to 20 seconds.
[discrete]
==== stop_datafeed
Stops one or more datafeeds.
Stop datafeeds.
A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped
multiple times throughout its lifecycle.
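A minimal sketch of stopping a datafeed; the datafeed ID and timeout are placeholders:

[source,js]
----
// Stop the datafeed, waiting up to 30 seconds for it to stop cleanly.
const response = await client.ml.stopDatafeed({
  datafeed_id: "datafeed-my-job",
  timeout: "30s",
});
console.log(response);
----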
@ -7765,7 +7794,7 @@ the identifier.
[discrete]
==== stop_trained_model_deployment
Stops a trained model deployment.
Stop a trained model deployment.
{ref}/stop-trained-model-deployment.html[Endpoint documentation]
[source,ts]
@ -7787,7 +7816,7 @@ restart the model deployment.
[discrete]
==== update_data_frame_analytics
Updates an existing data frame analytics job.
Update a data frame analytics job.
{ref}/update-dfanalytics.html[Endpoint documentation]
[source,ts]
@ -7817,7 +7846,7 @@ learning node capacity for it to be immediately assigned to a node.
[discrete]
==== update_datafeed
Updates the properties of a datafeed.
Update a datafeed.
You must stop and start the datafeed for the changes to be applied.
When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at
the time of the update and runs the query using those same roles. If you provide secondary authorization headers,
@ -7890,6 +7919,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value
[discrete]
==== update_filter
Update a filter.
Updates the description of a filter, adds items, or removes items from the list.
{ref}/ml-update-filter.html[Endpoint documentation]
@ -7909,6 +7939,7 @@ client.ml.updateFilter({ filter_id })
[discrete]
==== update_job
Update an anomaly detection job.
Updates certain properties of an anomaly detection job.
{ref}/ml-update-job.html[Endpoint documentation]
@ -7974,6 +8005,7 @@ value is null, which means all results are retained.
[discrete]
==== update_model_snapshot
Update a snapshot.
Updates certain properties of a snapshot.
{ref}/ml-update-snapshot.html[Endpoint documentation]
@ -7995,7 +8027,7 @@ snapshot will be deleted when the job is deleted.
[discrete]
==== update_trained_model_deployment
Starts a trained model deployment, which allocates the model to every machine learning node.
Update a trained model deployment.
{ref}/update-trained-model-deployment.html[Endpoint documentation]
[source,ts]
@ -8017,6 +8049,7 @@ it will automatically be changed to a value less than the number of hardware thr
[discrete]
==== upgrade_job_snapshot
Upgrade a snapshot.
Upgrades an anomaly detection model snapshot to the latest major version.
Over time, older snapshot formats are deprecated and removed. Anomaly
detection jobs support only snapshots that are from the current or previous
@ -10669,7 +10702,7 @@ client.synonyms.putSynonym({ id, synonyms_set })
* *Request (object):*
** *`id` (string)*: The id of the synonyms set to be created or updated
** *`synonyms_set` ({ id, synonyms }[])*: The synonym set information to update
** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update
[discrete]
==== put_synonym_rule


@ -165,7 +165,7 @@ export default class Cat {
}
/**
* Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
* Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation}
*/
async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatCountResponse>


@ -672,7 +672,7 @@ export default class Ml {
}
/**
* Forces any buffered data to be processed by the job. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
* Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation}
*/
async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
@ -716,7 +716,7 @@ export default class Ml {
}
/**
* Predicts the future behavior of a time series by using its historical behavior. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration.
* Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-forecast.html | Elasticsearch API documentation}
*/
async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
@ -760,7 +760,7 @@ export default class Ml {
}
/**
* Retrieves anomaly detection job results for one or more buckets. The API presents a chronological view of the records, grouped by bucket.
* Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-bucket.html | Elasticsearch API documentation}
*/
async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
@ -812,7 +812,7 @@ export default class Ml {
}
/**
* Retrieves information about the scheduled events in calendars.
* Get info about events in calendars.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation}
*/
async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
@ -844,7 +844,7 @@ export default class Ml {
}
/**
* Retrieves configuration information for calendars.
* Get calendar configuration info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation}
*/
async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
@ -896,7 +896,7 @@ export default class Ml {
}
/**
* Retrieves anomaly detection job results for one or more categories.
* Get anomaly detection job results for categories.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-category.html | Elasticsearch API documentation}
*/
async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
@ -948,7 +948,7 @@ export default class Ml {
}
/**
* Retrieves configuration information for data frame analytics jobs. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
* Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation}
*/
async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
@ -988,7 +988,7 @@ export default class Ml {
}
/**
* Retrieves usage information for data frame analytics jobs.
* Get data frame analytics jobs usage info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation}
*/
async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
@ -1028,7 +1028,7 @@ export default class Ml {
}
/**
* Retrieves usage information for datafeeds. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation}
*/
async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
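For orientation, a minimal sketch of calling this endpoint from the client, using the `_all` wildcard described above; the datafeed IDs in a real cluster will differ:

[source, js]
----
const response = await client.ml.getDatafeedStats({
  datafeed_id: "_all", // stats for every datafeed, including stopped ones
});
console.log(response);
----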
@ -1068,7 +1068,7 @@ export default class Ml {
}
/**
* Retrieves configuration information for datafeeds. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
* Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation}
*/
async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedsResponse>
@ -1108,7 +1108,7 @@ export default class Ml {
}
/**
* Retrieves filters. You can get a single filter or all filters.
* Get filters. You can get a single filter or all filters.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation}
*/
async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetFiltersResponse>
@ -1148,7 +1148,7 @@ export default class Ml {
}
/**
* Retrieves anomaly detection job results for one or more influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.
* Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-influencer.html | Elasticsearch API documentation}
*/
async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetInfluencersResponse>
@ -1192,7 +1192,7 @@ export default class Ml {
}
/**
* Retrieves usage information for anomaly detection jobs.
* Get anomaly detection jobs usage info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation}
*/
async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
@ -1232,7 +1232,7 @@ export default class Ml {
}
/**
* Retrieves configuration information for anomaly detection jobs. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
* Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation}
*/
async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
@ -1272,7 +1272,7 @@ export default class Ml {
}
/**
* Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
* Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-memory.html | Elasticsearch API documentation}
*/
async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetMemoryStatsResponse>
@ -1312,7 +1312,7 @@ export default class Ml {
}
/**
* Retrieves usage information for anomaly detection job model snapshot upgrades.
* Get anomaly detection job model snapshot upgrade usage info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation}
*/
async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotUpgradeStatsResponse>
@ -1345,7 +1345,7 @@ export default class Ml {
}
/**
* Retrieves information about model snapshots.
* Get model snapshots info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-snapshot.html | Elasticsearch API documentation}
*/
async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
@ -1397,7 +1397,7 @@ export default class Ml {
}
/**
* Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
* Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation}
*/
async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetOverallBucketsResponse>
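As a hedged illustration of the `top_n` behaviour described above, a sketch that averages the two highest job scores per overall bucket; the job wildcard and score threshold are made up:

[source, js]
----
const response = await client.ml.getOverallBuckets({
  job_id: "job-*", // hypothetical wildcard covering several jobs
  top_n: 2, // average the two highest anomaly scores per overall bucket
  overall_score: 50, // only return buckets scoring at least 50
});
console.log(response);
----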
@ -1441,7 +1441,7 @@ export default class Ml {
}
/**
* Retrieves anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
* Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-record.html | Elasticsearch API documentation}
*/
async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetRecordsResponse>
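A minimal sketch of pulling only high-scoring records, sorted by score; the job ID and cutoff are hypothetical:

[source, js]
----
const response = await client.ml.getRecords({
  job_id: "low_request_rate", // hypothetical job ID
  record_score: 80, // skip low-severity records
  sort: "record_score",
  desc: true,
});
console.log(response.records);
----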
@ -1485,7 +1485,7 @@ export default class Ml {
}
/**
* Retrieves configuration information for a trained model.
* Get trained model configuration info.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation}
*/
async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsResponse>
@ -1525,7 +1525,7 @@ export default class Ml {
}
/**
* Retrieves usage information for trained models. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
* Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation}
*/
async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsStatsResponse>
@ -1565,7 +1565,7 @@ export default class Ml {
}
/**
* Evaluates a trained model.
* Evaluate a trained model.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation}
*/
async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelResponse>
@ -1609,7 +1609,7 @@ export default class Ml {
}
/**
* Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
* Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ml-info.html | Elasticsearch API documentation}
*/
async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInfoResponse>
@ -1639,7 +1639,7 @@ export default class Ml {
}
/**
* Open anomaly detection jobs. An anomaly detection job must be opened in order for it to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
* Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation}
*/
async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlOpenJobResponse>
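A sketch of opening a job before sending data to it; the job ID and timeout are placeholders:

[source, js]
----
const response = await client.ml.openJob({
  job_id: "low_request_rate", // hypothetical job ID
  timeout: "35m", // wait up to 35 minutes for the job to open
});
console.log(response);
----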
@ -1683,7 +1683,7 @@ export default class Ml {
}
/**
* Adds scheduled events to a calendar.
* Add scheduled events to the calendar.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation}
*/
async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostCalendarEventsResponse>
@ -1727,7 +1727,7 @@ export default class Ml {
}
/**
* Sends data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
* Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-data.html | Elasticsearch API documentation}
*/
async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData> | TB.MlPostDataRequest<TData>, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostDataResponse>
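Because data can be accepted from only one connection per job, a sketch sends a small batch to a single job; the job ID and the field names in `data` are invented:

[source, js]
----
const response = await client.ml.postData({
  job_id: "it_ops_new_kpi", // hypothetical job ID
  data: [
    { timestamp: 1712000000000, kpi: 1234 },
    { timestamp: 1712000060000, kpi: 1302 },
  ],
});
console.log(response);
----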
@ -1764,7 +1764,7 @@ export default class Ml {
}
/**
* Previews the extracted features used by a data frame analytics config.
* Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config.
* @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation}
*/
async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDataFrameAnalyticsResponse>
@ -1816,7 +1816,7 @@ export default class Ml {
}
/**
* Previews a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
* Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation}
*/
async previewDatafeed<TDocument = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDatafeedResponse<TDocument>>
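A sketch of previewing an existing datafeed; as noted above, the preview runs with the caller's credentials. The datafeed ID is hypothetical:

[source, js]
----
const response = await client.ml.previewDatafeed({
  datafeed_id: "datafeed-high_sum_total_sales",
});
console.log(response);
----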
@ -1868,7 +1868,7 @@ export default class Ml {
}
/**
* Creates a calendar.
* Create a calendar.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation}
*/
async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarResponse>
@ -1912,7 +1912,7 @@ export default class Ml {
}
/**
* Adds an anomaly detection job to a calendar.
* Add anomaly detection job to calendar.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation}
*/
async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarJobResponse>
@ -1945,7 +1945,7 @@ export default class Ml {
}
/**
* Instantiates a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index.
* Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation}
*/
async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDataFrameAnalyticsResponse>
@ -1989,7 +1989,7 @@ export default class Ml {
}
/**
* Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
* Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation}
*/
async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
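A minimal sketch of creating a datafeed for an existing job; the index, query, and IDs are placeholders, and secondary authorization headers are omitted:

[source, js]
----
const response = await client.ml.putDatafeed({
  datafeed_id: "datafeed-test-job", // hypothetical datafeed ID
  job_id: "test-job", // must reference an existing anomaly detection job
  indices: ["kibana_sample_data_logs"],
  query: { match_all: {} },
  query_delay: "60s",
});
console.log(response);
----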
@ -2033,7 +2033,7 @@ export default class Ml {
}
/**
* Instantiates a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
* Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation}
*/
async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutFilterResponse>
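A sketch of creating a filter that a detector's `custom_rules` could later reference; the filter ID and items are invented:

[source, js]
----
const response = await client.ml.putFilter({
  filter_id: "safe_domains", // hypothetical filter ID
  description: "A list of safe domains",
  items: ["*.google.com", "wikipedia.org"],
});
console.log(response);
----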
@ -2121,7 +2121,7 @@ export default class Ml {
}
/**
* Enables you to supply a trained model that is not created by data frame analytics.
* Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation}
*/
async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
@ -2165,7 +2165,7 @@ export default class Ml {
}
/**
* Creates or updates a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
* Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation}
*/
async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
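A sketch of moving an alias to a newer model; `reassign` is required when the alias already points at another model. The model IDs are placeholders:

[source, js]
----
const response = await client.ml.putTrainedModelAlias({
  model_alias: "flight_delay_model", // stable name used by inference processors
  model_id: "flight-delay-prediction-1580004349800", // hypothetical model ID
  reassign: true,
});
console.log(response);
----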
@ -2198,7 +2198,7 @@ export default class Ml {
}
/**
* Creates part of a trained model definition.
* Create part of a trained model definition.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation}
*/
async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
@ -2243,7 +2243,7 @@ export default class Ml {
}
/**
* Creates a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
* Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation}
*/
async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
@ -2287,7 +2287,7 @@ export default class Ml {
}
/**
* Resets an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list.
* Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation}
*/
async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
@ -2319,7 +2319,7 @@ export default class Ml {
}
/**
* Reverts to a specific snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
* Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-revert-snapshot.html | Elasticsearch API documentation}
*/
async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
@ -2364,7 +2364,7 @@ export default class Ml {
}
/**
* Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
* Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-set-upgrade-mode.html | Elasticsearch API documentation}
*/
async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
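A sketch of halting ML tasks before an upgrade that requires reindexing, then re-enabling them afterwards:

[source, js]
----
// Pause ML activity before upgrading.
await client.ml.setUpgradeMode({ enabled: true, timeout: "10m" });

// ... perform the upgrade ...

// Resume ML activity once the upgrade is done.
await client.ml.setUpgradeMode({ enabled: false });
----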
@ -2394,7 +2394,7 @@ export default class Ml {
}
/**
* Starts a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
* Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation}
*/
async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
@ -2426,7 +2426,7 @@ export default class Ml {
}
/**
* Starts one or more datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
* Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation}
*/
async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDatafeedResponse>
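A sketch of starting a datafeed from a fixed point in time; the datafeed ID and timestamp are placeholders, and the associated job must already be open:

[source, js]
----
const response = await client.ml.startDatafeed({
  datafeed_id: "datafeed-low_request_rate", // hypothetical datafeed ID
  start: "2024-06-01T00:00:00Z",
});
console.log(response);
----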
@ -2470,7 +2470,7 @@ export default class Ml {
}
/**
* Starts a trained model deployment, which allocates the model to every machine learning node.
* Start a trained model deployment. It allocates the model to every machine learning node.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation}
*/
async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartTrainedModelDeploymentResponse>
@ -2502,7 +2502,7 @@ export default class Ml {
}
/**
* Stops one or more data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
* Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation}
*/
async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDataFrameAnalyticsResponse>
@ -2534,7 +2534,7 @@ export default class Ml {
}
/**
* Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
* Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation}
*/
async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopDatafeedResponse>
@ -2578,7 +2578,7 @@ export default class Ml {
}
/**
* Stops a trained model deployment.
* Stop a trained model deployment.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation}
*/
async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStopTrainedModelDeploymentResponse>
@ -2610,7 +2610,7 @@ export default class Ml {
}
/**
* Updates an existing data frame analytics job.
* Update a data frame analytics job.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation}
*/
async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDataFrameAnalyticsResponse>
@ -2654,7 +2654,7 @@ export default class Ml {
}
/**
* Updates the properties of a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
* Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation}
*/
async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateDatafeedResponse>
@ -2698,7 +2698,7 @@ export default class Ml {
}
/**
* Updates the description of a filter, adds items, or removes items from the list.
* Update a filter. Updates the description of a filter, adds items, or removes items from the list.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation}
*/
async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateFilterResponse>
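A sketch of adding and removing filter items in one call; the IDs and domains are invented:

[source, js]
----
const response = await client.ml.updateFilter({
  filter_id: "safe_domains", // hypothetical filter ID
  description: "Updated list of safe domains",
  add_items: ["*.myorg.com"],
  remove_items: ["wikipedia.org"],
});
console.log(response);
----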
@ -2742,7 +2742,7 @@ export default class Ml {
}
/**
* Updates certain properties of an anomaly detection job.
* Update an anomaly detection job. Updates certain properties of an anomaly detection job.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation}
*/
async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateJobResponse>
@ -2786,7 +2786,7 @@ export default class Ml {
}
/**
* Updates certain properties of a snapshot.
* Update a snapshot. Updates certain properties of a snapshot.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-snapshot.html | Elasticsearch API documentation}
*/
async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateModelSnapshotResponse>
@ -2831,7 +2831,7 @@ export default class Ml {
}
/**
* Starts a trained model deployment, which allocates the model to every machine learning node.
* Update a trained model deployment.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation}
*/
async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpdateTrainedModelDeploymentResponse>
@ -2875,7 +2875,7 @@ export default class Ml {
}
/**
* Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
* Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation}
*/
async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlUpgradeJobSnapshotResponse>
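A sketch of upgrading a single snapshot and waiting for it to finish; the job and snapshot IDs are placeholders:

[source, js]
----
const response = await client.ml.upgradeJobSnapshot({
  job_id: "low_request_rate", // hypothetical job ID
  snapshot_id: "1828371", // hypothetical snapshot ID
  wait_for_completion: true,
  timeout: "45m",
});
console.log(response);
----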

View File

@ -773,6 +773,7 @@ export interface MsearchRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_throttled?: boolean
ignore_unavailable?: boolean
include_named_queries_score?: boolean
max_concurrent_searches?: long
max_concurrent_shard_requests?: long
pre_filter_shard_size?: long
@ -1132,6 +1133,7 @@ export interface SearchRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_throttled?: boolean
ignore_unavailable?: boolean
include_named_queries_score?: boolean
lenient?: boolean
max_concurrent_shard_requests?: long
min_compatible_shard_node?: VersionString
@ -1421,7 +1423,7 @@ export interface SearchHit<TDocument = unknown> {
fields?: Record<string, any>
highlight?: Record<string, string[]>
inner_hits?: Record<string, SearchInnerHitsResult>
matched_queries?: string[]
matched_queries?: string[] | Record<string, double[]>
_nested?: SearchNestedIdentity
_ignored?: string[]
ignored_field_values?: Record<string, string[]>
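The new `include_named_queries_score` flag and the widened `matched_queries` type go together: when the flag is set, each hit reports per-named-query scores instead of a plain list of names. A hedged sketch, with a made-up index and query names:

[source, js]
----
const response = await client.search({
  index: "my-index", // hypothetical index
  include_named_queries_score: true,
  query: {
    bool: {
      should: [
        { match: { title: { query: "elasticsearch", _name: "title_match" } } },
        { match: { body: { query: "elasticsearch", _name: "body_match" } } },
      ],
    },
  },
});
// With the flag set, matched_queries is keyed by query name.
console.log(response.hits.hits[0]?.matched_queries);
----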
@ -2294,6 +2296,7 @@ export interface KnnQuery extends QueryDslQueryBase {
query_vector?: QueryVector
query_vector_builder?: QueryVectorBuilder
num_candidates?: integer
k?: integer
filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
similarity?: float
}
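The added `k` option on the `knn` query caps how many nearest neighbours are returned from the `num_candidates` considered per shard. A sketch against an invented dense_vector field:

[source, js]
----
const response = await client.search({
  index: "image-index", // hypothetical index with a dense_vector field
  query: {
    knn: {
      field: "image_vector",
      query_vector: [0.12, 0.53, 0.89],
      k: 10, // return at most 10 neighbours
      num_candidates: 100, // candidates examined per shard
    },
  },
});
console.log(response);
----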
@ -4254,8 +4257,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisCustomAnalyzer {
type: 'custom'
char_filter?: string[]
filter?: string[]
char_filter?: string | string[]
filter?: string | string[]
position_increment_gap?: integer
position_offset_gap?: integer
tokenizer: string
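`char_filter` and `filter` on a custom analyzer now also accept a single string in the generated types. A sketch of an index definition written that way, with hypothetical names; an array continues to work as before:

[source, js]
----
const response = await client.indices.create({
  index: "my-custom-analyzer-index", // hypothetical index
  settings: {
    analysis: {
      analyzer: {
        my_analyzer: {
          type: "custom",
          tokenizer: "standard",
          char_filter: "html_strip", // single string instead of ["html_strip"]
          filter: "lowercase", // single string instead of ["lowercase"]
        },
      },
    },
  },
});
console.log(response);
----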
@ -10872,6 +10875,8 @@ export interface IndicesCreateResponse {
export interface IndicesCreateDataStreamRequest extends RequestBase {
name: DataStreamName
master_timeout?: Duration
timeout?: Duration
}
export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase
@ -10929,6 +10934,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase
export interface IndicesDeleteDataStreamRequest extends RequestBase {
name: DataStreamNames
master_timeout?: Duration
expand_wildcards?: ExpandWildcards
}
@ -11156,6 +11162,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase {
name: DataStreamNames
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
}
export interface IndicesGetDataLifecycleResponse {
@ -11166,6 +11173,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
name?: DataStreamNames
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
}
export interface IndicesGetDataStreamResponse {
@ -11246,6 +11254,8 @@ export type IndicesGetTemplateResponse = Record<string, IndicesTemplateMapping>
export interface IndicesMigrateToDataStreamRequest extends RequestBase {
name: IndexName
master_timeout?: Duration
timeout?: Duration
}
export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
@ -11283,6 +11293,7 @@ export interface IndicesOpenResponse {
export interface IndicesPromoteDataStreamRequest extends RequestBase {
name: IndexName
master_timeout?: Duration
}
export type IndicesPromoteDataStreamResponse = any
@ -13877,7 +13888,7 @@ export interface MlTrainedModelDeploymentStats {
error_count: integer
inference_count: integer
model_id: Id
nodes: MlTrainedModelDeploymentNodesStats
nodes: MlTrainedModelDeploymentNodesStats[]
number_of_allocations: integer
queue_capacity: integer
rejected_execution_count: integer
@ -13912,7 +13923,7 @@ export interface MlTrainedModelInferenceStats {
failure_count: integer
inference_count: integer
missing_all_fields_count: integer
timestamp: DateTime
timestamp: EpochTime<UnitMillis>
}
export interface MlTrainedModelLocation {
@ -18299,7 +18310,7 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem {
export interface SynonymsPutSynonymRequest extends RequestBase {
id: Id
synonyms_set: SynonymsSynonymRule[]
synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
}
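`synonyms_set` now also accepts a single rule object rather than only an array. A sketch with a made-up synonyms set ID:

[source, js]
----
const response = await client.synonyms.putSynonym({
  id: "my-synonyms-set", // hypothetical synonyms set ID
  synonyms_set: { id: "greeting", synonyms: "hello, hi, howdy" },
});
console.log(response);
----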
export interface SynonymsPutSynonymResponse {

View File

@ -801,6 +801,7 @@ export interface MsearchRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_throttled?: boolean
ignore_unavailable?: boolean
include_named_queries_score?: boolean
max_concurrent_searches?: long
max_concurrent_shard_requests?: long
pre_filter_shard_size?: long
@ -1184,6 +1185,7 @@ export interface SearchRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
ignore_throttled?: boolean
ignore_unavailable?: boolean
include_named_queries_score?: boolean
lenient?: boolean
max_concurrent_shard_requests?: long
min_compatible_shard_node?: VersionString
@ -1476,7 +1478,7 @@ export interface SearchHit<TDocument = unknown> {
fields?: Record<string, any>
highlight?: Record<string, string[]>
inner_hits?: Record<string, SearchInnerHitsResult>
matched_queries?: string[]
matched_queries?: string[] | Record<string, double[]>
_nested?: SearchNestedIdentity
_ignored?: string[]
ignored_field_values?: Record<string, string[]>
@ -2367,6 +2369,7 @@ export interface KnnQuery extends QueryDslQueryBase {
query_vector?: QueryVector
query_vector_builder?: QueryVectorBuilder
num_candidates?: integer
k?: integer
filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
similarity?: float
}
@ -4327,8 +4330,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
export interface AnalysisCustomAnalyzer {
type: 'custom'
char_filter?: string[]
filter?: string[]
char_filter?: string | string[]
filter?: string | string[]
position_increment_gap?: integer
position_offset_gap?: integer
tokenizer: string
@ -11055,6 +11058,8 @@ export interface IndicesCreateResponse {
export interface IndicesCreateDataStreamRequest extends RequestBase {
name: DataStreamName
master_timeout?: Duration
timeout?: Duration
}
export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase
@ -11112,6 +11117,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase
export interface IndicesDeleteDataStreamRequest extends RequestBase {
name: DataStreamNames
master_timeout?: Duration
expand_wildcards?: ExpandWildcards
}
@ -11340,6 +11346,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase {
name: DataStreamNames
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
}
export interface IndicesGetDataLifecycleResponse {
@ -11350,6 +11357,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
name?: DataStreamNames
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
}
export interface IndicesGetDataStreamResponse {
@ -11430,6 +11438,8 @@ export type IndicesGetTemplateResponse = Record<string, IndicesTemplateMapping>
export interface IndicesMigrateToDataStreamRequest extends RequestBase {
name: IndexName
master_timeout?: Duration
timeout?: Duration
}
export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
@ -11470,6 +11480,7 @@ export interface IndicesOpenResponse {
export interface IndicesPromoteDataStreamRequest extends RequestBase {
name: IndexName
master_timeout?: Duration
}
export type IndicesPromoteDataStreamResponse = any
@ -14112,7 +14123,7 @@ export interface MlTrainedModelDeploymentStats {
error_count: integer
inference_count: integer
model_id: Id
nodes: MlTrainedModelDeploymentNodesStats
nodes: MlTrainedModelDeploymentNodesStats[]
number_of_allocations: integer
queue_capacity: integer
rejected_execution_count: integer
@ -14147,7 +14158,7 @@ export interface MlTrainedModelInferenceStats {
failure_count: integer
inference_count: integer
missing_all_fields_count: integer
timestamp: DateTime
timestamp: EpochTime<UnitMillis>
}
export interface MlTrainedModelLocation {
@ -18774,7 +18785,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
id: Id
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
synonyms_set: SynonymsSynonymRule[]
synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
}
}