Auto-generated code for 8.x (#2369)

This commit is contained in:
Elastic Machine
2024-09-30 20:41:23 +02:00
committed by GitHub
parent 58b457eedc
commit e45ed28c05
70 changed files with 1408 additions and 236 deletions

View File

@ -26,7 +26,7 @@ const response1 = await client.cluster.putComponentTemplate({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.tasks.list({
human: "true",
detailed: "true",
actions: "indices:data/write/bulk",
});
console.log(response);
----

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.indices.create({
index: "my-index-000002",
index: "my-index-000003",
mappings: {
properties: {
metrics: {
@ -29,7 +29,7 @@ const response = await client.indices.create({
console.log(response);
const response1 = await client.indices.getMapping({
index: "my-index-000002",
index: "my-index-000003",
});
console.log(response1);
----

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "alibabacloud_ai_search_embeddings",
id: "alibabacloud_ai_search_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -14,6 +14,7 @@ const response = await client.inference.put({
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
num_threads: 1,
model_id: ".multilingual-e5-small",
},
},

View File

@ -10,7 +10,7 @@ const response = await client.search({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "hugging_face_embeddings",
id: "hugging_face_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "google_vertex_ai_embeddings",
id: "google_vertex_ai_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -10,7 +10,7 @@ const response = await client.search({
"date.day_of_week": {
type: "keyword",
script:
"emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
aggs: {

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "azure-ai-studio-embeddings",
pipeline: "azure_ai_studio_embeddings",
pipeline: "azure_ai_studio_embeddings_pipeline",
},
});
console.log(response);

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "azure_ai_studio_embeddings",
id: "azure_ai_studio_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "openai-embeddings",
pipeline: "openai_embeddings",
pipeline: "openai_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,8 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.nodes.hotThreads();
console.log(response);
----

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "amazon-bedrock-embeddings",
pipeline: "amazon_bedrock_embeddings",
pipeline: "amazon_bedrock_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "restaurants",
retriever: {
knn: {
field: "vector",
query_vector: [10, 22, 77],
k: 10,
num_candidates: 10,
},
},
});
console.log(response);
----

View File

@ -10,7 +10,7 @@ const response = await client.search({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "openai_embeddings",
id: "openai_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "elser-embeddings",
pipeline: "elser_embeddings",
pipeline: "elser_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.nodes.stats({
human: "true",
filter_path: "nodes.*.indexing_pressure",
});
console.log(response);
----

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "cohere_embeddings",
id: "cohere_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "hugging-face-embeddings",
pipeline: "hugging_face_embeddings",
pipeline: "hugging_face_embeddings_pipeline",
},
});
console.log(response);

View File

@ -14,6 +14,7 @@ const response = await client.inference.put({
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
num_threads: 1,
},
},
});

View File

@ -0,0 +1,19 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-index-000002",
mappings: {
properties: {
inference_field: {
type: "semantic_text",
inference_id: "my-elser-endpoint-for-ingest",
search_inference_id: "my-elser-endpoint-for-search",
},
},
},
});
console.log(response);
----

View File

@ -3,8 +3,8 @@
[source, js]
----
const response = await client.nodes.hotThreads({
node_id: "my-node,my-other-node",
const response = await client.nodes.stats({
metric: "breaker",
});
console.log(response);
----

View File

@ -0,0 +1,19 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "rerank",
inference_id: "my-msmarco-minilm-model",
inference_config: {
service: "elasticsearch",
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: "cross-encoder__ms-marco-minilm-l-6-v2",
},
},
});
console.log(response);
----

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "google-vertex-ai-embeddings",
pipeline: "google_vertex_ai_embeddings",
pipeline: "google_vertex_ai_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,45 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "movies",
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
sparse_vector: {
field: "plot_embedding",
inference_id: "my-elser-model",
query: "films that explore psychological depths",
},
},
},
},
{
standard: {
query: {
multi_match: {
query: "crime",
fields: ["plot", "title"],
},
},
},
},
{
knn: {
field: "vector",
query_vector: [10, 22, 77],
k: 10,
num_candidates: 10,
},
},
],
},
},
});
console.log(response);
----

View File

@ -11,7 +11,7 @@ const response = await client.search({
filter: {
range: {
price: {
to: "500",
lte: "500",
},
},
},

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "cohere-embeddings",
pipeline: "cohere_embeddings",
pipeline: "cohere_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,51 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-index-000004",
mappings: {
properties: {
metrics: {
subobjects: "auto",
properties: {
time: {
type: "object",
properties: {
min: {
type: "long",
},
},
},
to: {
type: "object",
properties: {
inner_metrics: {
type: "object",
subobjects: "auto",
properties: {
time: {
type: "object",
properties: {
max: {
type: "long",
},
},
},
},
},
},
},
},
},
},
},
});
console.log(response);
const response1 = await client.indices.getMapping({
index: "my-index-000004",
});
console.log(response1);
----

View File

@ -17,7 +17,7 @@ const response = await client.search({
{
range: {
"result.execution_time": {
from: "now-10s",
gte: "now-10s",
},
},
},

View File

@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.snapshot.create({
repository: "my_repository",
snapshot: "_verify_integrity",
});
console.log(response);
----

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "my-index",
query: {
semantic: {
field: "inference_field",
query: "mountain lake",
inner_hits: {
from: 1,
size: 1,
},
},
},
});
console.log(response);
----

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "mistral-embeddings",
pipeline: "mistral_embeddings",
pipeline: "mistral_embeddings_pipeline",
},
});
console.log(response);

View File

@ -9,7 +9,11 @@ const response = await client.inference.put({
inference_config: {
service: "elser",
service_settings: {
num_allocations: 1,
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
num_threads: 1,
},
},

View File

@ -0,0 +1,17 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "my-index",
query: {
semantic: {
field: "inference_field",
query: "mountain lake",
inner_hits: {},
},
},
});
console.log(response);
----

View File

@ -0,0 +1,36 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "restaurants",
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
multi_match: {
query: "Austria",
fields: ["city", "region"],
},
},
},
},
{
knn: {
field: "vector",
query_vector: [10, 22, 77],
k: 10,
num_candidates: 10,
},
},
],
rank_constant: 1,
rank_window_size: 50,
},
},
});
console.log(response);
----

View File

@ -7,7 +7,7 @@ const response = await client.search({
index: "semantic-embeddings",
query: {
semantic: {
field: "semantic_text",
field: "content",
query: "How to avoid muscle soreness while running?",
},
},

View File

@ -0,0 +1,26 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "movies",
retriever: {
text_similarity_reranker: {
retriever: {
standard: {
query: {
match: {
genre: "drama",
},
},
},
},
field: "plot",
inference_id: "my-msmarco-minilm-model",
inference_text: "films that explore psychological depths",
},
},
});
console.log(response);
----

View File

@ -4,7 +4,9 @@
[source, js]
----
const response = await client.tasks.list({
filter_path: "nodes.*.tasks",
pretty: "true",
human: "true",
detailed: "true",
});
console.log(response);
----

View File

@ -0,0 +1,17 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.index({
index: "my-index",
id: "lake_tahoe",
document: {
inference_field: [
"Lake Tahoe is the largest alpine lake in North America",
"When hiking in the area, please be on alert for bears",
],
},
});
console.log(response);
----

View File

@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "index",
retriever: {
text_similarity_reranker: {
retriever: {
standard: {
query: {
match_phrase: {
text: "landmark in Paris",
},
},
},
},
field: "text",
inference_id: "my-cohere-rerank-model",
inference_text: "Most famous landmark in Paris",
rank_window_size: 100,
min_score: 0.5,
},
},
});
console.log(response);
----

View File

@ -0,0 +1,32 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "restaurants",
retriever: {
standard: {
query: {
bool: {
should: [
{
match: {
region: "Austria",
},
},
],
filter: [
{
term: {
year: "2019",
},
},
],
},
},
},
},
});
console.log(response);
----

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "alibabacloud-ai-search-embeddings",
pipeline: "alibabacloud_ai_search_embeddings",
pipeline: "alibabacloud_ai_search_embeddings_pipeline",
},
});
console.log(response);

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "azure_openai_embeddings",
id: "azure_openai_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -0,0 +1,8 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.cluster.pendingTasks();
console.log(response);
----

View File

@ -8,7 +8,7 @@ const response = await client.search({
day_of_week: {
type: "keyword",
script:
"\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))\n ",
"\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))\n ",
},
},
size: 0,

View File

@ -7,14 +7,10 @@ const response = await client.indices.create({
index: "semantic-embeddings",
mappings: {
properties: {
semantic_text: {
content: {
type: "semantic_text",
inference_id: "my-elser-endpoint",
},
content: {
type: "text",
copy_to: "semantic_text",
},
},
},
});

View File

@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "azure-openai-embeddings",
pipeline: "azure_openai_embeddings",
pipeline: "azure_openai_embeddings_pipeline",
},
});
console.log(response);

View File

@ -0,0 +1,68 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-index-000002",
mappings: {
properties: {
metrics: {
type: "object",
subobjects: "auto",
properties: {
inner: {
type: "object",
enabled: false,
},
nested: {
type: "nested",
},
},
},
},
},
});
console.log(response);
const response1 = await client.index({
index: "my-index-000002",
id: "metric_1",
document: {
"metrics.time": 100,
"metrics.time.min": 10,
"metrics.time.max": 900,
},
});
console.log(response1);
const response2 = await client.index({
index: "my-index-000002",
id: "metric_2",
document: {
metrics: {
time: 100,
"time.min": 10,
"time.max": 900,
inner: {
foo: "bar",
"path.to.some.field": "baz",
},
nested: [
{
id: 10,
},
{
id: 1,
},
],
},
},
});
console.log(response2);
const response3 = await client.indices.getMapping({
index: "my-index-000002",
});
console.log(response3);
----

View File

@ -33,6 +33,25 @@ const response = await client.simulate.ingest({
],
},
},
component_template_substitutions: {
"my-component-template": {
template: {
mappings: {
dynamic: "true",
properties: {
field3: {
type: "keyword",
},
},
},
settings: {
index: {
default_pipeline: "my-pipeline",
},
},
},
},
},
},
});
console.log(response);

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "mistral_embeddings",
id: "mistral_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ml.postCalendarEvents({
calendar_id: "dst-germany",
events: [
{
description: "Fall 2024",
start_time: 1729994400000,
end_time: 1730167200000,
skip_result: false,
skip_model_update: false,
force_time_shift: -3600,
},
{
description: "Spring 2025",
start_time: 1743296400000,
end_time: 1743469200000,
skip_result: false,
skip_model_update: false,
force_time_shift: 3600,
},
],
});
console.log(response);
----

View File

@ -0,0 +1,13 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-index-000001",
settings: {
"index.mapping.ignore_above": 256,
},
});
console.log(response);
----

View File

@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.tasks.list({
human: "true",
detailed: "true",
  actions: "indices:data/read/search",
});
console.log(response);
----

View File

@ -0,0 +1,44 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.simulate.ingest({
body: {
docs: [
{
_index: "my-index",
_id: "123",
_source: {
foo: "foo",
},
},
{
_index: "my-index",
_id: "456",
_source: {
bar: "rab",
},
},
],
component_template_substitutions: {
"my-mappings_template": {
template: {
mappings: {
dynamic: "strict",
properties: {
foo: {
type: "keyword",
},
bar: {
type: "keyword",
},
},
},
},
},
},
},
});
console.log(response);
----

View File

@ -5,7 +5,7 @@
----
const response = await client.cat.threadPool({
v: "true",
h: "id,name,active,rejected,completed",
h: "id,name,queue,active,rejected,completed",
});
console.log(response);
----

View File

@ -12,7 +12,7 @@ const response = await client.indices.create({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@ -11,7 +11,7 @@ const response = await client.indices.create({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "amazon_bedrock_embeddings",
id: "amazon_bedrock_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -11,7 +11,7 @@ const response = await client.search({
filter: {
range: {
price: {
to: "500",
lte: "500",
},
},
},

View File

@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "elser_embeddings",
id: "elser_embeddings_pipeline",
processors: [
{
inference: {

View File

@ -402,7 +402,7 @@ client.fieldCaps({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.
** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias,
or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request
@ -750,6 +750,7 @@ client.openPointInTime({ index, keep_alive })
* *Request (object):*
** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices
** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to `match_none` on every shard.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on.
Random by default.
@ -940,7 +941,7 @@ client.search({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
Supports wildcards (`*`).
To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Defines the aggregations that are run as part of the search request.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Defines the aggregations that are run as part of the search request.
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field.
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@ -999,7 +1000,7 @@ If this field is specified, the `_source` parameter defaults to `false`.
You can pass `_source: true` to return both source fields and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT).
If you provide a PIT, you cannot specify an `<index>` in the request path.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request.
These fields take precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search.
Each group maintains a statistics aggregation for its associated searches.
@ -1098,7 +1099,7 @@ client.searchMvt({ index, field, zoom, x, y })
** *`zoom` (number)*: Zoom level for the vector tile to search
** *`x` (number)*: X coordinate for the vector tile to search
** *`y` (number)*: Y coordinate for the vector tile to search
** *`aggs` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Sub-aggregations for the geotile_grid.
** *`aggs` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Sub-aggregations for the geotile_grid.
Supports the following aggregation types:
- avg
@ -1126,7 +1127,7 @@ each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
of the cell's bounding box. If 'point' each feature is a Point that is the centroid
of the cell.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000.
If 0, results don't include the hits layer.
@ -1477,7 +1478,7 @@ client.asyncSearch.submit({ ... })
* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@ -1527,7 +1528,7 @@ parameter defaults to false. You can pass _source: true to return both source fi
and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
cannot specify an <index> in the request path.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
aggregation for its associated searches. You can retrieve these stats using
@ -3511,7 +3512,7 @@ client.eql.search({ index, query })
** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10
** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit.
** *`result_position` (Optional, Enum("tail" | "head"))*
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*
** *`allow_no_indices` (Optional, boolean)*
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*
** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
@ -3565,7 +3566,7 @@ and its format can change at any time but it can give some insight into the perf
of each part of the query.
** *`tables` (Optional, Record<string, Record<string, { integer, keyword, long, double }>>)*: Tables to use with the LOOKUP operation. The top level key is the table
name and the next level key is the column name.
** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml.
** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
@ -3668,7 +3669,7 @@ client.fleet.search({ index })
* *Request (object):*
** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@ -3717,7 +3718,7 @@ parameter defaults to false. You can pass _source: true to return both source fi
and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
cannot specify an <index> in the request path.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
aggregation for its associated searches. You can retrieve these stats using
@ -4030,7 +4031,7 @@ If specified, the `analyzer` parameter overrides this value.
** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
** *`text` (Optional, string | string[])*: Text to analyze.
If an array of strings is provided, it is analyzed as a multi-value field.
** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, flags, group, pattern } | { type, rule_files })*: Tokenizer to use to convert text into tokens.
** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens.
[discrete]
==== clear_cache
@ -4671,6 +4672,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur
Supports a list of values, such as `open,hidden`.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned.
[discrete]
==== get_field_mapping
@ -5054,14 +5056,14 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, element_type, dims, similarity, index, index_options } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, meta, inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { 
analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include:
** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, element_type, dims, similarity, index, index_options } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, meta, inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, 
time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include:
- Field name
- Field data type
- Mapping parameters
** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents.
** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index.
** *`runtime` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Mapping of runtime fields for the index.
** *`runtime` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Mapping of runtime fields for the index.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
@ -5262,6 +5264,10 @@ Resources on remote clusters can be specified using the `<cluster>`:`<name>` syn
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
[discrete]
==== rollover
@ -5668,6 +5674,8 @@ client.inference.put({ inference_id })
[discrete]
==== delete_geoip_database
Deletes a geoip database configuration.
{ref}/delete-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.deleteGeoipDatabase({ id })
@ -5717,6 +5725,8 @@ client.ingest.geoIpStats()
[discrete]
==== get_geoip_database
Returns information about one or more geoip database configurations.
{ref}/get-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.getGeoipDatabase({ ... })
@ -5770,6 +5780,8 @@ client.ingest.processorGrok()
[discrete]
==== put_geoip_database
Creates or updates a geoip database configuration.
{ref}/put-geoip-database-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.putGeoipDatabase({ id, name, maxmind })
@ -5805,9 +5817,11 @@ client.ingest.putPipeline({ id })
** *`id` (string)*: ID of the ingest pipeline to create or update.
** *`_meta` (Optional, Record<string, User-defined value>)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
** *`description` (Optional, string)*: Description of the ingest pipeline.
** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated.
When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates
@ -5829,7 +5843,7 @@ client.ingest.simulate({ docs })
** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
** *`id` (Optional, string)*: Pipeline to test.
If you don't specify a `pipeline` in the request body, this parameter is required.
** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test.
** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test.
If you don't specify the `pipeline` request path parameter, this parameter is required.
If you specify both this and the request path parameter, the API only uses the request path parameter.
** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline.
@ -5990,7 +6004,7 @@ client.logstash.putPipeline({ id })
* *Request (object):*
** *`id` (string)*: Identifier for the pipeline.
** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*
** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
[discrete]
=== migration
@ -7146,7 +7160,7 @@ client.ml.postCalendarEvents({ calendar_id, events })
* *Request (object):*
** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
** *`events` ({ calendar_id, event_id, description, end_time, start_time }[])*: A list of one or more scheduled events. The event's start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event's start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
[discrete]
==== post_data
@ -7358,7 +7372,7 @@ client.ml.putDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches.
Support for aggregations is limited and should be used only with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years.
This search is split into time chunks in order to ensure the load on Elasticsearch is managed.
@ -7389,7 +7403,7 @@ object is passed verbatim to Elasticsearch.
not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
value is randomly selected between `60s` and `120s`. This randomness improves the query performance
when there are multiple jobs running on the same node.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
The detector configuration objects in a job can contain functions that use these script fields.
** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
@ -7894,7 +7908,7 @@ client.ml.updateDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time
chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of
@ -7928,7 +7942,7 @@ when you are satisfied with the results of the job.
not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
value is randomly selected between `60s` and `120s`. This randomness improves the query performance
when there are multiple jobs running on the same node.
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
The detector configuration objects in a job can contain functions that use these script fields.
** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
@ -8531,7 +8545,7 @@ client.rollup.rollupSearch({ index })
* *Request (object):*
** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations.
** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
@ -8577,7 +8591,8 @@ If set to `false`, the API returns immediately and the indexer is stopped asynch
=== search_application
[discrete]
==== delete
Deletes a search application.
Delete a search application.
Remove a search application and its associated alias. Indices attached to the search application are not removed.
{ref}/delete-search-application.html[Endpoint documentation]
[source,ts]
@ -8594,6 +8609,7 @@ client.searchApplication.delete({ name })
[discrete]
==== delete_behavioral_analytics
Delete a behavioral analytics collection.
The associated data stream is also deleted.
{ref}/delete-analytics-collection.html[Endpoint documentation]
[source,ts]
@ -8609,7 +8625,7 @@ client.searchApplication.deleteBehavioralAnalytics({ name })
[discrete]
==== get
Returns the details about a search application
Get search application details.
{ref}/get-search-application.html[Endpoint documentation]
[source,ts]
@ -8625,7 +8641,7 @@ client.searchApplication.get({ name })
[discrete]
==== get_behavioral_analytics
Returns the existing behavioral analytics collections.
Get behavioral analytics collections.
{ref}/list-analytics-collection.html[Endpoint documentation]
[source,ts]
@ -8670,7 +8686,7 @@ client.searchApplication.postBehavioralAnalyticsEvent()
[discrete]
==== put
Creates or updates a search application.
Create or update a search application.
{ref}/put-search-application.html[Endpoint documentation]
[source,ts]
@ -8688,7 +8704,7 @@ client.searchApplication.put({ name })
[discrete]
==== put_behavioral_analytics
Creates a behavioral analytics collection.
Create a behavioral analytics collection.
{ref}/put-analytics-collection.html[Endpoint documentation]
[source,ts]
@ -8715,7 +8731,9 @@ client.searchApplication.renderQuery()
[discrete]
==== search
Perform a search against a search application.
Run a search application search.
Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template.
Unspecified template parameters are assigned their default values if applicable.
{ref}/search-application-search.html[Endpoint documentation]
[source,ts]
@ -9654,6 +9672,7 @@ client.security.putRole({ name })
** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
** *`global` (Optional, Record<string, User-defined value>)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries.
** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries.
** *`metadata` (Optional, Record<string, User-defined value>)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
** *`description` (Optional, string)*: Optional description of the role descriptor
@ -10588,14 +10607,14 @@ It ignores other request body parameters.
** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order).
** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async.
** *`params` (Optional, Record<string, User-defined value>)*: Values for parameters in the query.
** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search.
** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don't finish before the wait_for_completion_timeout.
** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false.
** *`format` (Optional, string)*: Format for the response.
** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: Format for the response.
[discrete]
==== translate
@ -11492,7 +11511,7 @@ client.xpack.info({ ... })
==== Arguments
* *Request (object):*
** *`categories` (Optional, string[])*: A list of the information categories to include in the response. For example, `build,license,features`.
** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. For example, `build,license,features`.
** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true
** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line.

View File

@ -46,7 +46,7 @@ export default class Ingest {
/**
* Deletes a geoip database configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html | Elasticsearch API documentation}
*/
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteGeoipDatabaseResponse>
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteGeoipDatabaseResponse, unknown>>
@ -140,7 +140,7 @@ export default class Ingest {
/**
* Returns information about one or more geoip database configurations.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html | Elasticsearch API documentation}
*/
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetGeoipDatabaseResponse>
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetGeoipDatabaseResponse, unknown>>
@ -250,7 +250,7 @@ export default class Ingest {
/**
* Returns information about one or more geoip database configurations.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html | Elasticsearch API documentation}
*/
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutGeoipDatabaseResponse>
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutGeoipDatabaseResponse, unknown>>
@ -301,7 +301,7 @@ export default class Ingest {
async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestPutPipelineResponse>
async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body

View File

@ -47,11 +47,23 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['index']
const acceptedBody: string[] = ['index_filter']
const querystring: Record<string, any> = {}
const body = undefined
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
for (const key in params) {
if (acceptedPath.includes(key)) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error

View File

@ -45,7 +45,7 @@ export default class SearchApplication {
}
/**
* Deletes a search application.
* Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html | Elasticsearch API documentation}
*/
async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteResponse>
@ -77,7 +77,7 @@ export default class SearchApplication {
}
/**
* Delete a behavioral analytics collection.
* Delete a behavioral analytics collection. The associated data stream is also deleted.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html | Elasticsearch API documentation}
*/
async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationDeleteBehavioralAnalyticsResponse>
@ -109,7 +109,7 @@ export default class SearchApplication {
}
/**
* Returns the details about a search application
* Get search application details.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html | Elasticsearch API documentation}
*/
async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetResponse>
@ -141,7 +141,7 @@ export default class SearchApplication {
}
/**
* Returns the existing behavioral analytics collections.
* Get behavioral analytics collections.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html | Elasticsearch API documentation}
*/
async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
@ -244,7 +244,7 @@ export default class SearchApplication {
}
/**
* Creates or updates a search application.
* Create or update a search application.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html | Elasticsearch API documentation}
*/
async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutResponse>
@ -281,7 +281,7 @@ export default class SearchApplication {
}
/**
* Creates a behavioral analytics collection.
* Create a behavioral analytics collection.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html | Elasticsearch API documentation}
*/
async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationPutBehavioralAnalyticsResponse>
@ -345,7 +345,7 @@ export default class SearchApplication {
}
/**
* Perform a search against a search application.
* Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html | Elasticsearch API documentation}
*/
async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>

View File

@ -1685,7 +1685,7 @@ export default class Security {
async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityPutRoleResponse>
async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['name']
const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'metadata', 'run_as', 'description', 'transient_metadata']
const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'metadata', 'run_as', 'description', 'transient_metadata']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body

View File

@ -374,6 +374,38 @@ export default class Snapshot {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Verifies the integrity of the contents of a snapshot repository.
* NOTE(review): the @see link points at the generic snapshots guide — presumably a dedicated page did not exist when this was generated; confirm before relying on it.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}
*/
async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SnapshotRepositoryVerifyIntegrityResponse>
async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SnapshotRepositoryVerifyIntegrityResponse, unknown>>
async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise<T.SnapshotRepositoryVerifyIntegrityResponse>
async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise<any> {
// 'name' is consumed by the URL path; this endpoint takes no request body.
const acceptedPath: string[] = ['name']
const querystring: Record<string, any> = {}
const body = undefined
// Every remaining request property (except a literal 'body' key) is
// forwarded as a query-string parameter.
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
const method = 'POST'
const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify_integrity`
// Metadata identifying the API and its path parts, used by the transport
// layer for observability/telemetry.
const meta: TransportRequestMetadata = {
name: 'snapshot.repository_verify_integrity',
pathParts: {
name: params.name
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
/**
* Restores a snapshot.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation}

View File

@ -884,9 +884,11 @@ export interface OpenPointInTimeRequest extends RequestBase {
preference?: string
routing?: Routing
expand_wildcards?: ExpandWildcards
index_filter?: QueryDslQueryContainer
}
export interface OpenPointInTimeResponse {
_shards: ShardStatistics
id: Id
}
@ -1281,6 +1283,10 @@ export interface SearchAggregationProfileDebug {
segments_counted?: integer
segments_collected?: integer
map_reducer?: string
brute_force_used?: integer
dynamic_pruning_attempted?: integer
dynamic_pruning_used?: integer
skipped_due_to_no_data?: integer
}
export interface SearchAggregationProfileDelegateDebugFilter {
@ -1333,6 +1339,39 @@ export interface SearchCompletionSuggester extends SearchSuggesterBase {
export type SearchContext = string | GeoLocation
export interface SearchDfsKnnProfile {
vector_operations_count?: long
query: SearchKnnQueryProfileResult[]
rewrite_time: long
collector: SearchKnnCollectorResult[]
}
export interface SearchDfsProfile {
statistics?: SearchDfsStatisticsProfile
knn?: SearchDfsKnnProfile[]
}
export interface SearchDfsStatisticsBreakdown {
collection_statistics: long
collection_statistics_count: long
create_weight: long
create_weight_count: long
rewrite: long
rewrite_count: long
term_statistics: long
term_statistics_count: long
}
export interface SearchDfsStatisticsProfile {
type: string
description: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
breakdown: SearchDfsStatisticsBreakdown
debug?: Record<string, any>
children?: SearchDfsStatisticsProfile[]
}
export interface SearchDirectGenerator {
field: Field
max_edits?: integer
@ -1440,10 +1479,10 @@ export interface SearchHit<TDocument = unknown> {
fields?: Record<string, any>
highlight?: Record<string, string[]>
inner_hits?: Record<string, SearchInnerHitsResult>
matched_queries?: string[] | Record<string, double[]>
matched_queries?: string[] | Record<string, double>
_nested?: SearchNestedIdentity
_ignored?: string[]
ignored_field_values?: Record<string, string[]>
ignored_field_values?: Record<string, FieldValue[]>
_shard?: string
_node?: string
_routing?: string
@ -1484,6 +1523,47 @@ export interface SearchInnerHitsResult {
hits: SearchHitsMetadata<any>
}
export interface SearchKnnCollectorResult {
name: string
reason: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
children?: SearchKnnCollectorResult[]
}
export interface SearchKnnQueryProfileBreakdown {
advance: long
advance_count: long
build_scorer: long
build_scorer_count: long
compute_max_score: long
compute_max_score_count: long
count_weight: long
count_weight_count: long
create_weight: long
create_weight_count: long
match: long
match_count: long
next_doc: long
next_doc_count: long
score: long
score_count: long
set_min_competitive_score: long
set_min_competitive_score_count: long
shallow_advance: long
shallow_advance_count: long
}
export interface SearchKnnQueryProfileResult {
type: string
description: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
breakdown: SearchKnnQueryProfileBreakdown
debug?: Record<string, any>
children?: SearchKnnQueryProfileResult[]
}
export interface SearchLaplaceSmoothingModel {
alpha: double
}
@ -1574,6 +1654,8 @@ export interface SearchQueryBreakdown {
score_count: long
compute_max_score: long
compute_max_score_count: long
count_weight: long
count_weight_count: long
set_min_competitive_score: long
set_min_competitive_score_count: long
}
@ -1614,9 +1696,14 @@ export interface SearchSearchProfile {
export interface SearchShardProfile {
aggregations: SearchAggregationProfile[]
id: string
searches: SearchSearchProfile[]
cluster: string
dfs?: SearchDfsProfile
fetch?: SearchFetchProfile
id: string
index: IndexName
node_id: NodeId
searches: SearchSearchProfile[]
shard_id: long
}
export interface SearchSmoothingModelContainer {
@ -1751,11 +1838,23 @@ export interface SearchShardsRequest extends RequestBase {
}
export interface SearchShardsResponse {
nodes: Record<string, NodeAttributes>
nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
shards: NodeShard[][]
indices: Record<IndexName, SearchShardsShardStoreIndex>
}
export interface SearchShardsSearchShardsNodeAttributes {
name: NodeName
ephemeral_id: Id
transport_address: TransportAddress
external_id: string
attributes: Record<string, string>
roles: NodeRoles
version: VersionString
min_index_version: integer
max_index_version: integer
}
export interface SearchShardsShardStoreIndex {
aliases?: Name[]
filter?: QueryDslQueryContainer
@ -2243,6 +2342,8 @@ export interface GetStats {
total: long
}
export type GrokPattern = string
export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
export type Host = string
@ -2397,8 +2498,6 @@ export interface NodeAttributes {
id?: NodeId
name: NodeName
transport_address: TransportAddress
roles?: NodeRoles
external_id?: string
}
export type NodeId = string
@ -2848,7 +2947,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
& { [property: string]: AggregationsAggregate | string | long }
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | 
AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | 
AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export interface AggregationsAggregateBase {
meta?: Metadata
@ -2922,6 +3021,7 @@ export interface AggregationsAggregationContainer {
rare_terms?: AggregationsRareTermsAggregation
rate?: AggregationsRateAggregation
reverse_nested?: AggregationsReverseNestedAggregation
random_sampler?: AggregationsRandomSamplerAggregation
sampler?: AggregationsSamplerAggregation
scripted_metric?: AggregationsScriptedMetricAggregation
serial_diff?: AggregationsSerialDifferencingAggregation
@ -2933,6 +3033,7 @@ export interface AggregationsAggregationContainer {
sum?: AggregationsSumAggregation
sum_bucket?: AggregationsSumBucketAggregation
terms?: AggregationsTermsAggregation
time_series?: AggregationsTimeSeriesAggregation
top_hits?: AggregationsTopHitsAggregation
t_test?: AggregationsTTestAggregation
top_metrics?: AggregationsTopMetricsAggregation
@ -2942,9 +3043,9 @@ export interface AggregationsAggregationContainer {
}
export interface AggregationsAggregationRange {
from?: double
from?: double | null
key?: string
to?: double
to?: double | null
}
export interface AggregationsArrayPercentilesItem {
@ -3808,6 +3909,12 @@ export interface AggregationsPipelineAggregationBase extends AggregationsBucketP
gap_policy?: AggregationsGapPolicy
}
export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase {
probability: double
seed?: integer
shard_seed?: integer
}
export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsRangeBucket> {
}
@ -4129,6 +4236,20 @@ export interface AggregationsTestPopulation {
filter?: QueryDslQueryContainer
}
export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase<AggregationsTimeSeriesBucket> {
}
export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
size?: integer
keyed?: boolean
}
export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
key: Record<Field, FieldValue>
}
export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long }
export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
hits: SearchHitsMetadata<any>
}
@ -4309,6 +4430,11 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
}
export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
type: 'common_grams'
common_words?: string[]
@ -4395,7 +4521,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@ -4610,14 +4736,14 @@ export interface AnalysisKeywordAnalyzer {
export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
type: 'keyword_marker'
ignore_case?: boolean
keywords?: string[]
keywords?: string | string[]
keywords_path?: string
keywords_pattern?: string
}
export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
type: 'keyword'
buffer_size: integer
buffer_size?: integer
}
export interface AnalysisKuromojiAnalyzer {
@ -4738,7 +4864,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisNoriAnalyzer {
@ -4903,6 +5029,16 @@ export interface AnalysisSimpleAnalyzer {
version?: VersionString
}
export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern_split'
pattern?: string
}
export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern'
pattern?: string
}
export interface AnalysisSnowballAnalyzer {
type: 'snowball'
version?: VersionString
@ -5010,6 +5146,10 @@ export interface AnalysisThaiAnalyzer {
stopwords_path?: string
}
export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
type: 'thai'
}
export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@ -5026,7 +5166,7 @@ export interface AnalysisTokenizerBase {
version?: VersionString
}
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
type: 'trim'
@ -5151,6 +5291,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
type: 'completion'
}
export interface MappingCompositeSubField {
type: MappingRuntimeFieldType
}
export interface MappingConstantKeywordProperty extends MappingPropertyBase {
value?: any
type: 'constant_keyword'
@ -5158,7 +5302,6 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase {
export interface MappingCorePropertyBase extends MappingPropertyBase {
copy_to?: Fields
similarity?: string
store?: boolean
}
@ -5239,7 +5382,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
index?: boolean
index_options?: MappingIndexOptions
index_phrases?: boolean
index_prefixes?: MappingTextIndexPrefixes
index_prefixes?: MappingTextIndexPrefixes | null
norms?: boolean
position_increment_gap?: integer
search_analyzer?: string
@ -5399,6 +5542,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
normalizer?: string
norms?: boolean
null_value?: string
similarity?: string | null
split_queries_on_whitespace?: boolean
time_series_dimension?: boolean
type: 'keyword'
@ -5494,6 +5638,7 @@ export interface MappingRoutingField {
}
export interface MappingRuntimeField {
fields?: Record<string, MappingCompositeSubField>
fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
format?: string
input_field?: Field
@ -5526,6 +5671,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
norms?: boolean
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: MappingTermVectorOption
type: 'search_as_you_type'
}
@ -5591,11 +5737,12 @@ export interface MappingTextProperty extends MappingCorePropertyBase {
index?: boolean
index_options?: MappingIndexOptions
index_phrases?: boolean
index_prefixes?: MappingTextIndexPrefixes
index_prefixes?: MappingTextIndexPrefixes | null
norms?: boolean
position_increment_gap?: integer
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: MappingTermVectorOption
type: 'text'
}
@ -6389,9 +6536,10 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys
export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup
export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
minimum_should_match?: MinimumShouldMatch
minimum_should_match_field?: Field
minimum_should_match_script?: Script | string
terms: string[]
terms: FieldValue[]
}
export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
@ -6684,39 +6832,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
export type CatAliasesResponse = CatAliasesAliasesRecord[]
export interface CatAllocationAllocationRecord {
shards: string
s: string
'shards.undesired': string | null
'write_load.forecast': double | null
wlf: double | null
writeLoadForecast: double | null
'disk.indices.forecast': ByteSize | null
dif: ByteSize | null
diskIndicesForecast: ByteSize | null
'disk.indices': ByteSize | null
di: ByteSize | null
diskIndices: ByteSize | null
'disk.used': ByteSize | null
du: ByteSize | null
diskUsed: ByteSize | null
'disk.avail': ByteSize | null
da: ByteSize | null
diskAvail: ByteSize | null
'disk.total': ByteSize | null
dt: ByteSize | null
diskTotal: ByteSize | null
'disk.percent': Percentage | null
dp: Percentage | null
diskPercent: Percentage | null
host: Host | null
h: Host | null
ip: Ip | null
node: string
n: string
'node.role': string | null
r: string | null
role: string | null
nodeRole: string | null
shards?: string
s?: string
'shards.undesired'?: string | null
'write_load.forecast'?: SpecUtilsStringified<double> | null
wlf?: SpecUtilsStringified<double> | null
writeLoadForecast?: SpecUtilsStringified<double> | null
'disk.indices.forecast'?: ByteSize | null
dif?: ByteSize | null
diskIndicesForecast?: ByteSize | null
'disk.indices'?: ByteSize | null
di?: ByteSize | null
diskIndices?: ByteSize | null
'disk.used'?: ByteSize | null
du?: ByteSize | null
diskUsed?: ByteSize | null
'disk.avail'?: ByteSize | null
da?: ByteSize | null
diskAvail?: ByteSize | null
'disk.total'?: ByteSize | null
dt?: ByteSize | null
diskTotal?: ByteSize | null
'disk.percent'?: Percentage | null
dp?: Percentage | null
diskPercent?: Percentage | null
host?: Host | null
h?: Host | null
ip?: Ip | null
node?: string
n?: string
'node.role'?: string | null
r?: string | null
role?: string | null
nodeRole?: string | null
}
export interface CatAllocationRequest extends CatCatRequestBase {
@ -6815,6 +6963,10 @@ export interface CatHealthHealthRecord {
i?: string
'shards.initializing'?: string
shardsInitializing?: string
'unassign.pri'?: string
up?: string
'shards.unassigned.primary'?: string
shardsUnassignedPrimary?: string
unassign?: string
u?: string
'shards.unassigned'?: string
@ -6878,6 +7030,7 @@ export interface CatIndicesIndicesRecord {
ss?: string | null
storeSize?: string | null
'pri.store.size'?: string | null
'dataset.size'?: string | null
'completion.size'?: string
cs?: string
completionSize?: string
@ -7995,6 +8148,7 @@ export interface CatShardsShardsRecord {
dc?: string | null
store?: string | null
sto?: string | null
dataset?: string | null
ip?: string | null
id?: string
node?: string | null
@ -8717,6 +8871,7 @@ export interface ClusterAllocationExplainClusterInfo {
export interface ClusterAllocationExplainCurrentNode {
id: Id
name: Name
roles: NodeRoles
attributes: Record<string, string>
transport_address: TransportAddress
weight_ranking: integer
@ -8739,6 +8894,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
node_decision: ClusterAllocationExplainDecision
node_id: Id
node_name: Name
roles: NodeRoles
store?: ClusterAllocationExplainAllocationStore
transport_address: TransportAddress
weight_ranking: integer
@ -8870,6 +9026,7 @@ export interface ClusterHealthHealthResponseBody {
task_max_waiting_in_queue?: Duration
task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
timed_out: boolean
unassigned_primary_shards: integer
unassigned_shards: integer
}
@ -8883,6 +9040,7 @@ export interface ClusterHealthIndexHealthStats {
shards?: Record<string, ClusterHealthShardHealthStats>
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterHealthRequest extends RequestBase {
@ -8909,6 +9067,7 @@ export interface ClusterHealthShardHealthStats {
relocating_shards: integer
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterInfoRequest extends RequestBase {
@ -9916,8 +10075,11 @@ export interface EnrichStatsCacheStats {
node_id: Id
count: integer
hits: integer
hits_time_in_millis: DurationValue<UnitMillis>
misses: integer
misses_time_in_millis: DurationValue<UnitMillis>
evictions: integer
size_in_bytes: long
}
export interface EnrichStatsCoordinatorStats {
@ -10037,8 +10199,10 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
export interface EsqlQueryRequest extends RequestBase {
format?: string
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
columnar?: boolean
@ -11446,6 +11610,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
verbose?: boolean
}
export interface IndicesGetDataStreamResponse {
@ -11830,6 +11995,8 @@ export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveC
export interface IndicesResolveIndexRequest extends RequestBase {
name: Names
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
allow_no_indices?: boolean
}
export interface IndicesResolveIndexResolveIndexAliasItem {
@ -12397,7 +12564,7 @@ export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface IngestAppendProcessor extends IngestProcessorBase {
field: Field
value: any[]
value: any | any[]
allow_duplicates?: boolean
}
@ -12477,6 +12644,7 @@ export interface IngestDissectProcessor extends IngestProcessorBase {
export interface IngestDotExpanderProcessor extends IngestProcessorBase {
field: Field
override?: boolean
path?: string
}
@ -12503,6 +12671,22 @@ export interface IngestForeachProcessor extends IngestProcessorBase {
processor: IngestProcessorContainer
}
export interface IngestGeoGridProcessor extends IngestProcessorBase {
field: string
tile_type: IngestGeoGridTileType
target_field?: Field
parent_field?: Field
children_field?: Field
non_children_field?: Field
precision_field?: Field
ignore_missing?: boolean
target_format?: IngestGeoGridTargetFormat
}
export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
export interface IngestGeoIpProcessor extends IngestProcessorBase {
database_file?: string
field: Field
@ -12510,13 +12694,14 @@ export interface IngestGeoIpProcessor extends IngestProcessorBase {
ignore_missing?: boolean
properties?: string[]
target_field?: Field
download_database_on_pipeline_creation?: boolean
}
export interface IngestGrokProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
pattern_definitions?: Record<string, string>
patterns: string[]
patterns: GrokPattern[]
trace_match?: boolean
}
@ -12604,6 +12789,7 @@ export interface IngestPipeline {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
_meta?: Metadata
}
@ -12641,6 +12827,7 @@ export interface IngestProcessorContainer {
enrich?: IngestEnrichProcessor
fail?: IngestFailProcessor
foreach?: IngestForeachProcessor
geo_grid?: IngestGeoGridProcessor
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
@ -12651,6 +12838,7 @@ export interface IngestProcessorContainer {
kv?: IngestKeyValueProcessor
lowercase?: IngestLowercaseProcessor
pipeline?: IngestPipelineProcessor
redact?: IngestRedactProcessor
remove?: IngestRemoveProcessor
rename?: IngestRenameProcessor
reroute?: IngestRerouteProcessor
@ -12666,6 +12854,16 @@ export interface IngestProcessorContainer {
user_agent?: IngestUserAgentProcessor
}
export interface IngestRedactProcessor extends IngestProcessorBase {
field: Field
patterns: GrokPattern[]
pattern_definitions?: Record<string, string>
prefix?: string
suffix?: string
ignore_missing?: boolean
skip_if_unlicensed?: boolean
}
export interface IngestRemoveProcessor extends IngestProcessorBase {
field: Fields
keep?: Fields
@ -12750,12 +12948,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
export interface IngestUserAgentProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
options?: IngestUserAgentProperty[]
regex_file?: string
target_field?: Field
properties?: IngestUserAgentProperty[]
extract_device_type?: boolean
}
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
@ -12850,6 +13049,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
}
export type IngestPutPipelineResponse = AcknowledgedResponseBase
@ -13238,6 +13438,9 @@ export interface MlCalendarEvent {
description: string
end_time: DateTime
start_time: DateTime
skip_result?: boolean
skip_model_update?: boolean
force_time_shift?: integer
}
export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition
@ -13253,7 +13456,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
export interface MlCategory {
category_id: ulong
examples: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
job_id: Id
max_matching_length: ulong
partition_field_name?: string
@ -13642,9 +13845,7 @@ export interface MlDelayedDataCheckConfig {
export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'
export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed'
export type MlDeploymentState = 'started' | 'starting' | 'stopping'
export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed'
export interface MlDetectionRule {
actions?: MlRuleAction[]
@ -14234,7 +14435,7 @@ export interface MlTrainedModelDeploymentStats {
rejected_execution_count: integer
reason: string
start_time: EpochTime<UnitMillis>
state: MlDeploymentState
state: MlDeploymentAssignmentState
threads_per_allocation: integer
timeout_count: integer
}
@ -15736,6 +15937,25 @@ export interface NodesHttp {
current_open?: integer
total_opened?: long
clients?: NodesClient[]
routes: Record<string, NodesHttpRoute>
}
export interface NodesHttpRoute {
requests: NodesHttpRouteRequests
responses: NodesHttpRouteResponses
}
export interface NodesHttpRouteRequests {
count: long
total_size_in_bytes: long
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesHttpRouteResponses {
count: long
total_size_in_bytes: long
handling_time_histogram: NodesTimeHttpHistogram[]
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesIndexingPressure {
@ -15750,16 +15970,25 @@ export interface NodesIndexingPressureMemory {
}
export interface NodesIngest {
pipelines?: Record<string, NodesIngestTotal>
pipelines?: Record<string, NodesIngestStats>
total?: NodesIngestTotal
}
export interface NodesIngestStats {
count: long
current: long
failed: long
processors: Record<string, NodesKeyedProcessor>[]
time_in_millis: DurationValue<UnitMillis>
ingested_as_first_pipeline_in_bytes: long
produced_as_first_pipeline_in_bytes: long
}
export interface NodesIngestTotal {
count?: long
current?: long
failed?: long
processors?: Record<string, NodesKeyedProcessor>[]
time_in_millis?: DurationValue<UnitMillis>
count: long
current: long
failed: long
time_in_millis: DurationValue<UnitMillis>
}
export interface NodesIoStatDevice {
@ -15964,6 +16193,12 @@ export interface NodesSerializedClusterStateDetail {
compressed_size_in_bytes?: long
}
export interface NodesSizeHttpHistogram {
count: long
ge_bytes?: long
lt_bytes?: long
}
export interface NodesStats {
adaptive_selection?: Record<string, NodesAdaptiveSelection>
breakers?: Record<string, NodesBreaker>
@ -15998,6 +16233,12 @@ export interface NodesThreadCount {
threads?: long
}
export interface NodesTimeHttpHistogram {
count: long
ge_millis?: long
lt_millis?: long
}
export interface NodesTransport {
inbound_handling_time_histogram?: NodesTransportHistogram[]
outbound_handling_time_histogram?: NodesTransportHistogram[]
@ -16986,6 +17227,15 @@ export interface SecurityRealmInfo {
type: string
}
export interface SecurityRemoteIndicesPrivileges {
clusters: Names
field_security?: SecurityFieldSecurity
names: Indices
privileges: SecurityIndexPrivilege[]
query?: SecurityIndicesPrivilegesQuery
allow_restricted_indices?: boolean
}
export interface SecurityRoleDescriptor {
cluster?: SecurityClusterPrivilege[]
indices?: SecurityIndicesPrivileges[]
@ -17638,6 +17888,7 @@ export interface SecurityPutRoleRequest extends RequestBase {
cluster?: SecurityClusterPrivilege[]
global?: Record<string, any>
indices?: SecurityIndicesPrivileges[]
remote_indices?: SecurityRemoteIndicesPrivileges[]
metadata?: Metadata
run_as?: string[]
description?: string
@ -18263,6 +18514,7 @@ export interface SnapshotSnapshotShardFailure {
node_id?: Id
reason: string
shard_id: Id
index_uuid: Id
status: string
}
@ -18413,6 +18665,20 @@ export interface SnapshotGetRepositoryRequest extends RequestBase {
export type SnapshotGetRepositoryResponse = Record<string, SnapshotRepository>
export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase {
name: Names
meta_thread_pool_concurrency?: integer
blob_thread_pool_concurrency?: integer
snapshot_verification_concurrency?: integer
index_verification_concurrency?: integer
index_snapshot_verification_concurrency?: integer
max_failed_shard_snapshots?: integer
verify_blob_contents?: boolean
max_bytes_per_sec?: string
}
export type SnapshotRepositoryVerifyIntegrityResponse = any
export interface SnapshotRestoreRequest extends RequestBase {
repository: Name
snapshot: Name
@ -18517,7 +18783,7 @@ export interface SqlGetAsyncStatusResponse {
}
export interface SqlQueryRequest extends RequestBase {
format?: string
format?: SqlQuerySqlFormat
catalog?: string
columnar?: boolean
cursor?: string
@ -18545,6 +18811,8 @@ export interface SqlQueryResponse {
rows: SqlRow[]
}
export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'
export interface SqlTranslateRequest extends RequestBase {
fetch_size?: integer
filter?: QueryDslQueryContainer
@ -18754,7 +19022,7 @@ export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
ecs_compatibility?: string
explain?: boolean
format?: string
grok_pattern?: string
grok_pattern?: GrokPattern
has_header_row?: boolean
line_merge_size_limit?: uint
lines_to_sample?: uint
@ -18781,7 +19049,7 @@ export interface TextStructureFindStructureResponse {
num_lines_analyzed: integer
column_names?: string[]
explanation?: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
multiline_start_pattern?: string
exclude_lines_pattern?: string
java_timestamp_formats?: string[]
@ -18809,7 +19077,7 @@ export interface TextStructureTestGrokPatternMatchedText {
export interface TextStructureTestGrokPatternRequest extends RequestBase {
ecs_compatibility?: string
grok_pattern: string
grok_pattern: GrokPattern
text: string[]
}
@ -19832,7 +20100,7 @@ export interface XpackInfoNativeCodeInformation {
}
export interface XpackInfoRequest extends RequestBase {
categories?: string[]
categories?: XpackInfoXPackCategory[]
accept_enterprise?: boolean
human?: boolean
}
@ -19844,6 +20112,8 @@ export interface XpackInfoResponse {
tagline: string
}
export type XpackInfoXPackCategory = 'build' | 'features' | 'license'
export interface XpackUsageAnalytics extends XpackUsageBase {
stats: XpackUsageAnalyticsStatistics
}

View File

@ -917,9 +917,14 @@ export interface OpenPointInTimeRequest extends RequestBase {
preference?: string
routing?: Routing
expand_wildcards?: ExpandWildcards
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
index_filter?: QueryDslQueryContainer
}
}
export interface OpenPointInTimeResponse {
_shards: ShardStatistics
id: Id
}
@ -1336,6 +1341,10 @@ export interface SearchAggregationProfileDebug {
segments_counted?: integer
segments_collected?: integer
map_reducer?: string
brute_force_used?: integer
dynamic_pruning_attempted?: integer
dynamic_pruning_used?: integer
skipped_due_to_no_data?: integer
}
export interface SearchAggregationProfileDelegateDebugFilter {
@ -1388,6 +1397,39 @@ export interface SearchCompletionSuggester extends SearchSuggesterBase {
export type SearchContext = string | GeoLocation
export interface SearchDfsKnnProfile {
vector_operations_count?: long
query: SearchKnnQueryProfileResult[]
rewrite_time: long
collector: SearchKnnCollectorResult[]
}
export interface SearchDfsProfile {
statistics?: SearchDfsStatisticsProfile
knn?: SearchDfsKnnProfile[]
}
export interface SearchDfsStatisticsBreakdown {
collection_statistics: long
collection_statistics_count: long
create_weight: long
create_weight_count: long
rewrite: long
rewrite_count: long
term_statistics: long
term_statistics_count: long
}
export interface SearchDfsStatisticsProfile {
type: string
description: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
breakdown: SearchDfsStatisticsBreakdown
debug?: Record<string, any>
children?: SearchDfsStatisticsProfile[]
}
export interface SearchDirectGenerator {
field: Field
max_edits?: integer
@ -1495,10 +1537,10 @@ export interface SearchHit<TDocument = unknown> {
fields?: Record<string, any>
highlight?: Record<string, string[]>
inner_hits?: Record<string, SearchInnerHitsResult>
matched_queries?: string[] | Record<string, double[]>
matched_queries?: string[] | Record<string, double>
_nested?: SearchNestedIdentity
_ignored?: string[]
ignored_field_values?: Record<string, string[]>
ignored_field_values?: Record<string, FieldValue[]>
_shard?: string
_node?: string
_routing?: string
@ -1539,6 +1581,47 @@ export interface SearchInnerHitsResult {
hits: SearchHitsMetadata<any>
}
export interface SearchKnnCollectorResult {
name: string
reason: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
children?: SearchKnnCollectorResult[]
}
export interface SearchKnnQueryProfileBreakdown {
advance: long
advance_count: long
build_scorer: long
build_scorer_count: long
compute_max_score: long
compute_max_score_count: long
count_weight: long
count_weight_count: long
create_weight: long
create_weight_count: long
match: long
match_count: long
next_doc: long
next_doc_count: long
score: long
score_count: long
set_min_competitive_score: long
set_min_competitive_score_count: long
shallow_advance: long
shallow_advance_count: long
}
export interface SearchKnnQueryProfileResult {
type: string
description: string
time?: Duration
time_in_nanos: DurationValue<UnitNanos>
breakdown: SearchKnnQueryProfileBreakdown
debug?: Record<string, any>
children?: SearchKnnQueryProfileResult[]
}
export interface SearchLaplaceSmoothingModel {
alpha: double
}
@ -1629,6 +1712,8 @@ export interface SearchQueryBreakdown {
score_count: long
compute_max_score: long
compute_max_score_count: long
count_weight: long
count_weight_count: long
set_min_competitive_score: long
set_min_competitive_score_count: long
}
@ -1669,9 +1754,14 @@ export interface SearchSearchProfile {
export interface SearchShardProfile {
aggregations: SearchAggregationProfile[]
id: string
searches: SearchSearchProfile[]
cluster: string
dfs?: SearchDfsProfile
fetch?: SearchFetchProfile
id: string
index: IndexName
node_id: NodeId
searches: SearchSearchProfile[]
shard_id: long
}
export interface SearchSmoothingModelContainer {
@ -1809,11 +1899,23 @@ export interface SearchShardsRequest extends RequestBase {
}
export interface SearchShardsResponse {
nodes: Record<string, NodeAttributes>
nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
shards: NodeShard[][]
indices: Record<IndexName, SearchShardsShardStoreIndex>
}
export interface SearchShardsSearchShardsNodeAttributes {
name: NodeName
ephemeral_id: Id
transport_address: TransportAddress
external_id: string
attributes: Record<string, string>
roles: NodeRoles
version: VersionString
min_index_version: integer
max_index_version: integer
}
export interface SearchShardsShardStoreIndex {
aliases?: Name[]
filter?: QueryDslQueryContainer
@ -2316,6 +2418,8 @@ export interface GetStats {
total: long
}
export type GrokPattern = string
export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
export type Host = string
@ -2470,8 +2574,6 @@ export interface NodeAttributes {
id?: NodeId
name: NodeName
transport_address: TransportAddress
roles?: NodeRoles
external_id?: string
}
export type NodeId = string
@ -2921,7 +3023,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
& { [property: string]: AggregationsAggregate | string | long }
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | 
AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | 
AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export interface AggregationsAggregateBase {
meta?: Metadata
@ -2995,6 +3097,7 @@ export interface AggregationsAggregationContainer {
rare_terms?: AggregationsRareTermsAggregation
rate?: AggregationsRateAggregation
reverse_nested?: AggregationsReverseNestedAggregation
random_sampler?: AggregationsRandomSamplerAggregation
sampler?: AggregationsSamplerAggregation
scripted_metric?: AggregationsScriptedMetricAggregation
serial_diff?: AggregationsSerialDifferencingAggregation
@ -3006,6 +3109,7 @@ export interface AggregationsAggregationContainer {
sum?: AggregationsSumAggregation
sum_bucket?: AggregationsSumBucketAggregation
terms?: AggregationsTermsAggregation
time_series?: AggregationsTimeSeriesAggregation
top_hits?: AggregationsTopHitsAggregation
t_test?: AggregationsTTestAggregation
top_metrics?: AggregationsTopMetricsAggregation
@ -3015,9 +3119,9 @@ export interface AggregationsAggregationContainer {
}
export interface AggregationsAggregationRange {
from?: double
from?: double | null
key?: string
to?: double
to?: double | null
}
export interface AggregationsArrayPercentilesItem {
@ -3881,6 +3985,12 @@ export interface AggregationsPipelineAggregationBase extends AggregationsBucketP
gap_policy?: AggregationsGapPolicy
}
export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase {
probability: double
seed?: integer
shard_seed?: integer
}
export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsRangeBucket> {
}
@ -4202,6 +4312,20 @@ export interface AggregationsTestPopulation {
filter?: QueryDslQueryContainer
}
export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase<AggregationsTimeSeriesBucket> {
}
export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
size?: integer
keyed?: boolean
}
export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
key: Record<Field, FieldValue>
}
export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long }
export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
hits: SearchHitsMetadata<any>
}
@ -4382,6 +4506,11 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
}
export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
type: 'common_grams'
common_words?: string[]
@ -4468,7 +4597,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@ -4683,14 +4812,14 @@ export interface AnalysisKeywordAnalyzer {
export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
type: 'keyword_marker'
ignore_case?: boolean
keywords?: string[]
keywords?: string | string[]
keywords_path?: string
keywords_pattern?: string
}
export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
type: 'keyword'
buffer_size: integer
buffer_size?: integer
}
export interface AnalysisKuromojiAnalyzer {
@ -4811,7 +4940,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisNoriAnalyzer {
@ -4976,6 +5105,16 @@ export interface AnalysisSimpleAnalyzer {
version?: VersionString
}
export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern_split'
pattern?: string
}
export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern'
pattern?: string
}
export interface AnalysisSnowballAnalyzer {
type: 'snowball'
version?: VersionString
@ -5083,6 +5222,10 @@ export interface AnalysisThaiAnalyzer {
stopwords_path?: string
}
export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
type: 'thai'
}
export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@ -5099,7 +5242,7 @@ export interface AnalysisTokenizerBase {
version?: VersionString
}
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
type: 'trim'
@ -5224,6 +5367,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
type: 'completion'
}
export interface MappingCompositeSubField {
type: MappingRuntimeFieldType
}
export interface MappingConstantKeywordProperty extends MappingPropertyBase {
value?: any
type: 'constant_keyword'
@ -5231,7 +5378,6 @@ export interface MappingConstantKeywordProperty extends MappingPropertyBase {
export interface MappingCorePropertyBase extends MappingPropertyBase {
copy_to?: Fields
similarity?: string
store?: boolean
}
@ -5312,7 +5458,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase {
index?: boolean
index_options?: MappingIndexOptions
index_phrases?: boolean
index_prefixes?: MappingTextIndexPrefixes
index_prefixes?: MappingTextIndexPrefixes | null
norms?: boolean
position_increment_gap?: integer
search_analyzer?: string
@ -5472,6 +5618,7 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase {
normalizer?: string
norms?: boolean
null_value?: string
similarity?: string | null
split_queries_on_whitespace?: boolean
time_series_dimension?: boolean
type: 'keyword'
@ -5567,6 +5714,7 @@ export interface MappingRoutingField {
}
export interface MappingRuntimeField {
fields?: Record<string, MappingCompositeSubField>
fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
format?: string
input_field?: Field
@ -5599,6 +5747,7 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase
norms?: boolean
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: MappingTermVectorOption
type: 'search_as_you_type'
}
@ -5664,11 +5813,12 @@ export interface MappingTextProperty extends MappingCorePropertyBase {
index?: boolean
index_options?: MappingIndexOptions
index_phrases?: boolean
index_prefixes?: MappingTextIndexPrefixes
index_prefixes?: MappingTextIndexPrefixes | null
norms?: boolean
position_increment_gap?: integer
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: MappingTermVectorOption
type: 'text'
}
@ -6462,9 +6612,10 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys
export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup
export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
minimum_should_match?: MinimumShouldMatch
minimum_should_match_field?: Field
minimum_should_match_script?: Script | string
terms: string[]
terms: FieldValue[]
}
export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
@ -6761,39 +6912,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
export type CatAliasesResponse = CatAliasesAliasesRecord[]
export interface CatAllocationAllocationRecord {
shards: string
s: string
'shards.undesired': string | null
'write_load.forecast': double | null
wlf: double | null
writeLoadForecast: double | null
'disk.indices.forecast': ByteSize | null
dif: ByteSize | null
diskIndicesForecast: ByteSize | null
'disk.indices': ByteSize | null
di: ByteSize | null
diskIndices: ByteSize | null
'disk.used': ByteSize | null
du: ByteSize | null
diskUsed: ByteSize | null
'disk.avail': ByteSize | null
da: ByteSize | null
diskAvail: ByteSize | null
'disk.total': ByteSize | null
dt: ByteSize | null
diskTotal: ByteSize | null
'disk.percent': Percentage | null
dp: Percentage | null
diskPercent: Percentage | null
host: Host | null
h: Host | null
ip: Ip | null
node: string
n: string
'node.role': string | null
r: string | null
role: string | null
nodeRole: string | null
shards?: string
s?: string
'shards.undesired'?: string | null
'write_load.forecast'?: SpecUtilsStringified<double> | null
wlf?: SpecUtilsStringified<double> | null
writeLoadForecast?: SpecUtilsStringified<double> | null
'disk.indices.forecast'?: ByteSize | null
dif?: ByteSize | null
diskIndicesForecast?: ByteSize | null
'disk.indices'?: ByteSize | null
di?: ByteSize | null
diskIndices?: ByteSize | null
'disk.used'?: ByteSize | null
du?: ByteSize | null
diskUsed?: ByteSize | null
'disk.avail'?: ByteSize | null
da?: ByteSize | null
diskAvail?: ByteSize | null
'disk.total'?: ByteSize | null
dt?: ByteSize | null
diskTotal?: ByteSize | null
'disk.percent'?: Percentage | null
dp?: Percentage | null
diskPercent?: Percentage | null
host?: Host | null
h?: Host | null
ip?: Ip | null
node?: string
n?: string
'node.role'?: string | null
r?: string | null
role?: string | null
nodeRole?: string | null
}
export interface CatAllocationRequest extends CatCatRequestBase {
@ -6892,6 +7043,10 @@ export interface CatHealthHealthRecord {
i?: string
'shards.initializing'?: string
shardsInitializing?: string
'unassign.pri'?: string
up?: string
'shards.unassigned.primary'?: string
shardsUnassignedPrimary?: string
unassign?: string
u?: string
'shards.unassigned'?: string
@ -6955,6 +7110,7 @@ export interface CatIndicesIndicesRecord {
ss?: string | null
storeSize?: string | null
'pri.store.size'?: string | null
'dataset.size'?: string | null
'completion.size'?: string
cs?: string
completionSize?: string
@ -8072,6 +8228,7 @@ export interface CatShardsShardsRecord {
dc?: string | null
store?: string | null
sto?: string | null
dataset?: string | null
ip?: string | null
id?: string
node?: string | null
@ -8806,6 +8963,7 @@ export interface ClusterAllocationExplainClusterInfo {
export interface ClusterAllocationExplainCurrentNode {
id: Id
name: Name
roles: NodeRoles
attributes: Record<string, string>
transport_address: TransportAddress
weight_ranking: integer
@ -8828,6 +8986,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
node_decision: ClusterAllocationExplainDecision
node_id: Id
node_name: Name
roles: NodeRoles
store?: ClusterAllocationExplainAllocationStore
transport_address: TransportAddress
weight_ranking: integer
@ -8962,6 +9121,7 @@ export interface ClusterHealthHealthResponseBody {
task_max_waiting_in_queue?: Duration
task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
timed_out: boolean
unassigned_primary_shards: integer
unassigned_shards: integer
}
@ -8975,6 +9135,7 @@ export interface ClusterHealthIndexHealthStats {
shards?: Record<string, ClusterHealthShardHealthStats>
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterHealthRequest extends RequestBase {
@ -9001,6 +9162,7 @@ export interface ClusterHealthShardHealthStats {
relocating_shards: integer
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterInfoRequest extends RequestBase {
@ -10068,8 +10230,11 @@ export interface EnrichStatsCacheStats {
node_id: Id
count: integer
hits: integer
hits_time_in_millis: DurationValue<UnitMillis>
misses: integer
misses_time_in_millis: DurationValue<UnitMillis>
evictions: integer
size_in_bytes: long
}
export interface EnrichStatsCoordinatorStats {
@ -10192,8 +10357,10 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
export interface EsqlQueryRequest extends RequestBase {
format?: string
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
@ -11630,6 +11797,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
verbose?: boolean
}
export interface IndicesGetDataStreamResponse {
@ -12033,6 +12201,8 @@ export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveC
export interface IndicesResolveIndexRequest extends RequestBase {
name: Names
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
allow_no_indices?: boolean
}
export interface IndicesResolveIndexResolveIndexAliasItem {
@ -12622,7 +12792,7 @@ export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface IngestAppendProcessor extends IngestProcessorBase {
field: Field
value: any[]
value: any | any[]
allow_duplicates?: boolean
}
@ -12702,6 +12872,7 @@ export interface IngestDissectProcessor extends IngestProcessorBase {
export interface IngestDotExpanderProcessor extends IngestProcessorBase {
field: Field
override?: boolean
path?: string
}
@ -12728,6 +12899,22 @@ export interface IngestForeachProcessor extends IngestProcessorBase {
processor: IngestProcessorContainer
}
export interface IngestGeoGridProcessor extends IngestProcessorBase {
field: string
tile_type: IngestGeoGridTileType
target_field?: Field
parent_field?: Field
children_field?: Field
non_children_field?: Field
precision_field?: Field
ignore_missing?: boolean
target_format?: IngestGeoGridTargetFormat
}
export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
export interface IngestGeoIpProcessor extends IngestProcessorBase {
database_file?: string
field: Field
@ -12735,13 +12922,14 @@ export interface IngestGeoIpProcessor extends IngestProcessorBase {
ignore_missing?: boolean
properties?: string[]
target_field?: Field
download_database_on_pipeline_creation?: boolean
}
export interface IngestGrokProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
pattern_definitions?: Record<string, string>
patterns: string[]
patterns: GrokPattern[]
trace_match?: boolean
}
@ -12829,6 +13017,7 @@ export interface IngestPipeline {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
_meta?: Metadata
}
@ -12866,6 +13055,7 @@ export interface IngestProcessorContainer {
enrich?: IngestEnrichProcessor
fail?: IngestFailProcessor
foreach?: IngestForeachProcessor
geo_grid?: IngestGeoGridProcessor
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
@ -12876,6 +13066,7 @@ export interface IngestProcessorContainer {
kv?: IngestKeyValueProcessor
lowercase?: IngestLowercaseProcessor
pipeline?: IngestPipelineProcessor
redact?: IngestRedactProcessor
remove?: IngestRemoveProcessor
rename?: IngestRenameProcessor
reroute?: IngestRerouteProcessor
@ -12891,6 +13082,16 @@ export interface IngestProcessorContainer {
user_agent?: IngestUserAgentProcessor
}
export interface IngestRedactProcessor extends IngestProcessorBase {
field: Field
patterns: GrokPattern[]
pattern_definitions?: Record<string, string>
prefix?: string
suffix?: string
ignore_missing?: boolean
skip_if_unlicensed?: boolean
}
export interface IngestRemoveProcessor extends IngestProcessorBase {
field: Fields
keep?: Fields
@ -12975,12 +13176,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
export interface IngestUserAgentProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
options?: IngestUserAgentProperty[]
regex_file?: string
target_field?: Field
properties?: IngestUserAgentProperty[]
extract_device_type?: boolean
}
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
@ -13080,6 +13282,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
}
}
@ -13476,6 +13679,9 @@ export interface MlCalendarEvent {
description: string
end_time: DateTime
start_time: DateTime
skip_result?: boolean
skip_model_update?: boolean
force_time_shift?: integer
}
export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition
@ -13491,7 +13697,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
export interface MlCategory {
category_id: ulong
examples: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
job_id: Id
max_matching_length: ulong
partition_field_name?: string
@ -13880,9 +14086,7 @@ export interface MlDelayedDataCheckConfig {
export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'
export type MlDeploymentAssignmentState = 'starting' | 'started' | 'stopping' | 'failed'
export type MlDeploymentState = 'started' | 'starting' | 'stopping'
export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed'
export interface MlDetectionRule {
actions?: MlRuleAction[]
@ -14472,7 +14676,7 @@ export interface MlTrainedModelDeploymentStats {
rejected_execution_count: integer
reason: string
start_time: EpochTime<UnitMillis>
state: MlDeploymentState
state: MlDeploymentAssignmentState
threads_per_allocation: integer
timeout_count: integer
}
@ -16088,6 +16292,25 @@ export interface NodesHttp {
current_open?: integer
total_opened?: long
clients?: NodesClient[]
routes: Record<string, NodesHttpRoute>
}
export interface NodesHttpRoute {
requests: NodesHttpRouteRequests
responses: NodesHttpRouteResponses
}
export interface NodesHttpRouteRequests {
count: long
total_size_in_bytes: long
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesHttpRouteResponses {
count: long
total_size_in_bytes: long
handling_time_histogram: NodesTimeHttpHistogram[]
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesIndexingPressure {
@ -16102,16 +16325,25 @@ export interface NodesIndexingPressureMemory {
}
export interface NodesIngest {
pipelines?: Record<string, NodesIngestTotal>
pipelines?: Record<string, NodesIngestStats>
total?: NodesIngestTotal
}
export interface NodesIngestStats {
count: long
current: long
failed: long
processors: Record<string, NodesKeyedProcessor>[]
time_in_millis: DurationValue<UnitMillis>
ingested_as_first_pipeline_in_bytes: long
produced_as_first_pipeline_in_bytes: long
}
export interface NodesIngestTotal {
count?: long
current?: long
failed?: long
processors?: Record<string, NodesKeyedProcessor>[]
time_in_millis?: DurationValue<UnitMillis>
count: long
current: long
failed: long
time_in_millis: DurationValue<UnitMillis>
}
export interface NodesIoStatDevice {
@ -16316,6 +16548,12 @@ export interface NodesSerializedClusterStateDetail {
compressed_size_in_bytes?: long
}
export interface NodesSizeHttpHistogram {
count: long
ge_bytes?: long
lt_bytes?: long
}
export interface NodesStats {
adaptive_selection?: Record<string, NodesAdaptiveSelection>
breakers?: Record<string, NodesBreaker>
@ -16350,6 +16588,12 @@ export interface NodesThreadCount {
threads?: long
}
export interface NodesTimeHttpHistogram {
count: long
ge_millis?: long
lt_millis?: long
}
export interface NodesTransport {
inbound_handling_time_histogram?: NodesTransportHistogram[]
outbound_handling_time_histogram?: NodesTransportHistogram[]
@ -17360,6 +17604,15 @@ export interface SecurityRealmInfo {
type: string
}
export interface SecurityRemoteIndicesPrivileges {
clusters: Names
field_security?: SecurityFieldSecurity
names: Indices
privileges: SecurityIndexPrivilege[]
query?: SecurityIndicesPrivilegesQuery
allow_restricted_indices?: boolean
}
export interface SecurityRoleDescriptor {
cluster?: SecurityClusterPrivilege[]
indices?: SecurityIndicesPrivileges[]
@ -18048,6 +18301,7 @@ export interface SecurityPutRoleRequest extends RequestBase {
cluster?: SecurityClusterPrivilege[]
global?: Record<string, any>
indices?: SecurityIndicesPrivileges[]
remote_indices?: SecurityRemoteIndicesPrivileges[]
metadata?: Metadata
run_as?: string[]
description?: string
@ -18720,6 +18974,7 @@ export interface SnapshotSnapshotShardFailure {
node_id?: Id
reason: string
shard_id: Id
index_uuid: Id
status: string
}
@ -18877,6 +19132,20 @@ export interface SnapshotGetRepositoryRequest extends RequestBase {
export type SnapshotGetRepositoryResponse = Record<string, SnapshotRepository>
export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase {
name: Names
meta_thread_pool_concurrency?: integer
blob_thread_pool_concurrency?: integer
snapshot_verification_concurrency?: integer
index_verification_concurrency?: integer
index_snapshot_verification_concurrency?: integer
max_failed_shard_snapshots?: integer
verify_blob_contents?: boolean
max_bytes_per_sec?: string
}
export type SnapshotRepositoryVerifyIntegrityResponse = any
export interface SnapshotRestoreRequest extends RequestBase {
repository: Name
snapshot: Name
@ -18987,7 +19256,7 @@ export interface SqlGetAsyncStatusResponse {
}
export interface SqlQueryRequest extends RequestBase {
format?: string
format?: SqlQuerySqlFormat
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
catalog?: string
@ -19018,6 +19287,8 @@ export interface SqlQueryResponse {
rows: SqlRow[]
}
export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'
export interface SqlTranslateRequest extends RequestBase {
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
@ -19236,7 +19507,7 @@ export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
ecs_compatibility?: string
explain?: boolean
format?: string
grok_pattern?: string
grok_pattern?: GrokPattern
has_header_row?: boolean
line_merge_size_limit?: uint
lines_to_sample?: uint
@ -19264,7 +19535,7 @@ export interface TextStructureFindStructureResponse {
num_lines_analyzed: integer
column_names?: string[]
explanation?: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
multiline_start_pattern?: string
exclude_lines_pattern?: string
java_timestamp_formats?: string[]
@ -19294,7 +19565,7 @@ export interface TextStructureTestGrokPatternRequest extends RequestBase {
ecs_compatibility?: string
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
grok_pattern: string
grok_pattern: GrokPattern
text: string[]
}
}
@ -20336,7 +20607,7 @@ export interface XpackInfoNativeCodeInformation {
}
export interface XpackInfoRequest extends RequestBase {
categories?: string[]
categories?: XpackInfoXPackCategory[]
accept_enterprise?: boolean
human?: boolean
}
@ -20348,6 +20619,8 @@ export interface XpackInfoResponse {
tagline: string
}
export type XpackInfoXPackCategory = 'build' | 'features' | 'license'
export interface XpackUsageAnalytics extends XpackUsageBase {
stats: XpackUsageAnalyticsStatistics
}