Auto-generated code for main (#2362)

Author: Elastic Machine
Committed by: GitHub
Date: 2024-09-23 21:54:43 +02:00
Parent: 58b457eedc
Commit: 34704b2e5c

43 changed files with 550 additions and 177 deletions

View File

@@ -26,7 +26,7 @@ const response1 = await client.cluster.putComponentTemplate({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  human: "true",
+  detailed: "true",
+  actions: "indices:data/write/bulk",
+});
+console.log(response);
+----

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "alibabacloud_ai_search_embeddings",
id: "alibabacloud_ai_search_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -10,7 +10,7 @@ const response = await client.search({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "hugging_face_embeddings",
id: "hugging_face_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "google_vertex_ai_embeddings",
id: "google_vertex_ai_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -10,7 +10,7 @@ const response = await client.search({
"date.day_of_week": {
type: "keyword",
script:
"emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
aggs: {

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "azure-ai-studio-embeddings",
pipeline: "azure_ai_studio_embeddings",
pipeline: "azure_ai_studio_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "azure_ai_studio_embeddings",
id: "azure_ai_studio_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "openai-embeddings",
pipeline: "openai_embeddings",
pipeline: "openai_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.hotThreads();
+console.log(response);
+----

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "amazon-bedrock-embeddings",
pipeline: "amazon_bedrock_embeddings",
pipeline: "amazon_bedrock_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -10,7 +10,7 @@ const response = await client.search({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "openai_embeddings",
id: "openai_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "elser-embeddings",
pipeline: "elser_embeddings",
pipeline: "elser_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.nodes.stats({
+  human: "true",
+  filter_path: "nodes.*.indexing_pressure",
+});
+console.log(response);
+----

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "cohere_embeddings",
id: "cohere_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "hugging-face-embeddings",
pipeline: "hugging_face_embeddings",
pipeline: "hugging_face_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -3,8 +3,8 @@
[source, js]
----
-const response = await client.nodes.hotThreads({
-  node_id: "my-node,my-other-node",
+const response = await client.nodes.stats({
+  metric: "breaker",
});
console.log(response);
----

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "google-vertex-ai-embeddings",
pipeline: "google_vertex_ai_embeddings",
pipeline: "google_vertex_ai_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "cohere-embeddings",
pipeline: "cohere_embeddings",
pipeline: "cohere_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -0,0 +1,11 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.snapshot.create({
+  repository: "my_repository",
+  snapshot: "_verify_integrity",
+});
+console.log(response);
+----

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "mistral-embeddings",
pipeline: "mistral_embeddings",
pipeline: "mistral_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -7,7 +7,7 @@ const response = await client.search({
index: "semantic-embeddings",
query: {
semantic: {
field: "semantic_text",
field: "content",
query: "How to avoid muscle soreness while running?",
},
},

View File

@@ -4,7 +4,9 @@
[source, js]
----
const response = await client.tasks.list({
filter_path: "nodes.*.tasks",
pretty: "true",
human: "true",
detailed: "true",
});
console.log(response);
----

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "alibabacloud-ai-search-embeddings",
pipeline: "alibabacloud_ai_search_embeddings",
pipeline: "alibabacloud_ai_search_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "azure_openai_embeddings",
id: "azure_openai_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -0,0 +1,8 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.cluster.pendingTasks();
+console.log(response);
+----

View File

@@ -8,7 +8,7 @@ const response = await client.search({
day_of_week: {
type: "keyword",
script:
"\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ROOT))\n ",
"\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))\n ",
},
},
size: 0,

View File

@@ -7,14 +7,10 @@ const response = await client.indices.create({
index: "semantic-embeddings",
mappings: {
properties: {
-semantic_text: {
+content: {
type: "semantic_text",
inference_id: "my-elser-endpoint",
},
-content: {
-  type: "text",
-  copy_to: "semantic_text",
-},
},
},
});

View File

@@ -11,7 +11,7 @@ const response = await client.reindex({
},
dest: {
index: "azure-openai-embeddings",
pipeline: "azure_openai_embeddings",
pipeline: "azure_openai_embeddings_pipeline",
},
});
console.log(response);

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "mistral_embeddings",
id: "mistral_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -0,0 +1,12 @@
+// This file is autogenerated, DO NOT EDIT
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples
+
+[source, js]
+----
+const response = await client.tasks.list({
+  human: "true",
+  detailed: "true",
+  actions: "indices:data/write/search",
+});
+console.log(response);
+----

View File

@@ -5,7 +5,7 @@
----
const response = await client.cat.threadPool({
v: "true",
h: "id,name,active,rejected,completed",
h: "id,name,queue,active,rejected,completed",
});
console.log(response);
----

View File

@@ -12,7 +12,7 @@ const response = await client.indices.create({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@@ -11,7 +11,7 @@ const response = await client.indices.create({
type: "keyword",
script: {
source:
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))",
"emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
},
},
},

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "amazon_bedrock_embeddings",
id: "amazon_bedrock_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.ingest.putPipeline({
id: "elser_embeddings",
id: "elser_embeddings_pipeline",
processors: [
{
inference: {

View File

@@ -402,7 +402,7 @@ client.fieldCaps({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.
** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.
These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias,
or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request
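
The `fields` key added to `runtime_mappings` above corresponds to composite runtime fields. A minimal sketch of what that shape permits (the index name, `message` field, and grok pattern are illustrative, not part of this commit):

[source, js]
----
// Hypothetical: a composite runtime field that splits one source value
// into typed sub-fields, then asks for their capabilities.
const response = await client.fieldCaps({
  index: "my-index", // illustrative index name
  fields: "http.*",
  runtime_mappings: {
    http: {
      type: "composite",
      script: {
        source: 'emit(grok("%{COMMONAPACHELOG}").extract(doc["message"].value))',
      },
      fields: {
        clientip: { type: "ip" },
        verb: { type: "keyword" },
      },
    },
  },
});
console.log(response);
----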
@@ -750,6 +750,7 @@ client.openPointInTime({ index, keep_alive })
* *Request (object):*
** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices
** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to `match_none` on every shard.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on.
Random by default.
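
A minimal sketch of the new `index_filter` body parameter in use (the index pattern and filter are illustrative):

[source, js]
----
// Only shards of indices whose data can match the filter are included in the PIT.
const response = await client.openPointInTime({
  index: "my-index-*", // illustrative
  keep_alive: "1m",
  index_filter: {
    range: {
      "@timestamp": {
        gte: "now-1d/d",
      },
    },
  },
});
console.log(response.id);
----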
@@ -940,7 +941,7 @@ client.search({ ... })
** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
Supports wildcards (`*`).
To search all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Defines the aggregations that are run as part of the search request.
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Defines the aggregations that are run as part of the search request.
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field.
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@@ -999,7 +1000,7 @@ If this field is specified, the `_source` parameter defaults to `false`.
You can pass `_source: true` to return both source fields and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT).
If you provide a PIT, you cannot specify an `<index>` in the request path.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request.
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request.
These fields take precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search.
Each group maintains a statistics aggregation for its associated searches.
@@ -1098,7 +1099,7 @@ client.searchMvt({ index, field, zoom, x, y })
** *`zoom` (number)*: Zoom level for the vector tile to search
** *`x` (number)*: X coordinate for the vector tile to search
** *`y` (number)*: Y coordinate for the vector tile to search
-** *`aggs` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Sub-aggregations for the geotile_grid.
+** *`aggs` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Sub-aggregations for the geotile_grid.
Supports the following aggregation types:
- avg
@@ -1126,7 +1127,7 @@ each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon
of the cells bounding box. If 'point' each feature is a Point that is the centroid
of the cell.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000.
If 0, results don't include the hits layer.
@@ -1477,7 +1478,7 @@ client.asyncSearch.submit({ ... })
* *Request (object):*
** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@@ -1527,7 +1528,7 @@ parameter defaults to false. You can pass _source: true to return both source fi
and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
cannot specify an <index> in the request path.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
aggregation for its associated searches. You can retrieve these stats using
@@ -3511,7 +3512,7 @@ client.eql.search({ index, query })
** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10
** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit.
** *`result_position` (Optional, Enum("tail" | "head"))*
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*
** *`allow_no_indices` (Optional, boolean)*
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*
** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
@@ -3565,7 +3566,7 @@ and its format can change at any time but it can give some insight into the perf
of each part of the query.
** *`tables` (Optional, Record<string, Record<string, { integer, keyword, long, double }>>)*: Tables to use with the LOOKUP operation. The top level key is the table
name and the next level key is the column name.
-** *`format` (Optional, string)*: A short version of the Accept header, e.g. json, yaml.
+** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml.
** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format.
** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
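
With `format` now an enum, invalid values can be rejected at compile time in TypeScript. An illustrative call (the query text is made up):

[source, js]
----
const response = await client.esql.query({
  query: "FROM my-index | LIMIT 10", // illustrative
  format: "csv",
  delimiter: ";",
});
console.log(response);
----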
@@ -3668,7 +3669,7 @@ client.fleet.search({ index })
* *Request (object):*
** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*
** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*
** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
@@ -3717,7 +3718,7 @@ parameter defaults to false. You can pass _source: true to return both source fi
and stored fields in the search response.
** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
cannot specify an <index> in the request path.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
aggregation for its associated searches. You can retrieve these stats using
@@ -4030,7 +4031,7 @@ If specified, the `analyzer` parameter overrides this value.
** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
** *`text` (Optional, string | string[])*: Text to analyze.
If an array of strings is provided, it is analyzed as a multi-value field.
-** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, max_token_length } | { type, max_token_length } | { type, max_token_length } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, flags, group, pattern } | { type, rule_files })*: Tokenizer to use to convert text into tokens.
+** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens.
[discrete]
==== clear_cache
@@ -4671,6 +4672,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur
Supports a list of values, such as `open,hidden`.
** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned.
[discrete]
==== get_field_mapping
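
A minimal sketch of the new `verbose` flag (the data stream name is illustrative):

[source, js]
----
const response = await client.indices.getDataStream({
  name: "my-data-stream", // illustrative
  verbose: true,
});
console.log(response);
----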
@@ -5061,7 +5063,7 @@ application-specific metadata.
- Mapping parameters
** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents.
** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index.
-** *`runtime` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Mapping of runtime fields for the index.
+** *`runtime` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Mapping of runtime fields for the index.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
@@ -5262,6 +5264,10 @@ Resources on remote clusters can be specified using the `<cluster>`:`<name>` syn
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
[discrete]
==== rollover
@@ -5805,9 +5811,11 @@ client.ingest.putPipeline({ id })
** *`id` (string)*: ID of the ingest pipeline to create or update.
** *`_meta` (Optional, Record<string, User-defined value>)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
** *`description` (Optional, string)*: Description of the ingest pipeline.
-** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
-** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
+** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
+** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, redact, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
+** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated.
+When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates
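
A sketch of the new `deprecated` body field (the pipeline id and processor are illustrative):

[source, js]
----
const response = await client.ingest.putPipeline({
  id: "legacy-pipeline", // illustrative
  description: "Retained only for old index templates",
  processors: [
    {
      set: {
        field: "migrated",
        value: false,
      },
    },
  ],
  deprecated: true,
});
console.log(response);
----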
@@ -5829,7 +5837,7 @@ client.ingest.simulate({ docs })
** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
** *`id` (Optional, string)*: Pipeline to test.
If you don't specify a `pipeline` in the request body, this parameter is required.
-** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test.
+** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test.
If you don't specify the `pipeline` request path parameter, this parameter is required.
If you specify both this and the request path parameter, the API only uses the request path parameter.
** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline.
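
For reference, a minimal simulate call matching the signature above (the sample document and processor are illustrative):

[source, js]
----
const response = await client.ingest.simulate({
  docs: [
    {
      _source: {
        message: "HELLO",
      },
    },
  ],
  pipeline: {
    processors: [
      {
        lowercase: {
          field: "message",
        },
      },
    ],
  },
  verbose: true,
});
console.log(response);
----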
@@ -5990,7 +5998,7 @@ client.logstash.putPipeline({ id })
* *Request (object):*
** *`id` (string)*: Identifier for the pipeline.
-** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*
+** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
[discrete]
=== migration
@@ -7358,7 +7366,7 @@ client.ml.putDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches.
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches.
Support for aggregations is limited and should be used only with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years.
This search is split into time chunks in order to ensure the load on Elasticsearch is managed.
@@ -7389,7 +7397,7 @@ object is passed verbatim to Elasticsearch.
not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
value is randomly selected between `60s` and `120s`. This randomness improves the query performance
when there are multiple jobs running on the same node.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
The detector configuration objects in a job can contain functions that use these script fields.
** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
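
A hedged sketch of a datafeed whose search defines a runtime field (the ids, index, and script are illustrative, and the job is assumed to already exist):

[source, js]
----
const response = await client.ml.putDatafeed({
  datafeed_id: "datafeed-example", // illustrative
  job_id: "example-job", // illustrative; must reference an existing job
  indices: ["my-index"],
  runtime_mappings: {
    hour_of_day: {
      type: "long",
      script: {
        source: "emit(doc['@timestamp'].value.getHour())",
      },
    },
  },
});
console.log(response);
----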
@@ -7894,7 +7902,7 @@ client.ml.updateDatafeed({ datafeed_id })
** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed.
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
with low cardinality data.
** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time
chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of
@@ -7928,7 +7936,7 @@ when you are satisfied with the results of the job.
not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
value is randomly selected between `60s` and `120s`. This randomness improves the query performance
when there are multiple jobs running on the same node.
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.
The detector configuration objects in a job can contain functions that use these script fields.
** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
@@ -8531,7 +8539,7 @@ client.rollup.rollupSearch({ index })
* *Request (object):*
** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations.
+** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query.
** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data.
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
@@ -10588,7 +10596,7 @@ It ignores other request body parameters.
** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order).
-** *`runtime_mappings` (Optional, Record<string, { fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
+** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
precedence over mapped fields with the same name.
** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async.
** *`params` (Optional, Record<string, User-defined value>)*: Values for parameters in the query.
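
An illustrative SQL search using the runtime-field shape documented above (the index and field names are made up; the script mirrors the examples elsewhere in this commit):

[source, js]
----
const response = await client.sql.query({
  query: 'SELECT day_of_week, COUNT(*) FROM "my-index" GROUP BY day_of_week',
  runtime_mappings: {
    day_of_week: {
      type: "keyword",
      script: {
        source:
          "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))",
      },
    },
  },
});
console.log(response);
----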

View File

@@ -301,7 +301,7 @@ export default class Ingest {
async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<T.IngestPutPipelineResponse>
async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
-const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version']
+const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body

View File

@@ -47,11 +47,23 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['index']
+const acceptedBody: string[] = ['index_filter']
const querystring: Record<string, any> = {}
-const body = undefined
+// @ts-expect-error
+const userBody: any = params?.body
+let body: Record<string, any> | string
+if (typeof userBody === 'string') {
+  body = userBody
+} else {
+  body = userBody != null ? { ...userBody } : undefined
+}
for (const key in params) {
-if (acceptedPath.includes(key)) {
+if (acceptedBody.includes(key)) {
+  body = body ?? {}
+  // @ts-expect-error
+  body[key] = params[key]
+} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error

View File

@@ -884,9 +884,11 @@ export interface OpenPointInTimeRequest extends RequestBase {
preference?: string
routing?: Routing
expand_wildcards?: ExpandWildcards
+index_filter?: QueryDslQueryContainer
}
export interface OpenPointInTimeResponse {
+_shards: ShardStatistics
id: Id
}
@@ -1751,11 +1753,23 @@ export interface SearchShardsRequest extends RequestBase {
}
export interface SearchShardsResponse {
-nodes: Record<string, NodeAttributes>
+nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
shards: NodeShard[][]
indices: Record<IndexName, SearchShardsShardStoreIndex>
}
+export interface SearchShardsSearchShardsNodeAttributes {
+  name: NodeName
+  ephemeral_id: Id
+  transport_address: TransportAddress
+  external_id: string
+  attributes: Record<string, string>
+  roles: NodeRoles
+  version: VersionString
+  min_index_version: integer
+  max_index_version: integer
+}
export interface SearchShardsShardStoreIndex {
aliases?: Name[]
filter?: QueryDslQueryContainer
@@ -2243,6 +2257,8 @@ export interface GetStats {
total: long
}
+export type GrokPattern = string
export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
export type Host = string
@@ -2397,8 +2413,6 @@ export interface NodeAttributes {
id?: NodeId
name: NodeName
transport_address: TransportAddress
-roles?: NodeRoles
-external_id?: string
}
export type NodeId = string
@@ -2848,7 +2862,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
& { [property: string]: AggregationsAggregate | string | long }
-export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
+export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export interface AggregationsAggregateBase {
meta?: Metadata
@ -2933,6 +2947,7 @@ export interface AggregationsAggregationContainer {
sum?: AggregationsSumAggregation
sum_bucket?: AggregationsSumBucketAggregation
terms?: AggregationsTermsAggregation
time_series?: AggregationsTimeSeriesAggregation
top_hits?: AggregationsTopHitsAggregation
t_test?: AggregationsTTestAggregation
top_metrics?: AggregationsTopMetricsAggregation
@ -2942,9 +2957,9 @@ export interface AggregationsAggregationContainer {
}
export interface AggregationsAggregationRange {
from?: double
from?: double | null
key?: string
to?: double
to?: double | null
}
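// Editorial sketch: AggregationsAggregationRange now types from/to as double | null,
// so open-ended range bounds can be spelled out explicitly. Index and field names
// are hypothetical; whether you pass null or omit the key, the bound stays open.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const prices = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    price_bands: {
      range: {
        field: 'price',
        ranges: [
          { from: null, to: 100 }, // unbounded below
          { from: 100, to: null }, // unbounded above
        ],
      },
    },
  },
})
console.log(prices.aggregations)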
export interface AggregationsArrayPercentilesItem {
@ -4129,6 +4144,20 @@ export interface AggregationsTestPopulation {
filter?: QueryDslQueryContainer
}
export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase<AggregationsTimeSeriesBucket> {
}
export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
size?: integer
keyed?: boolean
}
export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
key: Record<Field, FieldValue>
}
export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long }
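// Editorial sketch of the newly typed time_series bucket aggregation, assuming a
// TSDB index (index.mode: time_series) named "metrics-demo" exists.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const tsResponse = await client.search({
  index: 'metrics-demo',
  size: 0,
  aggs: {
    ts: {
      // size and keyed mirror AggregationsTimeSeriesAggregation above
      time_series: { size: 10, keyed: false },
    },
  },
})
console.log(tsResponse.aggregations)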
export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
hits: SearchHitsMetadata<any>
}
@ -4309,6 +4338,11 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
}
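// Editorial sketch: registering the newly typed classic tokenizer in index
// settings. Index, tokenizer and analyzer names are hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'classic-demo',
  settings: {
    analysis: {
      tokenizer: {
        my_classic: { type: 'classic', max_token_length: 120 },
      },
      analyzer: {
        english_text: { type: 'custom', tokenizer: 'my_classic' },
      },
    },
  },
})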
export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
type: 'common_grams'
common_words?: string[]
@ -4395,7 +4429,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@ -4738,7 +4772,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisNoriAnalyzer {
@ -4903,6 +4937,16 @@ export interface AnalysisSimpleAnalyzer {
version?: VersionString
}
export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern_split'
pattern?: string
}
export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern'
pattern?: string
}
export interface AnalysisSnowballAnalyzer {
type: 'snowball'
version?: VersionString
@ -5010,6 +5054,10 @@ export interface AnalysisThaiAnalyzer {
stopwords_path?: string
}
export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
type: 'thai'
}
export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@ -5026,7 +5074,7 @@ export interface AnalysisTokenizerBase {
version?: VersionString
}
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
type: 'trim'
@ -5151,6 +5199,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
type: 'completion'
}
export interface MappingCompositeSubField {
type: MappingRuntimeFieldType
}
export interface MappingConstantKeywordProperty extends MappingPropertyBase {
value?: any
type: 'constant_keyword'
@ -5494,6 +5546,7 @@ export interface MappingRoutingField {
}
export interface MappingRuntimeField {
fields?: Record<string, MappingCompositeSubField>
fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
format?: string
input_field?: Field
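// Editorial sketch of a composite runtime field, which the new
// MappingCompositeSubField and MappingRuntimeField.fields types describe. The grok
// extraction script follows the Elasticsearch docs; the index name is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.putMapping({
  index: 'weblogs',
  runtime: {
    http: {
      type: 'composite',
      script: {
        source: "emit(grok('%{COMMONAPACHELOG}').extract(doc['message'].value))",
      },
      fields: {
        clientip: { type: 'ip' },
        verb: { type: 'keyword' },
      },
    },
  },
})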
@ -6684,39 +6737,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
export type CatAliasesResponse = CatAliasesAliasesRecord[]
export interface CatAllocationAllocationRecord {
shards: string
s: string
'shards.undesired': string | null
'write_load.forecast': double | null
wlf: double | null
writeLoadForecast: double | null
'disk.indices.forecast': ByteSize | null
dif: ByteSize | null
diskIndicesForecast: ByteSize | null
'disk.indices': ByteSize | null
di: ByteSize | null
diskIndices: ByteSize | null
'disk.used': ByteSize | null
du: ByteSize | null
diskUsed: ByteSize | null
'disk.avail': ByteSize | null
da: ByteSize | null
diskAvail: ByteSize | null
'disk.total': ByteSize | null
dt: ByteSize | null
diskTotal: ByteSize | null
'disk.percent': Percentage | null
dp: Percentage | null
diskPercent: Percentage | null
host: Host | null
h: Host | null
ip: Ip | null
node: string
n: string
'node.role': string | null
r: string | null
role: string | null
nodeRole: string | null
shards?: string
s?: string
'shards.undesired'?: string | null
'write_load.forecast'?: SpecUtilsStringified<double> | null
wlf?: SpecUtilsStringified<double> | null
writeLoadForecast?: SpecUtilsStringified<double> | null
'disk.indices.forecast'?: ByteSize | null
dif?: ByteSize | null
diskIndicesForecast?: ByteSize | null
'disk.indices'?: ByteSize | null
di?: ByteSize | null
diskIndices?: ByteSize | null
'disk.used'?: ByteSize | null
du?: ByteSize | null
diskUsed?: ByteSize | null
'disk.avail'?: ByteSize | null
da?: ByteSize | null
diskAvail?: ByteSize | null
'disk.total'?: ByteSize | null
dt?: ByteSize | null
diskTotal?: ByteSize | null
'disk.percent'?: Percentage | null
dp?: Percentage | null
diskPercent?: Percentage | null
host?: Host | null
h?: Host | null
ip?: Ip | null
node?: string
n?: string
'node.role'?: string | null
r?: string | null
role?: string | null
nodeRole?: string | null
}
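// Editorial sketch: every column on CatAllocationAllocationRecord is now optional
// (and write_load.forecast is a stringified double), so guard when reading rows.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const rows = await client.cat.allocation({ format: 'json', bytes: 'b' })
for (const row of rows) {
  console.log(row.node ?? '(unassigned)', row['disk.percent'] ?? 'n/a')
}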
export interface CatAllocationRequest extends CatCatRequestBase {
@ -6815,6 +6868,10 @@ export interface CatHealthHealthRecord {
i?: string
'shards.initializing'?: string
shardsInitializing?: string
'unassign.pri'?: string
up?: string
'shards.unassigned.primary'?: string
shardsUnassignedPrimary?: string
unassign?: string
u?: string
'shards.unassigned'?: string
@ -6878,6 +6935,7 @@ export interface CatIndicesIndicesRecord {
ss?: string | null
storeSize?: string | null
'pri.store.size'?: string | null
'dataset.size'?: string | null
'completion.size'?: string
cs?: string
completionSize?: string
@ -7995,6 +8053,7 @@ export interface CatShardsShardsRecord {
dc?: string | null
store?: string | null
sto?: string | null
dataset?: string | null
ip?: string | null
id?: string
node?: string | null
@ -8717,6 +8776,7 @@ export interface ClusterAllocationExplainClusterInfo {
export interface ClusterAllocationExplainCurrentNode {
id: Id
name: Name
roles: NodeRoles
attributes: Record<string, string>
transport_address: TransportAddress
weight_ranking: integer
@ -8739,6 +8799,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
node_decision: ClusterAllocationExplainDecision
node_id: Id
node_name: Name
roles: NodeRoles
store?: ClusterAllocationExplainAllocationStore
transport_address: TransportAddress
weight_ranking: integer
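// Editorial sketch: node roles are now part of both the current-node and the
// per-node allocation explanations. The index name is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const explain = await client.cluster.allocationExplain({
  index: 'my-index',
  shard: 0,
  primary: true,
})
console.log(explain.current_node?.roles)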
@ -8870,6 +8931,7 @@ export interface ClusterHealthHealthResponseBody {
task_max_waiting_in_queue?: Duration
task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
timed_out: boolean
unassigned_primary_shards: integer
unassigned_shards: integer
}
@ -8883,6 +8945,7 @@ export interface ClusterHealthIndexHealthStats {
shards?: Record<string, ClusterHealthShardHealthStats>
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterHealthRequest extends RequestBase {
@ -8909,6 +8972,7 @@ export interface ClusterHealthShardHealthStats {
relocating_shards: integer
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
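// Editorial sketch: unassigned_primary_shards is now reported at the cluster,
// index and shard levels of the health response.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const health = await client.cluster.health({ level: 'indices' })
console.log(health.status, health.unassigned_primary_shards)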
export interface ClusterInfoRequest extends RequestBase {
@ -9916,8 +9980,11 @@ export interface EnrichStatsCacheStats {
node_id: Id
count: integer
hits: integer
hits_time_in_millis: DurationValue<UnitMillis>
misses: integer
misses_time_in_millis: DurationValue<UnitMillis>
evictions: integer
size_in_bytes: long
}
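// Editorial sketch: the enrich cache stats now include hit/miss timings and a
// size_in_bytes gauge.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const enrichStats = await client.enrich.stats()
for (const cache of enrichStats.cache_stats ?? []) {
  console.log(cache.node_id, cache.hits, cache.hits_time_in_millis, cache.size_in_bytes)
}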
export interface EnrichStatsCoordinatorStats {
@ -10037,8 +10104,10 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
export interface EsqlQueryRequest extends RequestBase {
format?: string
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
columnar?: boolean
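// Editorial sketch: format is now the EsqlQueryEsqlFormat union rather than a free
// string, so typos are caught at compile time. The index name is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const table = await client.esql.query({
  format: 'csv',
  query: 'FROM logs-demo | LIMIT 10',
})
console.log(table)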
@ -11446,6 +11515,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
verbose?: boolean
}
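// Editorial sketch: the new verbose flag asks get-data-stream for additional
// per-stream detail. The stream pattern is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const streams = await client.indices.getDataStream({
  name: 'logs-*',
  verbose: true,
})
console.log(streams.data_streams.map((ds) => ds.name))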
export interface IndicesGetDataStreamResponse {
@ -11830,6 +11900,8 @@ export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveC
export interface IndicesResolveIndexRequest extends RequestBase {
name: Names
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
allow_no_indices?: boolean
}
export interface IndicesResolveIndexResolveIndexAliasItem {
@ -12397,7 +12469,7 @@ export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface IngestAppendProcessor extends IngestProcessorBase {
field: Field
value: any[]
value: any | any[]
allow_duplicates?: boolean
}
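// Editorial sketch: the append processor's value is now typed any | any[], so a
// single scalar no longer needs wrapping in an array. Pipeline id hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.ingest.putPipeline({
  id: 'append-env',
  processors: [{ append: { field: 'tags', value: 'production' } }],
})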
@ -12477,6 +12549,7 @@ export interface IngestDissectProcessor extends IngestProcessorBase {
export interface IngestDotExpanderProcessor extends IngestProcessorBase {
field: Field
override?: boolean
path?: string
}
@ -12503,6 +12576,22 @@ export interface IngestForeachProcessor extends IngestProcessorBase {
processor: IngestProcessorContainer
}
export interface IngestGeoGridProcessor extends IngestProcessorBase {
field: string
tile_type: IngestGeoGridTileType
target_field?: Field
parent_field?: Field
children_field?: Field
non_children_field?: Field
precision_field?: Field
ignore_missing?: boolean
target_format?: IngestGeoGridTargetFormat
}
export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
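// Editorial sketch of the new geo_grid processor: converts a geotile, geohex or
// geohash cell id into an indexable shape. Field and pipeline names hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.ingest.putPipeline({
  id: 'geotile-to-shape',
  processors: [
    {
      geo_grid: {
        field: 'geocell',
        tile_type: 'geotile',
        target_format: 'geojson',
      },
    },
  ],
})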
export interface IngestGeoIpProcessor extends IngestProcessorBase {
database_file?: string
field: Field
@ -12510,13 +12599,14 @@ export interface IngestGeoIpProcessor extends IngestProcessorBase {
ignore_missing?: boolean
properties?: string[]
target_field?: Field
download_database_on_pipeline_creation?: boolean
}
export interface IngestGrokProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
pattern_definitions?: Record<string, string>
patterns: string[]
patterns: GrokPattern[]
trace_match?: boolean
}
@ -12604,6 +12694,7 @@ export interface IngestPipeline {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
_meta?: Metadata
}
@ -12641,6 +12732,7 @@ export interface IngestProcessorContainer {
enrich?: IngestEnrichProcessor
fail?: IngestFailProcessor
foreach?: IngestForeachProcessor
geo_grid?: IngestGeoGridProcessor
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
@ -12651,6 +12743,7 @@ export interface IngestProcessorContainer {
kv?: IngestKeyValueProcessor
lowercase?: IngestLowercaseProcessor
pipeline?: IngestPipelineProcessor
redact?: IngestRedactProcessor
remove?: IngestRemoveProcessor
rename?: IngestRenameProcessor
reroute?: IngestRerouteProcessor
@ -12666,6 +12759,16 @@ export interface IngestProcessorContainer {
user_agent?: IngestUserAgentProcessor
}
export interface IngestRedactProcessor extends IngestProcessorBase {
field: Field
patterns: GrokPattern[]
pattern_definitions?: Record<string, string>
prefix?: string
suffix?: string
ignore_missing?: boolean
skip_if_unlicensed?: boolean
}
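// Editorial sketch of the new redact processor, masking grok captures in place.
// The pipeline id is hypothetical; IP and EMAILADDRESS are stock grok patterns.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.ingest.putPipeline({
  id: 'redact-pii',
  processors: [
    {
      redact: {
        field: 'message',
        patterns: ['%{IP:client}', '%{EMAILADDRESS:email}'],
        prefix: '<',
        suffix: '>',
      },
    },
  ],
})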
export interface IngestRemoveProcessor extends IngestProcessorBase {
field: Fields
keep?: Fields
@ -12750,12 +12853,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
export interface IngestUserAgentProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
options?: IngestUserAgentProperty[]
regex_file?: string
target_field?: Field
properties?: IngestUserAgentProperty[]
extract_device_type?: boolean
}
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
@ -12850,6 +12954,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
}
export type IngestPutPipelineResponse = AcknowledgedResponseBase
@ -13253,7 +13358,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
export interface MlCategory {
category_id: ulong
examples: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
job_id: Id
max_matching_length: ulong
partition_field_name?: string
@ -15736,6 +15841,25 @@ export interface NodesHttp {
current_open?: integer
total_opened?: long
clients?: NodesClient[]
routes: Record<string, NodesHttpRoute>
}
export interface NodesHttpRoute {
requests: NodesHttpRouteRequests
responses: NodesHttpRouteResponses
}
export interface NodesHttpRouteRequests {
count: long
total_size_in_bytes: long
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesHttpRouteResponses {
count: long
total_size_in_bytes: long
handling_time_histogram: NodesTimeHttpHistogram[]
size_histogram: NodesSizeHttpHistogram[]
}
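// Editorial sketch: node HTTP stats now break requests and responses down per
// route, with size and handling-time histograms.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const nodeStats = await client.nodes.stats({ metric: 'http' })
for (const [id, node] of Object.entries(nodeStats.nodes)) {
  for (const [route, stats] of Object.entries(node.http?.routes ?? {})) {
    console.log(id, route, stats.requests.count, stats.responses.count)
  }
}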
export interface NodesIndexingPressure {
@ -15750,16 +15874,25 @@ export interface NodesIndexingPressureMemory {
}
export interface NodesIngest {
pipelines?: Record<string, NodesIngestTotal>
pipelines?: Record<string, NodesIngestStats>
total?: NodesIngestTotal
}
export interface NodesIngestStats {
count: long
current: long
failed: long
processors: Record<string, NodesKeyedProcessor>[]
time_in_millis: DurationValue<UnitMillis>
ingested_as_first_pipeline_in_bytes: long
produced_as_first_pipeline_in_bytes: long
}
export interface NodesIngestTotal {
count?: long
current?: long
failed?: long
processors?: Record<string, NodesKeyedProcessor>[]
time_in_millis?: DurationValue<UnitMillis>
count: long
current: long
failed: long
time_in_millis: DurationValue<UnitMillis>
}
export interface NodesIoStatDevice {
@ -15964,6 +16097,12 @@ export interface NodesSerializedClusterStateDetail {
compressed_size_in_bytes?: long
}
export interface NodesSizeHttpHistogram {
count: long
ge_bytes?: long
lt_bytes?: long
}
export interface NodesStats {
adaptive_selection?: Record<string, NodesAdaptiveSelection>
breakers?: Record<string, NodesBreaker>
@ -15998,6 +16137,12 @@ export interface NodesThreadCount {
threads?: long
}
export interface NodesTimeHttpHistogram {
count: long
ge_millis?: long
lt_millis?: long
}
export interface NodesTransport {
inbound_handling_time_histogram?: NodesTransportHistogram[]
outbound_handling_time_histogram?: NodesTransportHistogram[]
@ -18754,7 +18899,7 @@ export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
ecs_compatibility?: string
explain?: boolean
format?: string
grok_pattern?: string
grok_pattern?: GrokPattern
has_header_row?: boolean
line_merge_size_limit?: uint
lines_to_sample?: uint
@ -18781,7 +18926,7 @@ export interface TextStructureFindStructureResponse {
num_lines_analyzed: integer
column_names?: string[]
explanation?: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
multiline_start_pattern?: string
exclude_lines_pattern?: string
java_timestamp_formats?: string[]
@ -18809,7 +18954,7 @@ export interface TextStructureTestGrokPatternMatchedText {
export interface TextStructureTestGrokPatternRequest extends RequestBase {
ecs_compatibility?: string
grok_pattern: string
grok_pattern: GrokPattern
text: string[]
}
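// Editorial sketch of the test_grok_pattern endpoint, whose grok_pattern parameter
// is now the shared GrokPattern alias.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const grokResult = await client.textStructure.testGrokPattern({
  grok_pattern: '%{WORD:first_word} %{NUMBER:count}',
  text: ['hello 42'],
})
console.log(grokResult.matches)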

View File

@ -917,9 +917,14 @@ export interface OpenPointInTimeRequest extends RequestBase {
preference?: string
routing?: Routing
expand_wildcards?: ExpandWildcards
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
index_filter?: QueryDslQueryContainer
}
}
export interface OpenPointInTimeResponse {
_shards: ShardStatistics
id: Id
}
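// Editorial sketch: open-point-in-time now accepts an index_filter (shown here at
// the top level; the nested body key above is deprecated) and reports _shards in
// its response. The index name is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const pit = await client.openPointInTime({
  index: 'my-index',
  keep_alive: '1m',
  index_filter: { range: { '@timestamp': { gte: 'now-1d' } } },
})
console.log(pit.id, pit._shards.successful)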
@ -1809,11 +1814,23 @@ export interface SearchShardsRequest extends RequestBase {
}
export interface SearchShardsResponse {
nodes: Record<string, NodeAttributes>
nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
shards: NodeShard[][]
indices: Record<IndexName, SearchShardsShardStoreIndex>
}
export interface SearchShardsSearchShardsNodeAttributes {
name: NodeName
ephemeral_id: Id
transport_address: TransportAddress
external_id: string
attributes: Record<string, string>
roles: NodeRoles
version: VersionString
min_index_version: integer
max_index_version: integer
}
export interface SearchShardsShardStoreIndex {
aliases?: Name[]
filter?: QueryDslQueryContainer
@ -2316,6 +2333,8 @@ export interface GetStats {
total: long
}
export type GrokPattern = string
export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
export type Host = string
@ -2470,8 +2489,6 @@ export interface NodeAttributes {
id?: NodeId
name: NodeName
transport_address: TransportAddress
roles?: NodeRoles
external_id?: string
}
export type NodeId = string
@ -2921,7 +2938,7 @@ export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMulti
export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
& { [property: string]: AggregationsAggregate | string | long }
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
export interface AggregationsAggregateBase {
meta?: Metadata
@ -3006,6 +3023,7 @@ export interface AggregationsAggregationContainer {
sum?: AggregationsSumAggregation
sum_bucket?: AggregationsSumBucketAggregation
terms?: AggregationsTermsAggregation
time_series?: AggregationsTimeSeriesAggregation
top_hits?: AggregationsTopHitsAggregation
t_test?: AggregationsTTestAggregation
top_metrics?: AggregationsTopMetricsAggregation
@ -3015,9 +3033,9 @@ export interface AggregationsAggregationContainer {
}
export interface AggregationsAggregationRange {
from?: double
from?: double | null
key?: string
to?: double
to?: double | null
}
export interface AggregationsArrayPercentilesItem {
@ -4202,6 +4220,20 @@ export interface AggregationsTestPopulation {
filter?: QueryDslQueryContainer
}
export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase<AggregationsTimeSeriesBucket> {
}
export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
size?: integer
keyed?: boolean
}
export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
key: Record<Field, FieldValue>
}
export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long }
export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
hits: SearchHitsMetadata<any>
}
@ -4382,6 +4414,11 @@ export interface AnalysisCjkAnalyzer {
stopwords_path?: string
}
export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
type: 'classic'
max_token_length?: integer
}
export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
type: 'common_grams'
common_words?: string[]
@ -4468,7 +4505,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
@ -4811,7 +4848,7 @@ export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
custom_token_chars?: string
max_gram: integer
min_gram: integer
token_chars: AnalysisTokenChar[]
token_chars?: AnalysisTokenChar[]
}
export interface AnalysisNoriAnalyzer {
@ -4976,6 +5013,16 @@ export interface AnalysisSimpleAnalyzer {
version?: VersionString
}
export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern_split'
pattern?: string
}
export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase {
type: 'simple_pattern'
pattern?: string
}
export interface AnalysisSnowballAnalyzer {
type: 'snowball'
version?: VersionString
@ -5083,6 +5130,10 @@ export interface AnalysisThaiAnalyzer {
stopwords_path?: string
}
export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase {
type: 'thai'
}
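// Editorial sketch covering the other newly typed tokenizers: simple_pattern,
// simple_pattern_split and thai. The index name is hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'tokenizer-demo',
  settings: {
    analysis: {
      tokenizer: {
        three_digits: { type: 'simple_pattern', pattern: '[0-9]{3}' },
        on_hyphens: { type: 'simple_pattern_split', pattern: '-' },
        thai_words: { type: 'thai' },
      },
    },
  },
})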
export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@ -5099,7 +5150,7 @@ export interface AnalysisTokenizerBase {
version?: VersionString
}
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisNoriTokenizer | AnalysisPathHierarchyTokenizer | AnalysisStandardTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisKuromojiTokenizer | AnalysisPatternTokenizer | AnalysisIcuTokenizer
export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer
export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
type: 'trim'
@ -5224,6 +5275,10 @@ export interface MappingCompletionProperty extends MappingDocValuesPropertyBase
type: 'completion'
}
export interface MappingCompositeSubField {
type: MappingRuntimeFieldType
}
export interface MappingConstantKeywordProperty extends MappingPropertyBase {
value?: any
type: 'constant_keyword'
@ -5567,6 +5622,7 @@ export interface MappingRoutingField {
}
export interface MappingRuntimeField {
fields?: Record<string, MappingCompositeSubField>
fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]
format?: string
input_field?: Field
@ -6761,39 +6817,39 @@ export interface CatAliasesRequest extends CatCatRequestBase {
export type CatAliasesResponse = CatAliasesAliasesRecord[]
export interface CatAllocationAllocationRecord {
shards: string
s: string
'shards.undesired': string | null
'write_load.forecast': double | null
wlf: double | null
writeLoadForecast: double | null
'disk.indices.forecast': ByteSize | null
dif: ByteSize | null
diskIndicesForecast: ByteSize | null
'disk.indices': ByteSize | null
di: ByteSize | null
diskIndices: ByteSize | null
'disk.used': ByteSize | null
du: ByteSize | null
diskUsed: ByteSize | null
'disk.avail': ByteSize | null
da: ByteSize | null
diskAvail: ByteSize | null
'disk.total': ByteSize | null
dt: ByteSize | null
diskTotal: ByteSize | null
'disk.percent': Percentage | null
dp: Percentage | null
diskPercent: Percentage | null
host: Host | null
h: Host | null
ip: Ip | null
node: string
n: string
'node.role': string | null
r: string | null
role: string | null
nodeRole: string | null
shards?: string
s?: string
'shards.undesired'?: string | null
'write_load.forecast'?: SpecUtilsStringified<double> | null
wlf?: SpecUtilsStringified<double> | null
writeLoadForecast?: SpecUtilsStringified<double> | null
'disk.indices.forecast'?: ByteSize | null
dif?: ByteSize | null
diskIndicesForecast?: ByteSize | null
'disk.indices'?: ByteSize | null
di?: ByteSize | null
diskIndices?: ByteSize | null
'disk.used'?: ByteSize | null
du?: ByteSize | null
diskUsed?: ByteSize | null
'disk.avail'?: ByteSize | null
da?: ByteSize | null
diskAvail?: ByteSize | null
'disk.total'?: ByteSize | null
dt?: ByteSize | null
diskTotal?: ByteSize | null
'disk.percent'?: Percentage | null
dp?: Percentage | null
diskPercent?: Percentage | null
host?: Host | null
h?: Host | null
ip?: Ip | null
node?: string
n?: string
'node.role'?: string | null
r?: string | null
role?: string | null
nodeRole?: string | null
}
export interface CatAllocationRequest extends CatCatRequestBase {
@ -6892,6 +6948,10 @@ export interface CatHealthHealthRecord {
i?: string
'shards.initializing'?: string
shardsInitializing?: string
'unassign.pri'?: string
up?: string
'shards.unassigned.primary'?: string
shardsUnassignedPrimary?: string
unassign?: string
u?: string
'shards.unassigned'?: string
@ -6955,6 +7015,7 @@ export interface CatIndicesIndicesRecord {
ss?: string | null
storeSize?: string | null
'pri.store.size'?: string | null
'dataset.size'?: string | null
'completion.size'?: string
cs?: string
completionSize?: string
@ -8072,6 +8133,7 @@ export interface CatShardsShardsRecord {
dc?: string | null
store?: string | null
sto?: string | null
dataset?: string | null
ip?: string | null
id?: string
node?: string | null
@ -8806,6 +8868,7 @@ export interface ClusterAllocationExplainClusterInfo {
export interface ClusterAllocationExplainCurrentNode {
id: Id
name: Name
roles: NodeRoles
attributes: Record<string, string>
transport_address: TransportAddress
weight_ranking: integer
@ -8828,6 +8891,7 @@ export interface ClusterAllocationExplainNodeAllocationExplanation {
node_decision: ClusterAllocationExplainDecision
node_id: Id
node_name: Name
roles: NodeRoles
store?: ClusterAllocationExplainAllocationStore
transport_address: TransportAddress
weight_ranking: integer
@ -8962,6 +9026,7 @@ export interface ClusterHealthHealthResponseBody {
task_max_waiting_in_queue?: Duration
task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
timed_out: boolean
unassigned_primary_shards: integer
unassigned_shards: integer
}
@ -8975,6 +9040,7 @@ export interface ClusterHealthIndexHealthStats {
shards?: Record<string, ClusterHealthShardHealthStats>
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterHealthRequest extends RequestBase {
@ -9001,6 +9067,7 @@ export interface ClusterHealthShardHealthStats {
relocating_shards: integer
status: HealthStatus
unassigned_shards: integer
unassigned_primary_shards: integer
}
export interface ClusterInfoRequest extends RequestBase {
@ -10068,8 +10135,11 @@ export interface EnrichStatsCacheStats {
node_id: Id
count: integer
hits: integer
hits_time_in_millis: DurationValue<UnitMillis>
misses: integer
misses_time_in_millis: DurationValue<UnitMillis>
evictions: integer
size_in_bytes: long
}
export interface EnrichStatsCoordinatorStats {
@ -10192,8 +10262,10 @@ export type EsqlTableValuesLongDouble = double | double[]
export type EsqlTableValuesLongValue = long | long[]
export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'
export interface EsqlQueryRequest extends RequestBase {
format?: string
format?: EsqlQueryEsqlFormat
delimiter?: string
drop_null_columns?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
@ -11630,6 +11702,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
expand_wildcards?: ExpandWildcards
include_defaults?: boolean
master_timeout?: Duration
verbose?: boolean
}
export interface IndicesGetDataStreamResponse {
@ -12033,6 +12106,8 @@ export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveC
export interface IndicesResolveIndexRequest extends RequestBase {
name: Names
expand_wildcards?: ExpandWildcards
ignore_unavailable?: boolean
allow_no_indices?: boolean
}
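// Editorial sketch: resolve-index now takes ignore_unavailable and
// allow_no_indices alongside expand_wildcards.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const resolved = await client.indices.resolveIndex({
  name: 'logs-*',
  expand_wildcards: 'open',
  ignore_unavailable: true,
  allow_no_indices: true,
})
console.log(resolved.indices.length, resolved.aliases.length, resolved.data_streams.length)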
export interface IndicesResolveIndexResolveIndexAliasItem {
@ -12622,7 +12697,7 @@ export type InferencePutResponse = InferenceInferenceEndpointInfo
export interface IngestAppendProcessor extends IngestProcessorBase {
field: Field
value: any[]
value: any | any[]
allow_duplicates?: boolean
}
@ -12702,6 +12777,7 @@ export interface IngestDissectProcessor extends IngestProcessorBase {
export interface IngestDotExpanderProcessor extends IngestProcessorBase {
field: Field
override?: boolean
path?: string
}
@ -12728,6 +12804,22 @@ export interface IngestForeachProcessor extends IngestProcessorBase {
processor: IngestProcessorContainer
}
export interface IngestGeoGridProcessor extends IngestProcessorBase {
field: string
tile_type: IngestGeoGridTileType
target_field?: Field
parent_field?: Field
children_field?: Field
non_children_field?: Field
precision_field?: Field
ignore_missing?: boolean
target_format?: IngestGeoGridTargetFormat
}
export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
export interface IngestGeoIpProcessor extends IngestProcessorBase {
database_file?: string
field: Field
@ -12735,13 +12827,14 @@ export interface IngestGeoIpProcessor extends IngestProcessorBase {
ignore_missing?: boolean
properties?: string[]
target_field?: Field
download_database_on_pipeline_creation?: boolean
}
export interface IngestGrokProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
pattern_definitions?: Record<string, string>
patterns: string[]
patterns: GrokPattern[]
trace_match?: boolean
}
@ -12829,6 +12922,7 @@ export interface IngestPipeline {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
_meta?: Metadata
}
@ -12866,6 +12960,7 @@ export interface IngestProcessorContainer {
enrich?: IngestEnrichProcessor
fail?: IngestFailProcessor
foreach?: IngestForeachProcessor
geo_grid?: IngestGeoGridProcessor
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
@ -12876,6 +12971,7 @@ export interface IngestProcessorContainer {
kv?: IngestKeyValueProcessor
lowercase?: IngestLowercaseProcessor
pipeline?: IngestPipelineProcessor
redact?: IngestRedactProcessor
remove?: IngestRemoveProcessor
rename?: IngestRenameProcessor
reroute?: IngestRerouteProcessor
@ -12891,6 +12987,16 @@ export interface IngestProcessorContainer {
user_agent?: IngestUserAgentProcessor
}
export interface IngestRedactProcessor extends IngestProcessorBase {
field: Field
patterns: GrokPattern[]
pattern_definitions?: Record<string, string>
prefix?: string
suffix?: string
ignore_missing?: boolean
skip_if_unlicensed?: boolean
}
export interface IngestRemoveProcessor extends IngestProcessorBase {
field: Fields
keep?: Fields
@ -12975,12 +13081,13 @@ export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
export interface IngestUserAgentProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
options?: IngestUserAgentProperty[]
regex_file?: string
target_field?: Field
properties?: IngestUserAgentProperty[]
extract_device_type?: boolean
}
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
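// Editorial sketch: the user_agent processor now uses lowercase property names,
// replaces options with properties, and adds extract_device_type. Pipeline id
// hypothetical.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.ingest.putPipeline({
  id: 'parse-user-agent',
  processors: [
    {
      user_agent: {
        field: 'agent',
        properties: ['name', 'os', 'version'],
        extract_device_type: true,
      },
    },
  ],
})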
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
@ -13080,6 +13187,7 @@ export interface IngestPutPipelineRequest extends RequestBase {
on_failure?: IngestProcessorContainer[]
processors?: IngestProcessorContainer[]
version?: VersionNumber
deprecated?: boolean
}
}
@ -13491,7 +13599,7 @@ export type MlCategorizationStatus = 'ok' | 'warn'
export interface MlCategory {
category_id: ulong
examples: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
job_id: Id
max_matching_length: ulong
partition_field_name?: string
@ -16088,6 +16196,25 @@ export interface NodesHttp {
current_open?: integer
total_opened?: long
clients?: NodesClient[]
routes: Record<string, NodesHttpRoute>
}
export interface NodesHttpRoute {
requests: NodesHttpRouteRequests
responses: NodesHttpRouteResponses
}
export interface NodesHttpRouteRequests {
count: long
total_size_in_bytes: long
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesHttpRouteResponses {
count: long
total_size_in_bytes: long
handling_time_histogram: NodesTimeHttpHistogram[]
size_histogram: NodesSizeHttpHistogram[]
}
export interface NodesIndexingPressure {
@ -16102,16 +16229,25 @@ export interface NodesIndexingPressureMemory {
}
export interface NodesIngest {
pipelines?: Record<string, NodesIngestTotal>
pipelines?: Record<string, NodesIngestStats>
total?: NodesIngestTotal
}
export interface NodesIngestStats {
count: long
current: long
failed: long
processors: Record<string, NodesKeyedProcessor>[]
time_in_millis: DurationValue<UnitMillis>
ingested_as_first_pipeline_in_bytes: long
produced_as_first_pipeline_in_bytes: long
}
export interface NodesIngestTotal {
count?: long
current?: long
failed?: long
processors?: Record<string, NodesKeyedProcessor>[]
time_in_millis?: DurationValue<UnitMillis>
count: long
current: long
failed: long
time_in_millis: DurationValue<UnitMillis>
}
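// Editorial sketch: per-pipeline ingest stats are now NodesIngestStats, which adds
// per-processor detail and the first-pipeline byte counters.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const ingestStats = await client.nodes.stats({ metric: 'ingest' })
for (const node of Object.values(ingestStats.nodes)) {
  for (const [pipeline, p] of Object.entries(node.ingest?.pipelines ?? {})) {
    console.log(pipeline, p.count, p.ingested_as_first_pipeline_in_bytes)
  }
}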
export interface NodesIoStatDevice {
@ -16316,6 +16452,12 @@ export interface NodesSerializedClusterStateDetail {
compressed_size_in_bytes?: long
}
export interface NodesSizeHttpHistogram {
count: long
ge_bytes?: long
lt_bytes?: long
}
export interface NodesStats {
adaptive_selection?: Record<string, NodesAdaptiveSelection>
breakers?: Record<string, NodesBreaker>
@ -16350,6 +16492,12 @@ export interface NodesThreadCount {
threads?: long
}
export interface NodesTimeHttpHistogram {
count: long
ge_millis?: long
lt_millis?: long
}
export interface NodesTransport {
inbound_handling_time_histogram?: NodesTransportHistogram[]
outbound_handling_time_histogram?: NodesTransportHistogram[]
@ -19236,7 +19384,7 @@ export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
ecs_compatibility?: string
explain?: boolean
format?: string
grok_pattern?: string
grok_pattern?: GrokPattern
has_header_row?: boolean
line_merge_size_limit?: uint
lines_to_sample?: uint
@ -19264,7 +19412,7 @@ export interface TextStructureFindStructureResponse {
num_lines_analyzed: integer
column_names?: string[]
explanation?: string[]
grok_pattern?: string
grok_pattern?: GrokPattern
multiline_start_pattern?: string
exclude_lines_pattern?: string
java_timestamp_formats?: string[]
@ -19294,7 +19442,7 @@ export interface TextStructureTestGrokPatternRequest extends RequestBase {
ecs_compatibility?: string
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
grok_pattern: string
grok_pattern: GrokPattern
text: string[]
}
}