Auto-generated API code (#2867)

This commit is contained in:
Elastic Machine
2025-06-09 17:29:04 +02:00
committed by GitHub
parent d1ba1423c8
commit 05b7ee6f5b
5 changed files with 178 additions and 21 deletions

View File

@ -1734,7 +1734,7 @@ client.search({ ... })
** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. ** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution.
** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL.
** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. ** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. ** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit. ** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit.
** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. ** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page.
** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. ** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
@ -7231,9 +7231,45 @@ Changes dynamic index settings in real time.
For data streams, index setting changes are applied to all backing indices by default. For data streams, index setting changes are applied to all backing indices by default.
To revert a setting to the default value, use a null value. To revert a setting to the default value, use a null value.
The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
----
{
"number_of_replicas": 1
}
----
Or you can use an `index` setting object:
----
{
"index": {
"number_of_replicas": 1
}
}
----
Or you can use dot notation:
----
{
"index.number_of_replicas": 1
}
----
Or you can embed any of the aforementioned options in a `settings` object. For example:
----
{
"settings": {
"index": {
"number_of_replicas": 1
}
}
}
----
NOTE: You can only define new analyzers on closed indices. NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index. To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream. You cannot close the write index of a data stream.
@ -8016,9 +8052,6 @@ Perform chat completion inference
The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `chat_completion` task type for `openai` and `elastic` inference services. It only works with the `chat_completion` task type for `openai` and `elastic` inference services.
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
@ -8421,7 +8454,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser
==== Arguments ==== Arguments
* *Request (object):* * *Request (object):*
** *`task_type` (Enum("rerank" | "text_embedding"))*: The type of the inference task that the model will perform. ** *`task_type` (Enum("rerank" | "text_embedding" | "completion" | "chat_completion"))*: The type of the inference task that the model will perform.
** *`googlevertexai_inference_id` (string)*: The unique identifier of the inference endpoint. ** *`googlevertexai_inference_id` (string)*: The unique identifier of the inference endpoint.
** *`service` (Enum("googlevertexai"))*: The type of service supported for the specified task type. In this case, `googlevertexai`. ** *`service` (Enum("googlevertexai"))*: The type of service supported for the specified task type. In this case, `googlevertexai`.
** *`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })*: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. ** *`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })*: Settings used to install the inference model. These settings are specific to the `googlevertexai` service.

View File

@ -1854,7 +1854,7 @@ export default class Indices {
} }
/** /**
* Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. 
You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot notation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-update-settings.html | Elasticsearch API documentation} * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-update-settings.html | Elasticsearch API documentation}
*/ */
async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse> async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>

View File

@ -45,7 +45,7 @@ export default class Inference {
} }
/** /**
* Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. 
If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/chat-completion-inference-api.html | Elasticsearch API documentation} * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/chat-completion-inference-api.html | Elasticsearch API documentation}
*/ */
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse> async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>

View File

@ -2243,7 +2243,7 @@ export type EpochTime<Unit = unknown> = Unit
export interface ErrorCauseKeys { export interface ErrorCauseKeys {
type: string type: string
reason?: string reason?: string | null
stack_trace?: string stack_trace?: string
caused_by?: ErrorCause caused_by?: ErrorCause
root_cause?: ErrorCause[] root_cause?: ErrorCause[]
@ -2426,6 +2426,12 @@ export interface InlineGetKeys<TDocument = unknown> {
export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument> export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any } & { [property: string]: any }
/** One weighted entry in `LinearRetriever.retrievers`: a child retriever plus how its scores are scaled and normalized. */
export interface InnerRetriever {
// child retriever whose top documents feed the linear combination
retriever: RetrieverContainer
// weight applied to this retriever's scores — presumably a linear multiplier; confirm against server docs
weight: float
// score normalization applied before weighting; see `ScoreNormalizer` ('none' | 'minmax' | 'l2_norm')
normalizer: ScoreNormalizer
}
export type Ip = string export type Ip = string
export interface KnnQuery extends QueryDslQueryBase { export interface KnnQuery extends QueryDslQueryBase {
@ -2471,6 +2477,11 @@ export type Level = 'cluster' | 'indices' | 'shards'
export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
/** Retriever that linearly combines the scores of several weighted child retrievers. */
export interface LinearRetriever extends RetrieverBase {
// weighted child retrievers to combine; each entry carries its own weight and normalizer
retrievers?: InnerRetriever[]
// number of top documents considered in the ranking window — TODO confirm exact semantics against server docs
rank_window_size: integer
}
export type MapboxVectorTiles = ArrayBuffer export type MapboxVectorTiles = ArrayBuffer
export interface MergesStats { export interface MergesStats {
@ -2559,6 +2570,13 @@ export type Password = string
export type Percentage = string | float export type Percentage = string | float
/** Retriever that pins specific documents on top of the results produced by an inner retriever. */
export interface PinnedRetriever extends RetrieverBase {
// retriever producing the organic (non-pinned) results
retriever: RetrieverContainer
// IDs of documents to pin; alternative to `docs`
ids?: string[]
// explicit index/id pairs to pin; alternative to `ids`
docs?: SpecifiedDocument[]
// number of top documents considered in the ranking window — TODO confirm exact semantics against server docs
rank_window_size: integer
}
export type PipelineName = string export type PipelineName = string
export interface PluginStats { export interface PluginStats {
@ -2644,6 +2662,11 @@ export interface RescoreVector {
oversample: float oversample: float
} }
/** Retriever that applies one or more rescore definitions on top of an inner retriever's results. */
export interface RescorerRetriever extends RetrieverBase {
// retriever producing the initial top documents to be rescored
retriever: RetrieverContainer
// rescore definition(s) applied to the inner retriever's results
rescore: SearchRescore | SearchRescore[]
}
export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
export interface Retries { export interface Retries {
@ -2654,6 +2677,7 @@ export interface Retries {
export interface RetrieverBase { export interface RetrieverBase {
filter?: QueryDslQueryContainer | QueryDslQueryContainer[] filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
min_score?: float min_score?: float
_name?: string
} }
export interface RetrieverContainer { export interface RetrieverContainer {
@ -2662,6 +2686,9 @@ export interface RetrieverContainer {
rrf?: RRFRetriever rrf?: RRFRetriever
text_similarity_reranker?: TextSimilarityReranker text_similarity_reranker?: TextSimilarityReranker
rule?: RuleRetriever rule?: RuleRetriever
rescorer?: RescorerRetriever
linear?: LinearRetriever
pinned?: PinnedRetriever
} }
export type Routing = string export type Routing = string
@ -2672,7 +2699,7 @@ export interface RrfRank {
} }
export interface RuleRetriever extends RetrieverBase { export interface RuleRetriever extends RetrieverBase {
ruleset_ids: Id[] ruleset_ids: Id | Id[]
match_criteria: any match_criteria: any
retriever: RetrieverContainer retriever: RetrieverContainer
rank_window_size?: integer rank_window_size?: integer
@ -2680,6 +2707,8 @@ export interface RuleRetriever extends RetrieverBase {
export type ScalarValue = long | double | string | boolean | null export type ScalarValue = long | double | string | boolean | null
/** Score normalization methods available to retrievers (used by `InnerRetriever.normalizer`). */
export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm'
export interface ScoreSort { export interface ScoreSort {
order?: SortOrder order?: SortOrder
} }
@ -2828,6 +2857,11 @@ export type SortOrder = 'asc' | 'desc'
export type SortResults = FieldValue[] export type SortResults = FieldValue[]
/** An index/id pair identifying a concrete document (used by `PinnedRetriever.docs`). */
export interface SpecifiedDocument {
// index containing the document — presumably defaults to the search target when omitted; confirm
index?: IndexName
// ID of the document (required)
id: Id
}
export interface StandardRetriever extends RetrieverBase { export interface StandardRetriever extends RetrieverBase {
query?: QueryDslQueryContainer query?: QueryDslQueryContainer
search_after?: SortResults search_after?: SortResults
@ -6108,7 +6142,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed' export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase { export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile geotile?: GeoTile
geohash?: GeoHash geohash?: GeoHash
geohex?: GeoHexCell geohex?: GeoHexCell
} }
@ -6178,6 +6212,8 @@ export interface QueryDslIntervalsContainer {
fuzzy?: QueryDslIntervalsFuzzy fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard wildcard?: QueryDslIntervalsWildcard
} }
@ -6223,9 +6259,26 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
fuzzy?: QueryDslIntervalsFuzzy fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard wildcard?: QueryDslIntervalsWildcard
} }
/** `range` rule for an intervals query: matches terms that fall within the given term range. */
export interface QueryDslIntervalsRange {
// analyzer used to normalize the range bounds — TODO confirm against intervals query docs
analyzer?: string
// lower bound, inclusive
gte?: string
// lower bound, exclusive
gt?: string
// upper bound, inclusive
lte?: string
// upper bound, exclusive
lt?: string
// alternative field to match on — presumably overrides the query's target field; confirm
use_field?: Field
}
/** `regexp` rule for an intervals query: matches terms that satisfy a regular expression. */
export interface QueryDslIntervalsRegexp {
// analyzer used to normalize the pattern — TODO confirm against intervals query docs
analyzer?: string
// regular expression the matched terms must satisfy (required)
pattern: string
// alternative field to match on — presumably overrides the query's target field; confirm
use_field?: Field
}
export interface QueryDslIntervalsWildcard { export interface QueryDslIntervalsWildcard {
analyzer?: string analyzer?: string
pattern: string pattern: string
@ -6543,7 +6596,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
export interface QueryDslRuleQuery extends QueryDslQueryBase { export interface QueryDslRuleQuery extends QueryDslQueryBase {
organic: QueryDslQueryContainer organic: QueryDslQueryContainer
ruleset_ids: Id[] ruleset_ids?: Id | Id[]
ruleset_id?: string
match_criteria: any match_criteria: any
} }
@ -13208,7 +13262,7 @@ export interface InferenceGoogleVertexAITaskSettings {
top_n?: integer top_n?: integer
} }
export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion'
export interface InferenceHuggingFaceServiceSettings { export interface InferenceHuggingFaceServiceSettings {
api_key: string api_key: string
@ -19900,6 +19954,14 @@ export interface SlmSnapshotLifecycle {
stats: SlmStatistics stats: SlmStatistics
} }
/** Per-policy snapshot lifecycle counters, returned in `SlmGetStatsResponse.policy_stats`. */
export interface SlmSnapshotPolicyStats {
// ID of the snapshot lifecycle policy these counters belong to
policy: string
snapshots_taken: long
snapshots_failed: long
snapshots_deleted: long
snapshot_deletion_failures: long
}
export interface SlmStatistics { export interface SlmStatistics {
retention_deletion_time?: Duration retention_deletion_time?: Duration
retention_deletion_time_millis?: DurationValue<UnitMillis> retention_deletion_time_millis?: DurationValue<UnitMillis>
@ -19965,7 +20027,7 @@ export interface SlmGetStatsResponse {
total_snapshot_deletion_failures: long total_snapshot_deletion_failures: long
total_snapshots_failed: long total_snapshots_failed: long
total_snapshots_taken: long total_snapshots_taken: long
policy_stats: string[] policy_stats: SlmSnapshotPolicyStats[]
} }
export interface SlmGetStatusRequest extends RequestBase { export interface SlmGetStatusRequest extends RequestBase {

View File

@ -2320,7 +2320,7 @@ export type EpochTime<Unit = unknown> = Unit
export interface ErrorCauseKeys { export interface ErrorCauseKeys {
type: string type: string
reason?: string reason?: string | null
stack_trace?: string stack_trace?: string
caused_by?: ErrorCause caused_by?: ErrorCause
root_cause?: ErrorCause[] root_cause?: ErrorCause[]
@ -2503,6 +2503,12 @@ export interface InlineGetKeys<TDocument = unknown> {
export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument> export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any } & { [property: string]: any }
/** One weighted entry in `LinearRetriever.retrievers`: a child retriever plus how its scores are scaled and normalized. */
export interface InnerRetriever {
// child retriever whose top documents feed the linear combination
retriever: RetrieverContainer
// weight applied to this retriever's scores — presumably a linear multiplier; confirm against server docs
weight: float
// score normalization applied before weighting; see `ScoreNormalizer` ('none' | 'minmax' | 'l2_norm')
normalizer: ScoreNormalizer
}
export type Ip = string export type Ip = string
export interface KnnQuery extends QueryDslQueryBase { export interface KnnQuery extends QueryDslQueryBase {
@ -2548,6 +2554,11 @@ export type Level = 'cluster' | 'indices' | 'shards'
export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
/** Retriever that linearly combines the scores of several weighted child retrievers. */
export interface LinearRetriever extends RetrieverBase {
// weighted child retrievers to combine; each entry carries its own weight and normalizer
retrievers?: InnerRetriever[]
// number of top documents considered in the ranking window — TODO confirm exact semantics against server docs
rank_window_size: integer
}
export type MapboxVectorTiles = ArrayBuffer export type MapboxVectorTiles = ArrayBuffer
export interface MergesStats { export interface MergesStats {
@ -2636,6 +2647,13 @@ export type Password = string
export type Percentage = string | float export type Percentage = string | float
/** Retriever that pins specific documents on top of the results produced by an inner retriever. */
export interface PinnedRetriever extends RetrieverBase {
// retriever producing the organic (non-pinned) results
retriever: RetrieverContainer
// IDs of documents to pin; alternative to `docs`
ids?: string[]
// explicit index/id pairs to pin; alternative to `ids`
docs?: SpecifiedDocument[]
// number of top documents considered in the ranking window — TODO confirm exact semantics against server docs
rank_window_size: integer
}
export type PipelineName = string export type PipelineName = string
export interface PluginStats { export interface PluginStats {
@ -2721,6 +2739,11 @@ export interface RescoreVector {
oversample: float oversample: float
} }
/** Retriever that applies one or more rescore definitions on top of an inner retriever's results. */
export interface RescorerRetriever extends RetrieverBase {
// retriever producing the initial top documents to be rescored
retriever: RetrieverContainer
// rescore definition(s) applied to the inner retriever's results
rescore: SearchRescore | SearchRescore[]
}
export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
export interface Retries { export interface Retries {
@ -2731,6 +2754,7 @@ export interface Retries {
export interface RetrieverBase { export interface RetrieverBase {
filter?: QueryDslQueryContainer | QueryDslQueryContainer[] filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
min_score?: float min_score?: float
_name?: string
} }
export interface RetrieverContainer { export interface RetrieverContainer {
@ -2739,6 +2763,9 @@ export interface RetrieverContainer {
rrf?: RRFRetriever rrf?: RRFRetriever
text_similarity_reranker?: TextSimilarityReranker text_similarity_reranker?: TextSimilarityReranker
rule?: RuleRetriever rule?: RuleRetriever
rescorer?: RescorerRetriever
linear?: LinearRetriever
pinned?: PinnedRetriever
} }
export type Routing = string export type Routing = string
@ -2749,7 +2776,7 @@ export interface RrfRank {
} }
export interface RuleRetriever extends RetrieverBase { export interface RuleRetriever extends RetrieverBase {
ruleset_ids: Id[] ruleset_ids: Id | Id[]
match_criteria: any match_criteria: any
retriever: RetrieverContainer retriever: RetrieverContainer
rank_window_size?: integer rank_window_size?: integer
@ -2757,6 +2784,8 @@ export interface RuleRetriever extends RetrieverBase {
export type ScalarValue = long | double | string | boolean | null export type ScalarValue = long | double | string | boolean | null
/** Score normalization methods available to retrievers (used by `InnerRetriever.normalizer`). */
export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm'
export interface ScoreSort { export interface ScoreSort {
order?: SortOrder order?: SortOrder
} }
@ -2905,6 +2934,11 @@ export type SortOrder = 'asc' | 'desc'
export type SortResults = FieldValue[] export type SortResults = FieldValue[]
/** An index/id pair identifying a concrete document (used by `PinnedRetriever.docs`). */
export interface SpecifiedDocument {
// index containing the document — presumably defaults to the search target when omitted; confirm
index?: IndexName
// ID of the document (required)
id: Id
}
export interface StandardRetriever extends RetrieverBase { export interface StandardRetriever extends RetrieverBase {
query?: QueryDslQueryContainer query?: QueryDslQueryContainer
search_after?: SortResults search_after?: SortResults
@ -6185,7 +6219,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
export type QueryDslGeoExecution = 'memory' | 'indexed' export type QueryDslGeoExecution = 'memory' | 'indexed'
export interface QueryDslGeoGridQuery extends QueryDslQueryBase { export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
geogrid?: GeoTile geotile?: GeoTile
geohash?: GeoHash geohash?: GeoHash
geohex?: GeoHexCell geohex?: GeoHexCell
} }
@ -6255,6 +6289,8 @@ export interface QueryDslIntervalsContainer {
fuzzy?: QueryDslIntervalsFuzzy fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard wildcard?: QueryDslIntervalsWildcard
} }
@ -6300,9 +6336,26 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
fuzzy?: QueryDslIntervalsFuzzy fuzzy?: QueryDslIntervalsFuzzy
match?: QueryDslIntervalsMatch match?: QueryDslIntervalsMatch
prefix?: QueryDslIntervalsPrefix prefix?: QueryDslIntervalsPrefix
range?: QueryDslIntervalsRange
regexp?: QueryDslIntervalsRegexp
wildcard?: QueryDslIntervalsWildcard wildcard?: QueryDslIntervalsWildcard
} }
/** `range` rule for an intervals query: matches terms that fall within the given term range. */
export interface QueryDslIntervalsRange {
// analyzer used to normalize the range bounds — TODO confirm against intervals query docs
analyzer?: string
// lower bound, inclusive
gte?: string
// lower bound, exclusive
gt?: string
// upper bound, inclusive
lte?: string
// upper bound, exclusive
lt?: string
// alternative field to match on — presumably overrides the query's target field; confirm
use_field?: Field
}
/** `regexp` rule for an intervals query: matches terms that satisfy a regular expression. */
export interface QueryDslIntervalsRegexp {
// analyzer used to normalize the pattern — TODO confirm against intervals query docs
analyzer?: string
// regular expression the matched terms must satisfy (required)
pattern: string
// alternative field to match on — presumably overrides the query's target field; confirm
use_field?: Field
}
export interface QueryDslIntervalsWildcard { export interface QueryDslIntervalsWildcard {
analyzer?: string analyzer?: string
pattern: string pattern: string
@ -6620,7 +6673,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {
export interface QueryDslRuleQuery extends QueryDslQueryBase { export interface QueryDslRuleQuery extends QueryDslQueryBase {
organic: QueryDslQueryContainer organic: QueryDslQueryContainer
ruleset_ids: Id[] ruleset_ids?: Id | Id[]
ruleset_id?: string
match_criteria: any match_criteria: any
} }
@ -13450,7 +13504,7 @@ export interface InferenceGoogleVertexAITaskSettings {
top_n?: integer top_n?: integer
} }
export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion'
export interface InferenceHuggingFaceServiceSettings { export interface InferenceHuggingFaceServiceSettings {
api_key: string api_key: string
@ -20479,6 +20533,14 @@ export interface SlmSnapshotLifecycle {
stats: SlmStatistics stats: SlmStatistics
} }
/** Per-policy snapshot lifecycle counters, returned in `SlmGetStatsResponse.policy_stats`. */
export interface SlmSnapshotPolicyStats {
// ID of the snapshot lifecycle policy these counters belong to
policy: string
snapshots_taken: long
snapshots_failed: long
snapshots_deleted: long
snapshot_deletion_failures: long
}
export interface SlmStatistics { export interface SlmStatistics {
retention_deletion_time?: Duration retention_deletion_time?: Duration
retention_deletion_time_millis?: DurationValue<UnitMillis> retention_deletion_time_millis?: DurationValue<UnitMillis>
@ -20544,7 +20606,7 @@ export interface SlmGetStatsResponse {
total_snapshot_deletion_failures: long total_snapshot_deletion_failures: long
total_snapshots_failed: long total_snapshots_failed: long
total_snapshots_taken: long total_snapshots_taken: long
policy_stats: string[] policy_stats: SlmSnapshotPolicyStats[]
} }
export interface SlmGetStatusRequest extends RequestBase { export interface SlmGetStatusRequest extends RequestBase {