From aa9249bf2575678c321010e775027e4068301a89 Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 31 Mar 2025 17:11:34 +0100
Subject: [PATCH] Auto-generated API code (#2688)

---
 docs/reference.asciidoc     |  4 ++--
 src/api/types.ts            | 32 +++++++++++++++++++++++++++++---
 src/api/typesWithBodyKey.ts | 32 +++++++++++++++++++++++++++++---
 3 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 87db4a2e9..9aee853ec 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -6850,7 +6850,7 @@ a new date field is added instead of string.
 not used at all by Elasticsearch, but can be used to store
 application-specific metadata.
 ** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
-** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
+** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include:
 
 - Field name
 - Field data type
@@ -9712,7 +9712,7 @@ specified.
 ** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then
 compressed_definition cannot be specified.
 ** *`description` (Optional, string)*: A human-readable description of the inference trained model.
-** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression
+** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, learning_to_rank, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression
 or classification configuration. It must match the underlying
 definition.trained_model's target_type. For pre-packaged models such as
 ELSER the config is not required.
diff --git a/src/api/types.ts b/src/api/types.ts
index 552b11dbd..c37ab2e7f 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -5297,6 +5297,10 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
   fielddata?: IndicesNumericFielddata
   index?: boolean
   null_value?: boolean
+  ignore_malformed?: boolean
+  script?: Script | string
+  on_script_error?: MappingOnScriptError
+  time_series_dimension?: boolean
   type: 'boolean'
 }
 
@@ -10165,10 +10169,11 @@ export interface EnrichDeletePolicyRequest extends RequestBase {
 
 export type EnrichDeletePolicyResponse = AcknowledgedResponseBase
 
-export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED'
+export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED'
 
 export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
   phase: EnrichExecutePolicyEnrichPolicyPhase
+  step?: string
 }
 
 export interface EnrichExecutePolicyRequest extends RequestBase {
@@ -10178,7 +10183,7 @@ export interface EnrichExecutePolicyRequest extends RequestBase {
 
 export interface EnrichExecutePolicyResponse {
   status?: EnrichExecutePolicyExecuteEnrichPolicyStatus
-  task_id?: TaskId
+  task?: TaskId
 }
 
 export interface EnrichGetPolicyRequest extends RequestBase {
@@ -12674,7 +12679,7 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
-export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+export interface InferenceInferenceChunkingSettings {
   max_chunk_size?: integer
   overlap?: integer
   sentence_overlap?: integer
@@ -14269,6 +14274,8 @@ export interface MlExponentialAverageCalculationContext {
   previous_exponential_average_ms?: DurationValue<UnitFloatMillis>
 }
 
+export type MlFeatureExtractor = MlQueryFeatureExtractor
+
 export interface MlFillMaskInferenceOptions {
   mask_token?: string
   num_top_classes?: integer
@@ -14334,6 +14341,7 @@ export interface MlInferenceConfigCreateContainer {
   text_classification?: MlTextClassificationInferenceOptions
   zero_shot_classification?: MlZeroShotClassificationInferenceOptions
   fill_mask?: MlFillMaskInferenceOptions
+  learning_to_rank?: MlLearningToRankConfig
   ner?: MlNerInferenceOptions
   pass_through?: MlPassThroughInferenceOptions
   text_embedding?: MlTextEmbeddingInferenceOptions
@@ -14480,6 +14488,12 @@ export interface MlJobTimingStats {
   minimum_bucket_processing_time_ms?: DurationValue<UnitFloatMillis>
 }
 
+export interface MlLearningToRankConfig {
+  default_params?: Record<string, any>
+  feature_extractors?: Record<string, MlFeatureExtractor>[]
+  num_top_feature_importance_values: integer
+}
+
 export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
 
 export interface MlModelPackageConfig {
@@ -14622,6 +14636,12 @@ export interface MlPerPartitionCategorization {
 
 export type MlPredictedValue = ScalarValue | ScalarValue[]
 
+export interface MlQueryFeatureExtractor {
+  default_score?: float
+  feature_name: string
+  query: QueryDslQueryContainer
+}
+
 export interface MlQuestionAnsweringInferenceOptions {
   num_top_classes?: integer
   tokenization?: MlTokenizationConfigContainer
@@ -14666,6 +14686,7 @@ export interface MlTextClassificationInferenceOptions {
   tokenization?: MlTokenizationConfigContainer
   results_field?: string
   classification_labels?: string[]
+  vocabulary?: MlVocabulary
 }
 
 export interface MlTextClassificationInferenceUpdateOptions {
@@ -14708,6 +14729,7 @@ export interface MlTokenizationConfigContainer {
   bert_ja?: MlNlpBertTokenizationConfig
   mpnet?: MlNlpBertTokenizationConfig
   roberta?: MlNlpRobertaTokenizationConfig
+  xlm_roberta?: MlXlmRobertaTokenizationConfig
 }
 
 export type MlTokenizationTruncate = 'first' | 'second' | 'none'
@@ -14785,6 +14807,7 @@ export interface MlTrainedModelConfig {
   model_size_bytes?: ByteSize
   model_package?: MlModelPackageConfig
   location?: MlTrainedModelLocation
+  platform_architecture?: string
   prefix_strings?: MlTrainedModelPrefixStrings
 }
 
@@ -14920,6 +14943,9 @@ export interface MlVocabulary {
   index: IndexName
 }
 
+export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig {
+}
+
 export interface MlZeroShotClassificationInferenceOptions {
   tokenization?: MlTokenizationConfigContainer
   hypothesis_template?: string
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 3d667c22e..721119f5b 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -5374,6 +5374,10 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase {
   fielddata?: IndicesNumericFielddata
   index?: boolean
   null_value?: boolean
+  ignore_malformed?: boolean
+  script?: Script | string
+  on_script_error?: MappingOnScriptError
+  time_series_dimension?: boolean
   type: 'boolean'
 }
 
@@ -10330,10 +10334,11 @@ export interface EnrichDeletePolicyRequest extends RequestBase {
 
 export type EnrichDeletePolicyResponse = AcknowledgedResponseBase
 
-export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED'
+export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED'
 
 export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
   phase: EnrichExecutePolicyEnrichPolicyPhase
+  step?: string
 }
 
 export interface EnrichExecutePolicyRequest extends RequestBase {
@@ -10343,7 +10348,7 @@ export interface EnrichExecutePolicyRequest extends RequestBase {
 
 export interface EnrichExecutePolicyResponse {
   status?: EnrichExecutePolicyExecuteEnrichPolicyStatus
-  task_id?: TaskId
+  task?: TaskId
 }
 
 export interface EnrichGetPolicyRequest extends RequestBase {
@@ -12914,7 +12919,7 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
-export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+export interface InferenceInferenceChunkingSettings {
  max_chunk_size?: integer
   overlap?: integer
   sentence_overlap?: integer
@@ -14531,6 +14536,8 @@ export interface MlExponentialAverageCalculationContext {
   previous_exponential_average_ms?: DurationValue<UnitFloatMillis>
 }
 
+export type MlFeatureExtractor = MlQueryFeatureExtractor
+
 export interface MlFillMaskInferenceOptions {
   mask_token?: string
   num_top_classes?: integer
@@ -14596,6 +14603,7 @@ export interface MlInferenceConfigCreateContainer {
   text_classification?: MlTextClassificationInferenceOptions
   zero_shot_classification?: MlZeroShotClassificationInferenceOptions
   fill_mask?: MlFillMaskInferenceOptions
+  learning_to_rank?: MlLearningToRankConfig
   ner?: MlNerInferenceOptions
   pass_through?: MlPassThroughInferenceOptions
   text_embedding?: MlTextEmbeddingInferenceOptions
@@ -14742,6 +14750,12 @@ export interface MlJobTimingStats {
   minimum_bucket_processing_time_ms?: DurationValue<UnitFloatMillis>
 }
 
+export interface MlLearningToRankConfig {
+  default_params?: Record<string, any>
+  feature_extractors?: Record<string, MlFeatureExtractor>[]
+  num_top_feature_importance_values: integer
+}
+
 export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
 
 export interface MlModelPackageConfig {
@@ -14884,6 +14898,12 @@ export interface MlPerPartitionCategorization {
 
 export type MlPredictedValue = ScalarValue | ScalarValue[]
 
+export interface MlQueryFeatureExtractor {
+  default_score?: float
+  feature_name: string
+  query: QueryDslQueryContainer
+}
+
 export interface MlQuestionAnsweringInferenceOptions {
   num_top_classes?: integer
   tokenization?: MlTokenizationConfigContainer
@@ -14928,6 +14948,7 @@ export interface MlTextClassificationInferenceOptions {
   tokenization?: MlTokenizationConfigContainer
   results_field?: string
   classification_labels?: string[]
+  vocabulary?: MlVocabulary
 }
 
 export interface MlTextClassificationInferenceUpdateOptions {
@@ -14970,6 +14991,7 @@ export interface MlTokenizationConfigContainer {
   bert_ja?: MlNlpBertTokenizationConfig
   mpnet?: MlNlpBertTokenizationConfig
   roberta?: MlNlpRobertaTokenizationConfig
+  xlm_roberta?: MlXlmRobertaTokenizationConfig
 }
 
 export type MlTokenizationTruncate = 'first' | 'second' | 'none'
@@ -15047,6 +15069,7 @@ export interface MlTrainedModelConfig {
   model_size_bytes?: ByteSize
   model_package?: MlModelPackageConfig
   location?: MlTrainedModelLocation
+  platform_architecture?: string
   prefix_strings?: MlTrainedModelPrefixStrings
 }
 
@@ -15182,6 +15205,9 @@ export interface MlVocabulary {
   index: IndexName
 }
 
+export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig {
+}
+
 export interface MlZeroShotClassificationInferenceOptions {
   tokenization?: MlTokenizationConfigContainer
   hypothesis_template?: string
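
Not part of the generated patch: a minimal TypeScript sketch of how two of the new shapes above could be used. It assumes the client package re-exports the generated types as `estypes`; the `query_extractor` key, the field names, and the `{{query_text}}` template parameter are illustrative placeholders, not values confirmed by this diff.

import { estypes } from '@elastic/elasticsearch'

// Learning-to-rank inference configuration built from the new
// MlLearningToRankConfig and MlQueryFeatureExtractor types.
// `query_extractor` is an assumed key name; `Record<string, MlFeatureExtractor>`
// accepts any key, so this only illustrates the intended shape.
const ltrConfig: estypes.MlLearningToRankConfig = {
  num_top_feature_importance_values: 0,
  default_params: { query_text: '' },
  feature_extractors: [
    {
      query_extractor: {
        feature_name: 'title_bm25',
        query: { match: { title: '{{query_text}}' } }
      }
    }
  ]
}

// Boolean mapping property exercising the new optional script-related members
// (script, on_script_error) added to MappingBooleanProperty.
const isActive: estypes.MappingBooleanProperty = {
  type: 'boolean',
  script: "emit(doc['status'].value == 'active')",
  on_script_error: 'fail'
}

export { ltrConfig, isActive }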