From 1eed52b2356a9fa28deb2646e705034bebe9899d Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Wed, 17 Apr 2024 18:22:45 +0200
Subject: [PATCH] Auto-generated code for 8.13 (#2223)

---
 docs/reference.asciidoc     |  2 +-
 src/api/types.ts            | 15 ++++++++-------
 src/api/typesWithBodyKey.ts | 15 ++++++++-------
 3 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 94b9ccaff..b175276a8 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -3280,7 +3280,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th
 ** *`field` (Optional, string)*: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value.
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
+** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
 ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
 ** *`text` (Optional, string | string[])*: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field.
diff --git a/src/api/types.ts b/src/api/types.ts
index e2a828909..9be5891fb 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -2285,7 +2285,6 @@ export interface KnnQuery extends QueryDslQueryBase {
   query_vector?: QueryVector
   query_vector_builder?: QueryVectorBuilder
   num_candidates?: long
-  boost?: float
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   similarity?: float
 }
@@ -2418,14 +2417,14 @@ export interface PluginStats {
 export type PropertyName = string
 
 export interface QueryCacheStats {
-  cache_count: integer
-  cache_size: integer
-  evictions: integer
-  hit_count: integer
+  cache_count: long
+  cache_size: long
+  evictions: long
+  hit_count: long
   memory_size?: ByteSize
   memory_size_in_bytes: long
-  miss_count: integer
-  total_count: integer
+  miss_count: long
+  total_count: long
 }
 
 export type QueryVector = float[]
@@ -4692,6 +4691,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
@@ -4703,6 +4703,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index bdcc2f7e8..70c00df04 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -2358,7 +2358,6 @@ export interface KnnQuery extends QueryDslQueryBase {
   query_vector?: QueryVector
   query_vector_builder?: QueryVectorBuilder
   num_candidates?: long
-  boost?: float
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   similarity?: float
 }
@@ -2491,14 +2490,14 @@ export interface PluginStats {
 export type PropertyName = string
 
 export interface QueryCacheStats {
-  cache_count: integer
-  cache_size: integer
-  evictions: integer
-  hit_count: integer
+  cache_count: long
+  cache_size: long
+  evictions: long
+  hit_count: long
   memory_size?: ByteSize
   memory_size_in_bytes: long
-  miss_count: integer
-  total_count: integer
+  miss_count: long
+  total_count: long
 }
 
 export type QueryVector = float[]
@@ -4765,6 +4764,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
@@ -4776,6 +4776,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
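
Note on the `boost` removal above: `KnnQuery` extends `QueryDslQueryBase`, which already declares `boost`, so dropping the duplicate field does not remove it from the public surface. A minimal sketch of what still type-checks after this patch (the field name and vector values are illustrative, not from the patch):

import type { KnnQuery } from '@elastic/elasticsearch/lib/api/types'

// `boost` is now inherited from QueryDslQueryBase rather than
// redeclared on KnnQuery itself, so existing call sites keep working.
const knn: KnnQuery = {
  field: 'embedding',                // hypothetical dense_vector field
  query_vector: [0.12, -0.34, 0.56], // illustrative values
  num_candidates: 100,
  boost: 0.7
}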
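
The other substantive change is the new `synonyms_set` option on the `synonym` and `synonym_graph` token filters, which points a filter at a synonyms set managed through the synonyms APIs instead of inline `synonyms` or a `synonyms_path` file. A minimal sketch, assuming the 8.13 client against a cluster where a set named `my-synonyms-set` already exists (index, filter, analyzer, and field names are all illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'products',
  settings: {
    analysis: {
      filter: {
        product_synonyms: {
          type: 'synonym_graph',
          synonyms_set: 'my-synonyms-set', // assumed to exist already
          updateable: true                 // pick up edits to the set without reindexing
        }
      },
      analyzer: {
        product_search: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['lowercase', 'product_synonyms']
        }
      }
    }
  },
  mappings: {
    properties: {
      // updateable synonym filters may only be applied at search time,
      // hence the separate search_analyzer
      name: { type: 'text', analyzer: 'standard', search_analyzer: 'product_search' }
    }
  }
})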