Auto-generated code for 8.13 (#2223)
@@ -3280,7 +3280,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th
 ** *`field` (Optional, string)*: Field used to derive the analyzer.
 To use this parameter, you must specify an index.
 If specified, the `analyzer` parameter overrides this value.
-** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
+** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, rule_files } | { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer.
 ** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token.
 ** *`text` (Optional, string | string[])*: Text to analyze.
 If an array of strings is provided, it is analyzed as a multi-value field.
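For orientation, these parameters belong to the analyze request in the generated client docs. A minimal sketch of a call, assuming a local node and an existing index named my-index (both illustrative):

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200' })

    // Derive the analyzer from the mapping of the `title` field;
    // an explicit `analyzer` would override it, per the docs above.
    const res = await client.indices.analyze({
      index: 'my-index',
      field: 'title',
      text: ['Quick brown fox', 'lazy dog'], // an array is analyzed as a multi-value field
    })
    console.log(res.tokens)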
@@ -2285,7 +2285,6 @@ export interface KnnQuery extends QueryDslQueryBase {
   query_vector?: QueryVector
   query_vector_builder?: QueryVectorBuilder
   num_candidates?: long
-  boost?: float
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   similarity?: float
 }
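The dropped boost?: float looks like a redundant redeclaration: KnnQuery extends QueryDslQueryBase, which already carries an optional boost, so a knn query can presumably still be boosted. A sketch under that assumption, reusing the client from the first sketch (index, field, and vector are illustrative):

    const hits = await client.search({
      index: 'my-index',
      query: {
        knn: {
          field: 'embedding',
          query_vector: [0.1, 0.2, 0.3], // QueryVector = float[]
          num_candidates: 100,
          boost: 0.5, // assumed to still type-check via QueryDslQueryBase
        },
      },
    })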
@@ -2418,14 +2417,14 @@ export interface PluginStats {
 export type PropertyName = string
 
 export interface QueryCacheStats {
-  cache_count: integer
-  cache_size: integer
-  evictions: integer
-  hit_count: integer
+  cache_count: long
+  cache_size: long
+  evictions: long
+  hit_count: long
   memory_size?: ByteSize
   memory_size_in_bytes: long
-  miss_count: integer
-  total_count: integer
+  miss_count: long
+  total_count: long
 }
 
 export type QueryVector = float[]
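QueryCacheStats surfaces in node and index stats responses, and widening the counters from integer to long matches values that can exceed 2^31 on long-lived nodes. A sketch of reading them with the updated types, reusing the same client (the exact response path is assumed from the interface above):

    const stats = await client.nodes.stats({ metric: 'indices' })
    for (const [nodeId, node] of Object.entries(stats.nodes)) {
      const qc = node.indices?.query_cache
      if (qc) {
        // hit_count, miss_count, cache_count are now typed as long
        console.log(nodeId, qc.hit_count, qc.miss_count, qc.memory_size_in_bytes)
      }
    }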
@@ -4692,6 +4691,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
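The added synonyms_set lets a synonym_graph filter reference a synonyms set managed through the synonyms API, as an alternative to inline synonyms or a synonyms_path file. A sketch of an index that uses it (index, filter, and set names are illustrative; set-backed filters are meant for search-time analyzers, so the filter is marked updateable):

    await client.indices.create({
      index: 'my-index',
      settings: {
        analysis: {
          filter: {
            my_synonyms: {
              type: 'synonym_graph',
              synonyms_set: 'my-synonyms-set', // managed via the synonyms API
              updateable: true,
            },
          },
          analyzer: {
            my_search_analyzer: {
              type: 'custom',
              tokenizer: 'standard',
              filter: ['lowercase', 'my_synonyms'],
            },
          },
        },
      },
    })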
@@ -4703,6 +4703,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
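The same field lands on the plain synonym filter. The referenced set itself would be created with the synonyms API; a sketch, assuming the client's synonyms.putSynonym helper mirrors PUT _synonyms/<id>:

    await client.synonyms.putSynonym({
      id: 'my-synonyms-set',
      synonyms_set: [
        { id: 'rule-1', synonyms: 'laptop, notebook' },
        { synonyms: 'tv => television' },
      ],
    })

Updating the set later reloads any updateable filters that reference it, so search-time synonyms can change without reindexing.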
@@ -2358,7 +2358,6 @@ export interface KnnQuery extends QueryDslQueryBase {
   query_vector?: QueryVector
   query_vector_builder?: QueryVectorBuilder
   num_candidates?: long
-  boost?: float
   filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
   similarity?: float
 }
@@ -2491,14 +2490,14 @@ export interface PluginStats {
 export type PropertyName = string
 
 export interface QueryCacheStats {
-  cache_count: integer
-  cache_size: integer
-  evictions: integer
-  hit_count: integer
+  cache_count: long
+  cache_size: long
+  evictions: long
+  hit_count: long
   memory_size?: ByteSize
   memory_size_in_bytes: long
-  miss_count: integer
-  total_count: integer
+  miss_count: long
+  total_count: long
 }
 
 export type QueryVector = float[]
@@ -4765,6 +4764,7 @@ export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }
@@ -4776,6 +4776,7 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   lenient?: boolean
   synonyms?: string[]
   synonyms_path?: string
+  synonyms_set?: string
   tokenizer?: string
   updateable?: boolean
 }