[[reference-shared-types]]

////////
===========================================================================================================================
||                                                                                                                       ||
||        ██████╗ ███████╗ █████╗ ██████╗ ███╗   ███╗███████╗                                                            ||
||        ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝                                                            ||
||        ██████╔╝█████╗  ███████║██║  ██║██╔████╔██║█████╗                                                              ||
||        ██╔══██╗██╔══╝  ██╔══██║██║  ██║██║╚██╔╝██║██╔══╝                                                              ||
||        ██║  ██║███████╗██║  ██║██████╔╝██║ ╚═╝ ██║███████╗                                                            ||
||        ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝╚═════╝ ╚═╝     ╚═╝╚══════╝                                                            ||
||                                                                                                                       ||
||        This file is autogenerated, DO NOT send pull requests that change this file directly.                          ||
||        You should update the script that does the generation, which can be found in:                                  ||
||        https://github.com/elastic/elastic-client-generator-js                                                         ||
||                                                                                                                       ||
||        You can run the script with the following command:                                                             ||
||        npm run elasticsearch -- --version <version>                                                                   ||
||                                                                                                                       ||
===========================================================================================================================
////////

== Shared types

[discrete]
[[AcknowledgedResponseBase]]
=== AcknowledgedResponseBase

[pass]
++++
<pre>
++++
interface AcknowledgedResponseBase {
  pass:[/**] @property acknowledged For a successful response, this value is always true. On failure, an exception is returned instead. */
  acknowledged: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregateName]]
=== AggregateName

[pass]
++++
<pre>
++++
type AggregateName = string
[pass]
++++
</pre>
++++

[discrete]
[[BulkIndexByScrollFailure]]
=== BulkIndexByScrollFailure

[pass]
++++
<pre>
++++
interface BulkIndexByScrollFailure {
  cause: <<ErrorCause>>
  id: <<Id>>
  index: <<IndexName>>
  status: <<integer>>
  type: string
}
[pass]
++++
</pre>
++++

[discrete]
[[BulkStats]]
=== BulkStats

[pass]
++++
<pre>
++++
interface BulkStats {
  total_operations: <<long>>
  total_time?: <<Duration>>
  total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  total_size?: <<ByteSize>>
  total_size_in_bytes: <<long>>
  avg_time?: <<Duration>>
  avg_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  avg_size?: <<ByteSize>>
  avg_size_in_bytes: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[ByteSize]]
=== ByteSize

[pass]
++++
<pre>
++++
type ByteSize = <<long>> | string
[pass]
++++
</pre>
++++
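
A `ByteSize` accepts either a raw byte count or a human-readable size string. A minimal sketch (the local alias stands in for the generated type, since `<<long>>` maps to `number` in the JavaScript client):

[source,ts]
----
type ByteSize = number | string // local alias for illustration

// Both constants express the same size; APIs accepting a ByteSize take either form.
const asNumber: ByteSize = 1048576 // bytes
const asString: ByteSize = '1mb'   // human-readable equivalent
----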

[discrete]
[[Bytes]]
=== Bytes

[pass]
++++
<pre>
++++
type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb'
[pass]
++++
</pre>
++++

[discrete]
[[CategoryId]]
=== CategoryId

[pass]
++++
<pre>
++++
type CategoryId = string
[pass]
++++
</pre>
++++

[discrete]
[[ClusterAlias]]
=== ClusterAlias

[pass]
++++
<pre>
++++
type ClusterAlias = string
[pass]
++++
</pre>
++++

[discrete]
[[ClusterDetails]]
=== ClusterDetails

[pass]
++++
<pre>
++++
interface ClusterDetails {
  status: <<ClusterSearchStatus>>
  indices: string
  took?: <<DurationValue>><<<UnitMillis>>>
  timed_out: boolean
  _shards?: <<ShardStatistics>>
  failures?: <<ShardFailure>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[ClusterInfoTarget]]
=== ClusterInfoTarget

[pass]
++++
<pre>
++++
type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script'
[pass]
++++
</pre>
++++

[discrete]
[[ClusterInfoTargets]]
=== ClusterInfoTargets

[pass]
++++
<pre>
++++
type ClusterInfoTargets = <<ClusterInfoTarget>> | <<ClusterInfoTarget>>[]
[pass]
++++
</pre>
++++

[discrete]
[[ClusterSearchStatus]]
=== ClusterSearchStatus

[pass]
++++
<pre>
++++
type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed'
[pass]
++++
</pre>
++++

[discrete]
[[ClusterStatistics]]
=== ClusterStatistics

[pass]
++++
<pre>
++++
interface ClusterStatistics {
  skipped: <<integer>>
  successful: <<integer>>
  total: <<integer>>
  running: <<integer>>
  partial: <<integer>>
  failed: <<integer>>
  details?: Record<<<ClusterAlias>>, <<ClusterDetails>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[CompletionStats]]
=== CompletionStats

[pass]
++++
<pre>
++++
interface CompletionStats {
  pass:[/**] @property size_in_bytes Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */
  size_in_bytes: <<long>>
  pass:[/**] @property size Total amount of memory used for completion across all shards assigned to selected nodes. */
  size?: <<ByteSize>>
  fields?: Record<<<Field>>, <<FieldSizeUsage>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Conflicts]]
=== Conflicts

[pass]
++++
<pre>
++++
type Conflicts = 'abort' | 'proceed'
[pass]
++++
</pre>
++++

[discrete]
[[CoordsGeoBounds]]
=== CoordsGeoBounds

[pass]
++++
<pre>
++++
interface CoordsGeoBounds {
  top: <<double>>
  bottom: <<double>>
  left: <<double>>
  right: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[DFIIndependenceMeasure]]
=== DFIIndependenceMeasure

[pass]
++++
<pre>
++++
type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared'
[pass]
++++
</pre>
++++

[discrete]
[[DFRAfterEffect]]
=== DFRAfterEffect

[pass]
++++
<pre>
++++
type DFRAfterEffect = 'no' | 'b' | 'l'
[pass]
++++
</pre>
++++

[discrete]
[[DFRBasicModel]]
=== DFRBasicModel

[pass]
++++
<pre>
++++
type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p'
[pass]
++++
</pre>
++++

[discrete]
[[DataStreamName]]
=== DataStreamName

[pass]
++++
<pre>
++++
type DataStreamName = string
[pass]
++++
</pre>
++++

[discrete]
[[DataStreamNames]]
=== DataStreamNames

[pass]
++++
<pre>
++++
type DataStreamNames = <<DataStreamName>> | <<DataStreamName>>[]
[pass]
++++
</pre>
++++

[discrete]
[[DateFormat]]
=== DateFormat

[pass]
++++
<pre>
++++
type DateFormat = string
[pass]
++++
</pre>
++++

[discrete]
[[DateMath]]
=== DateMath

[pass]
++++
<pre>
++++
type DateMath = string | Date
[pass]
++++
</pre>
++++
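
`DateMath` strings combine an anchor date (or `now`) with optional math and rounding. A few representative values, as a sketch; the expressions follow Elasticsearch date-math syntax and the dates themselves are hypothetical:

[source,ts]
----
type DateMath = string | Date // local alias for illustration

const examples: DateMath[] = [
  'now-1d',            // 24 hours ago
  'now/d',             // now, rounded down to the start of the day
  '2024-05-01||+1M/d'  // one month after an explicit anchor date, rounded to day
]
----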

[discrete]
[[DateTime]]
=== DateTime

[pass]
++++
<pre>
++++
type DateTime = string | <<EpochTime>><<<UnitMillis>>> | Date
[pass]
++++
</pre>
++++

[discrete]
[[Distance]]
=== Distance

[pass]
++++
<pre>
++++
type Distance = string
[pass]
++++
</pre>
++++

[discrete]
[[DistanceUnit]]
=== DistanceUnit

[pass]
++++
<pre>
++++
type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm'
[pass]
++++
</pre>
++++

[discrete]
[[DocStats]]
=== DocStats

[pass]
++++
<pre>
++++
interface DocStats {
  pass:[/**] @property count Total number of non-deleted documents across all primary shards assigned to selected nodes. This number is based on documents in Lucene segments and may include documents from nested fields. */
  count: <<long>>
  pass:[/**] @property deleted Total number of deleted documents across all primary shards assigned to selected nodes. This number is based on documents in Lucene segments. Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */
  deleted?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Duration]]
=== Duration

[pass]
++++
<pre>
++++
type Duration = string | -1 | 0
[pass]
++++
</pre>
++++
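
A `Duration` is usually a time value plus a unit; the numeric literals cover the two special cases (`-1` for no timeout and `0` for immediate). A minimal sketch:

[source,ts]
----
type Duration = string | -1 | 0 // local alias for illustration

const thirtySeconds: Duration = '30s'
const oneDay: Duration = '1d'
const noTimeout: Duration = -1 // disables the timeout in most APIs that accept it
----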

[discrete]
[[DurationLarge]]
=== DurationLarge

[pass]
++++
<pre>
++++
type DurationLarge = string
[pass]
++++
</pre>
++++

[discrete]
[[DurationValue]]
=== DurationValue

[pass]
++++
<pre>
++++
type DurationValue<Unit = unknown> = Unit
[pass]
++++
</pre>
++++

[discrete]
[[ElasticsearchVersionInfo]]
=== ElasticsearchVersionInfo

[pass]
++++
<pre>
++++
interface ElasticsearchVersionInfo {
  build_date: <<DateTime>>
  build_flavor: string
  build_hash: string
  build_snapshot: boolean
  build_type: string
  lucene_version: <<VersionString>>
  minimum_index_compatibility_version: <<VersionString>>
  minimum_wire_compatibility_version: <<VersionString>>
  number: string
}
[pass]
++++
</pre>
++++

[discrete]
[[ElasticsearchVersionMinInfo]]
=== ElasticsearchVersionMinInfo

[pass]
++++
<pre>
++++
interface ElasticsearchVersionMinInfo {
  build_flavor: string
  minimum_index_compatibility_version: <<VersionString>>
  minimum_wire_compatibility_version: <<VersionString>>
  number: string
}
[pass]
++++
</pre>
++++

[discrete]
[[EmptyObject]]
=== EmptyObject

[pass]
++++
<pre>
++++
interface EmptyObject {
}
[pass]
++++
</pre>
++++

[discrete]
[[EpochTime]]
=== EpochTime

[pass]
++++
<pre>
++++
type EpochTime<Unit = unknown> = Unit
[pass]
++++
</pre>
++++

[discrete]
[[ErrorCause]]
=== ErrorCause

[pass]
++++
<pre>
++++
interface ErrorCauseKeys {
  type: string
  reason?: string
  stack_trace?: string
  caused_by?: <<ErrorCause>>
  root_cause?: <<ErrorCause>>[]
  suppressed?: <<ErrorCause>>[]
}
type ErrorCause = ErrorCauseKeys
& { [property: string]: any }
[pass]
++++
</pre>
++++
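
When a request fails, the server's error body nests `caused_by` chains arbitrarily deep. A minimal sketch of walking that chain (the local interface is a subset of the generated type, and `explain` is a hypothetical helper):

[source,ts]
----
interface ErrorCause {
  type: string
  reason?: string
  caused_by?: ErrorCause
  [property: string]: any
} // local subset for illustration

// Collect `type: reason` pairs from an error and its causes, outermost first.
function explain (err: ErrorCause): string[] {
  const out: string[] = []
  for (let cur: ErrorCause | undefined = err; cur != null; cur = cur.caused_by) {
    out.push(`${cur.type}: ${cur.reason ?? '(no reason given)'}`)
  }
  return out
}
----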

[discrete]
[[ErrorResponseBase]]
=== ErrorResponseBase

[pass]
++++
<pre>
++++
interface ErrorResponseBase {
  error: <<ErrorCause>>
  status: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[EsqlColumns]]
=== EsqlColumns

[pass]
++++
<pre>
++++
type EsqlColumns = ArrayBuffer
[pass]
++++
</pre>
++++

[discrete]
[[ExpandWildcard]]
=== ExpandWildcard

[pass]
++++
<pre>
++++
type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none'
[pass]
++++
</pre>
++++

[discrete]
[[ExpandWildcards]]
=== ExpandWildcards

[pass]
++++
<pre>
++++
type ExpandWildcards = <<ExpandWildcard>> | <<ExpandWildcard>>[]
[pass]
++++
</pre>
++++

[discrete]
[[Field]]
=== Field

[pass]
++++
<pre>
++++
type Field = string
[pass]
++++
</pre>
++++

[discrete]
[[FieldMemoryUsage]]
=== FieldMemoryUsage

[pass]
++++
<pre>
++++
interface FieldMemoryUsage {
  memory_size?: <<ByteSize>>
  memory_size_in_bytes: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[FieldSizeUsage]]
=== FieldSizeUsage

[pass]
++++
<pre>
++++
interface FieldSizeUsage {
  size?: <<ByteSize>>
  size_in_bytes: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[FieldSort]]
=== FieldSort

[pass]
++++
<pre>
++++
interface FieldSort {
  missing?: <<AggregationsMissing>>
  mode?: <<SortMode>>
  nested?: <<NestedSortValue>>
  order?: <<SortOrder>>
  unmapped_type?: <<MappingFieldType>>
  numeric_type?: <<FieldSortNumericType>>
  format?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[FieldSortNumericType]]
=== FieldSortNumericType

[pass]
++++
<pre>
++++
type FieldSortNumericType = '<<long>>' | '<<double>>' | 'date' | 'date_nanos'
[pass]
++++
</pre>
++++

[discrete]
[[FieldValue]]
=== FieldValue

[pass]
++++
<pre>
++++
type FieldValue = <<long>> | <<double>> | string | boolean | null | any
[pass]
++++
</pre>
++++

[discrete]
[[FielddataStats]]
=== FielddataStats

[pass]
++++
<pre>
++++
interface FielddataStats {
  evictions?: <<long>>
  memory_size?: <<ByteSize>>
  memory_size_in_bytes: <<long>>
  fields?: Record<<<Field>>, <<FieldMemoryUsage>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Fields]]
=== Fields

[pass]
++++
<pre>
++++
type Fields = <<Field>> | <<Field>>[]
[pass]
++++
</pre>
++++

[discrete]
[[FlushStats]]
=== FlushStats

[pass]
++++
<pre>
++++
interface FlushStats {
  periodic: <<long>>
  total: <<long>>
  total_time?: <<Duration>>
  total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Fuzziness]]
=== Fuzziness

[pass]
++++
<pre>
++++
type Fuzziness = string | <<integer>>
[pass]
++++
</pre>
++++
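
A `Fuzziness` of `'AUTO'` lets Elasticsearch pick an edit distance from the term length, while an integer pins it explicitly. A minimal sketch:

[source,ts]
----
type Fuzziness = string | number // local alias for illustration

const auto: Fuzziness = 'AUTO' // edit distance scales with term length
const fixed: Fuzziness = 2     // always allow up to two edits
----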

[discrete]
[[GeoBounds]]
=== GeoBounds

[pass]
++++
<pre>
++++
type GeoBounds = <<CoordsGeoBounds>> | <<TopLeftBottomRightGeoBounds>> | <<TopRightBottomLeftGeoBounds>> | <<WktGeoBounds>>
[pass]
++++
</pre>
++++

[discrete]
[[GeoDistanceSort]]
=== GeoDistanceSort

[pass]
++++
<pre>
++++
interface GeoDistanceSortKeys {
  mode?: <<SortMode>>
  distance_type?: <<GeoDistanceType>>
  ignore_unmapped?: boolean
  order?: <<SortOrder>>
  unit?: <<DistanceUnit>>
  nested?: <<NestedSortValue>>
}
type GeoDistanceSort = GeoDistanceSortKeys
& { [property: string]: <<GeoLocation>> | <<GeoLocation>>[] | <<SortMode>> | <<GeoDistanceType>> | boolean | <<SortOrder>> | <<DistanceUnit>> | <<NestedSortValue>> }
[pass]
++++
</pre>
++++

[discrete]
[[GeoDistanceType]]
=== GeoDistanceType

[pass]
++++
<pre>
++++
type GeoDistanceType = 'arc' | 'plane'
[pass]
++++
</pre>
++++

[discrete]
[[GeoHash]]
=== GeoHash

[pass]
++++
<pre>
++++
type GeoHash = string
[pass]
++++
</pre>
++++

[discrete]
[[GeoHashLocation]]
=== GeoHashLocation

[pass]
++++
<pre>
++++
interface GeoHashLocation {
  geohash: <<GeoHash>>
}
[pass]
++++
</pre>
++++

[discrete]
[[GeoHashPrecision]]
=== GeoHashPrecision

[pass]
++++
<pre>
++++
type GeoHashPrecision = number | string
[pass]
++++
</pre>
++++

[discrete]
[[GeoHexCell]]
=== GeoHexCell

[pass]
++++
<pre>
++++
type GeoHexCell = string
[pass]
++++
</pre>
++++

[discrete]
[[GeoLine]]
=== GeoLine

[pass]
++++
<pre>
++++
interface GeoLine {
  pass:[/**] @property type Always `"LineString"` */
  type: string
  pass:[/**] @property coordinates Array of `[lon, lat]` coordinates */
  coordinates: <<double>>[][]
}
[pass]
++++
</pre>
++++

[discrete]
[[GeoLocation]]
=== GeoLocation

[pass]
++++
<pre>
++++
type GeoLocation = <<LatLonGeoLocation>> | <<GeoHashLocation>> | <<double>>[] | string
[pass]
++++
</pre>
++++
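
The same point can be expressed in any of the `GeoLocation` forms. A minimal sketch (coordinates are hypothetical; note that the array form is `[lon, lat]` while the text form is `"lat,lon"`):

[source,ts]
----
type GeoLocation =
  | { lat: number, lon: number } // LatLonGeoLocation
  | { geohash: string }          // GeoHashLocation
  | number[]                     // [lon, lat] array
  | string                       // "lat,lon" text or WKT POINT
// local alias for illustration

const asObject: GeoLocation = { lat: 41.12, lon: -71.34 }
const asArray: GeoLocation = [-71.34, 41.12] // lon first
const asText: GeoLocation = '41.12,-71.34'   // lat first
const asWkt: GeoLocation = 'POINT (-71.34 41.12)'
----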

[discrete]
[[GeoShape]]
=== GeoShape

[pass]
++++
<pre>
++++
type GeoShape = any
[pass]
++++
</pre>
++++

[discrete]
[[GeoShapeRelation]]
=== GeoShapeRelation

[pass]
++++
<pre>
++++
type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains'
[pass]
++++
</pre>
++++

[discrete]
[[GeoTile]]
=== GeoTile

[pass]
++++
<pre>
++++
type GeoTile = string
[pass]
++++
</pre>
++++

[discrete]
[[GeoTilePrecision]]
=== GeoTilePrecision

[pass]
++++
<pre>
++++
type GeoTilePrecision = number
[pass]
++++
</pre>
++++

[discrete]
[[GetStats]]
=== GetStats

[pass]
++++
<pre>
++++
interface GetStats {
  current: <<long>>
  exists_time?: <<Duration>>
  exists_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  exists_total: <<long>>
  missing_time?: <<Duration>>
  missing_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  missing_total: <<long>>
  time?: <<Duration>>
  time_in_millis: <<DurationValue>><<<UnitMillis>>>
  total: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[GrokPattern]]
=== GrokPattern

[pass]
++++
<pre>
++++
type GrokPattern = string
[pass]
++++
</pre>
++++

[discrete]
[[HealthStatus]]
=== HealthStatus

[pass]
++++
<pre>
++++
type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED'
[pass]
++++
</pre>
++++

[discrete]
[[Host]]
=== Host

[pass]
++++
<pre>
++++
type Host = string
[pass]
++++
</pre>
++++

[discrete]
[[HttpHeaders]]
=== HttpHeaders

[pass]
++++
<pre>
++++
type HttpHeaders = Record<string, string | string[]>
[pass]
++++
</pre>
++++

[discrete]
[[IBDistribution]]
=== IBDistribution

[pass]
++++
<pre>
++++
type IBDistribution = 'll' | 'spl'
[pass]
++++
</pre>
++++

[discrete]
[[IBLambda]]
=== IBLambda

[pass]
++++
<pre>
++++
type IBLambda = 'df' | 'ttf'
[pass]
++++
</pre>
++++

[discrete]
[[Id]]
=== Id

[pass]
++++
<pre>
++++
type Id = string
[pass]
++++
</pre>
++++

[discrete]
[[Ids]]
=== Ids

[pass]
++++
<pre>
++++
type Ids = <<Id>> | <<Id>>[]
[pass]
++++
</pre>
++++

[discrete]
[[IndexAlias]]
=== IndexAlias

[pass]
++++
<pre>
++++
type IndexAlias = string
[pass]
++++
</pre>
++++

[discrete]
[[IndexName]]
=== IndexName

[pass]
++++
<pre>
++++
type IndexName = string
[pass]
++++
</pre>
++++

[discrete]
[[IndexPattern]]
=== IndexPattern

[pass]
++++
<pre>
++++
type IndexPattern = string
[pass]
++++
</pre>
++++

[discrete]
[[IndexPatterns]]
=== IndexPatterns

[pass]
++++
<pre>
++++
type IndexPatterns = <<IndexPattern>>[]
[pass]
++++
</pre>
++++

[discrete]
[[IndexingStats]]
=== IndexingStats

[pass]
++++
<pre>
++++
interface IndexingStats {
  index_current: <<long>>
  delete_current: <<long>>
  delete_time?: <<Duration>>
  delete_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  delete_total: <<long>>
  is_throttled: boolean
  noop_update_total: <<long>>
  throttle_time?: <<Duration>>
  throttle_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  index_time?: <<Duration>>
  index_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  index_total: <<long>>
  index_failed: <<long>>
  types?: Record<string, <<IndexingStats>>>
  write_load?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Indices]]
=== Indices

[pass]
++++
<pre>
++++
type Indices = <<IndexName>> | <<IndexName>>[]
[pass]
++++
</pre>
++++

[discrete]
[[IndicesOptions]]
=== IndicesOptions

[pass]
++++
<pre>
++++
interface IndicesOptions {
  pass:[/**] @property allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean
  pass:[/**] @property expand_wildcards <<Type>> of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: <<ExpandWildcards>>
  pass:[/**] @property ignore_unavailable If true, missing or closed indices are not included in the response. */
  ignore_unavailable?: boolean
  pass:[/**] @property ignore_throttled If true, concrete, expanded or aliased indices are ignored when frozen. */
  ignore_throttled?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[IndicesResponseBase]]
=== IndicesResponseBase

[pass]
++++
<pre>
++++
interface IndicesResponseBase extends <<AcknowledgedResponseBase>> {
  _shards?: <<ShardStatistics>>
}
[pass]
++++
</pre>
++++

[discrete]
[[InlineGet]]
=== InlineGet

[pass]
++++
<pre>
++++
interface InlineGetKeys<TDocument = unknown> {
  fields?: Record<string, any>
  found: boolean
  _seq_no?: <<SequenceNumber>>
  _primary_term?: <<long>>
  _routing?: <<Routing>>
  _source?: TDocument
}
type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
& { [property: string]: any }
[pass]
++++
</pre>
++++

[discrete]
[[Ip]]
=== Ip

[pass]
++++
<pre>
++++
type Ip = string
[pass]
++++
</pre>
++++

[discrete]
[[KnnQuery]]
=== KnnQuery

[pass]
++++
<pre>
++++
interface KnnQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property field The name of the vector field to search against */
  field: <<Field>>
  pass:[/**] @property query_vector The query vector */
  query_vector?: <<QueryVector>>
  pass:[/**] @property query_vector_builder The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector_builder?: <<QueryVectorBuilder>>
  pass:[/**] @property num_candidates The number of nearest neighbor candidates to consider per shard */
  num_candidates?: <<integer>>
  pass:[/**] @property k The final number of nearest neighbors to return as top hits */
  k?: <<integer>>
  pass:[/**] @property filter Filters for the kNN search query */
  filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
  pass:[/**] @property similarity The minimum similarity for a vector to be considered a match */
  similarity?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[KnnRetriever]]
=== KnnRetriever

[pass]
++++
<pre>
++++
interface KnnRetriever extends <<RetrieverBase>> {
  pass:[/**] @property field The name of the vector field to search against. */
  field: string
  pass:[/**] @property query_vector <<Query>> vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector?: <<QueryVector>>
  pass:[/**] @property query_vector_builder Defines a model to build a query vector. */
  query_vector_builder?: <<QueryVectorBuilder>>
  pass:[/**] @property k Number of nearest neighbors to return as top hits. */
  k: <<integer>>
  pass:[/**] @property num_candidates Number of nearest neighbor candidates to consider per shard. */
  num_candidates: <<integer>>
  pass:[/**] @property similarity The minimum similarity required for a document to be considered a match. */
  similarity?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[KnnSearch]]
=== KnnSearch

[pass]
++++
<pre>
++++
interface KnnSearch {
  pass:[/**] @property field The name of the vector field to search against */
  field: <<Field>>
  pass:[/**] @property query_vector The query vector */
  query_vector?: <<QueryVector>>
  pass:[/**] @property query_vector_builder The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector_builder?: <<QueryVectorBuilder>>
  pass:[/**] @property k The final number of nearest neighbors to return as top hits */
  k?: <<integer>>
  pass:[/**] @property num_candidates The number of nearest neighbor candidates to consider per shard */
  num_candidates?: <<integer>>
  pass:[/**] @property boost Boost value to apply to kNN scores */
  boost?: <<float>>
  pass:[/**] @property filter Filters for the kNN search query */
  filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
  pass:[/**] @property similarity The minimum similarity for a vector to be considered a match */
  similarity?: <<float>>
  pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */
  inner_hits?: <<SearchInnerHits>>
}
[pass]
++++
</pre>
++++
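
A `KnnSearch` clause is passed in the top-level `knn` section of a search request. A minimal sketch against the JavaScript client (the index name, field name, and vector values are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Return the 10 nearest neighbours, scanning 100 candidates per shard.
const result = await client.search({
  index: 'my-vectors',   // hypothetical index with a dense_vector field
  knn: {
    field: 'embedding',
    query_vector: [0.12, -0.53, 0.91],
    k: 10,
    num_candidates: 100
  }
})
console.log(result.hits.hits)
----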

[discrete]
[[LatLonGeoLocation]]
=== LatLonGeoLocation

[pass]
++++
<pre>
++++
interface LatLonGeoLocation {
  pass:[/**] @property lat Latitude */
  lat: <<double>>
  pass:[/**] @property lon Longitude */
  lon: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Level]]
=== Level

[pass]
++++
<pre>
++++
type Level = 'cluster' | 'indices' | 'shards'
[pass]
++++
</pre>
++++

[discrete]
[[LifecycleOperationMode]]
=== LifecycleOperationMode

[pass]
++++
<pre>
++++
type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
[pass]
++++
</pre>
++++

[discrete]
[[MapboxVectorTiles]]
=== MapboxVectorTiles

[pass]
++++
<pre>
++++
type MapboxVectorTiles = ArrayBuffer
[pass]
++++
</pre>
++++

[discrete]
[[MergesStats]]
=== MergesStats

[pass]
++++
<pre>
++++
interface MergesStats {
  current: <<long>>
  current_docs: <<long>>
  current_size?: string
  current_size_in_bytes: <<long>>
  total: <<long>>
  total_auto_throttle?: string
  total_auto_throttle_in_bytes: <<long>>
  total_docs: <<long>>
  total_size?: string
  total_size_in_bytes: <<long>>
  total_stopped_time?: <<Duration>>
  total_stopped_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  total_throttled_time?: <<Duration>>
  total_throttled_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  total_time?: <<Duration>>
  total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Metadata]]
=== Metadata

[pass]
++++
<pre>
++++
type Metadata = Record<string, any>
[pass]
++++
</pre>
++++

[discrete]
[[Metrics]]
=== Metrics

[pass]
++++
<pre>
++++
type Metrics = string | string[]
[pass]
++++
</pre>
++++

[discrete]
[[MinimumShouldMatch]]
=== MinimumShouldMatch

[pass]
++++
<pre>
++++
type MinimumShouldMatch = <<integer>> | string
[pass]
++++
</pre>
++++
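
`MinimumShouldMatch` accepts an absolute count, a percentage, or a conditional expression, following the semantics of the Elasticsearch `minimum_should_match` parameter. A minimal sketch:

[source,ts]
----
type MinimumShouldMatch = number | string // local alias for illustration

const atLeastTwo: MinimumShouldMatch = 2        // absolute clause count
const threeQuarters: MinimumShouldMatch = '75%' // percentage of optional clauses
const conditional: MinimumShouldMatch = '3<90%' // all clauses up to 3, 90% beyond that
----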

[discrete]
[[MultiTermQueryRewrite]]
=== MultiTermQueryRewrite

[pass]
++++
<pre>
++++
type MultiTermQueryRewrite = string
[pass]
++++
</pre>
++++

[discrete]
[[Name]]
=== Name

[pass]
++++
<pre>
++++
type Name = string
[pass]
++++
</pre>
++++

[discrete]
[[Names]]
=== Names

[pass]
++++
<pre>
++++
type Names = <<Name>> | <<Name>>[]
[pass]
++++
</pre>
++++

[discrete]
[[Namespace]]
=== Namespace

[pass]
++++
<pre>
++++
type Namespace = string
[pass]
++++
</pre>
++++

[discrete]
[[NestedSortValue]]
=== NestedSortValue

[pass]
++++
<pre>
++++
interface NestedSortValue {
  filter?: <<QueryDslQueryContainer>>
  max_children?: <<integer>>
  nested?: <<NestedSortValue>>
  path: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[NodeAttributes]]
=== NodeAttributes

[pass]
++++
<pre>
++++
interface NodeAttributes {
  pass:[/**] @property attributes Lists node attributes. */
  attributes: Record<string, string>
  pass:[/**] @property ephemeral_id The ephemeral ID of the node. */
  ephemeral_id: <<Id>>
  pass:[/**] @property id The unique identifier of the node. */
  id?: <<NodeId>>
  pass:[/**] @property name The name of the node. */
  name: <<NodeName>>
  pass:[/**] @property transport_address The host and port where transport HTTP connections are accepted. */
  transport_address: <<TransportAddress>>
}
[pass]
++++
</pre>
++++

[discrete]
[[NodeId]]
=== NodeId

[pass]
++++
<pre>
++++
type NodeId = string
[pass]
++++
</pre>
++++

[discrete]
[[NodeIds]]
=== NodeIds

[pass]
++++
<pre>
++++
type NodeIds = <<NodeId>> | <<NodeId>>[]
[pass]
++++
</pre>
++++

[discrete]
[[NodeName]]
=== NodeName

[pass]
++++
<pre>
++++
type NodeName = string
[pass]
++++
</pre>
++++

[discrete]
[[NodeRole]]
=== NodeRole

[pass]
++++
<pre>
++++
type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only'
[pass]
++++
</pre>
++++

[discrete]
[[NodeRoles]]
=== NodeRoles

[pass]
++++
<pre>
++++
type NodeRoles = <<NodeRole>>[]
[pass]
++++
</pre>
++++

[discrete]
[[NodeShard]]
=== NodeShard

[pass]
++++
<pre>
++++
interface NodeShard {
  state: IndicesStatsShardRoutingState
  primary: boolean
  node?: <<NodeName>>
  shard: <<integer>>
  index: <<IndexName>>
  allocation_id?: Record<string, <<Id>>>
  recovery_source?: Record<string, <<Id>>>
  unassigned_info?: ClusterAllocationExplainUnassignedInformation
  relocating_node?: <<NodeId>> | null
  relocation_failure_info?: <<RelocationFailureInfo>>
}
[pass]
++++
</pre>
++++

[discrete]
[[NodeStatistics]]
=== NodeStatistics

[pass]
++++
<pre>
++++
interface NodeStatistics {
  failures?: <<ErrorCause>>[]
  pass:[/**] @property total Total number of nodes selected by the request. */
  total: <<integer>>
  pass:[/**] @property successful Number of nodes that responded successfully to the request. */
  successful: <<integer>>
  pass:[/**] @property failed Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. */
  failed: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Normalization]]
=== Normalization

[pass]
++++
<pre>
++++
type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z'
[pass]
++++
</pre>
++++

[discrete]
[[OpType]]
=== OpType

[pass]
++++
<pre>
++++
type OpType = 'index' | 'create'
[pass]
++++
</pre>
++++

[discrete]
[[Password]]
=== Password

[pass]
++++
<pre>
++++
type Password = string
[pass]
++++
</pre>
++++

[discrete]
[[Percentage]]
=== Percentage

[pass]
++++
<pre>
++++
type Percentage = string | <<float>>
[pass]
++++
</pre>
++++

[discrete]
[[PipelineName]]
=== PipelineName

[pass]
++++
<pre>
++++
type PipelineName = string
[pass]
++++
</pre>
++++

[discrete]
[[PluginStats]]
=== PluginStats

[pass]
++++
<pre>
++++
interface PluginStats {
  classname: string
  description: string
  elasticsearch_version: <<VersionString>>
  extended_plugins: string[]
  has_native_controller: boolean
  java_version: <<VersionString>>
  name: <<Name>>
  version: <<VersionString>>
  licensed: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[PropertyName]]
=== PropertyName

[pass]
++++
<pre>
++++
type PropertyName = string
[pass]
++++
</pre>
++++

[discrete]
[[QueryCacheStats]]
=== QueryCacheStats

[pass]
++++
<pre>
++++
interface QueryCacheStats {
  pass:[/**] @property cache_count Total number of entries added to the query cache across all shards assigned to selected nodes. This number includes current and evicted entries. */
  cache_count: <<long>>
  pass:[/**] @property cache_size Total number of entries currently in the query cache across all shards assigned to selected nodes. */
  cache_size: <<long>>
  pass:[/**] @property evictions Total number of query cache evictions across all shards assigned to selected nodes. */
  evictions: <<long>>
  pass:[/**] @property hit_count Total count of query cache hits across all shards assigned to selected nodes. */
  hit_count: <<long>>
  pass:[/**] @property memory_size Total amount of memory used for the query cache across all shards assigned to selected nodes. */
  memory_size?: <<ByteSize>>
  pass:[/**] @property memory_size_in_bytes Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */
  memory_size_in_bytes: <<long>>
  pass:[/**] @property miss_count Total count of query cache misses across all shards assigned to selected nodes. */
  miss_count: <<long>>
  pass:[/**] @property total_count Total count of hits and misses in the query cache across all shards assigned to selected nodes. */
  total_count: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryVector]]
=== QueryVector

[pass]
++++
<pre>
++++
type QueryVector = <<float>>[]
[pass]
++++
</pre>
++++

[discrete]
[[QueryVectorBuilder]]
=== QueryVectorBuilder

[pass]
++++
<pre>
++++
interface QueryVectorBuilder {
  text_embedding?: <<TextEmbedding>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RRFRetriever]]
=== RRFRetriever

[pass]
++++
<pre>
++++
interface RRFRetriever extends <<RetrieverBase>> {
  pass:[/**] @property retrievers A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */
  retrievers: <<RetrieverContainer>>[]
  pass:[/**] @property rank_constant This value determines how much influence documents in individual result sets per query have over the final ranked result set. */
  rank_constant?: <<integer>>
  pass:[/**] @property rank_window_size This value determines the size of the individual result sets per query. */
  rank_window_size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankBase]]
=== RankBase

[pass]
++++
<pre>
++++
interface RankBase {
}
[pass]
++++
</pre>
++++

[discrete]
[[RankContainer]]
=== RankContainer

[pass]
++++
<pre>
++++
interface RankContainer {
  pass:[/**] @property rrf The reciprocal rank fusion parameters */
  rrf?: <<RrfRank>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RecoveryStats]]
=== RecoveryStats

[pass]
++++
<pre>
++++
interface RecoveryStats {
  current_as_source: <<long>>
  current_as_target: <<long>>
  throttle_time?: <<Duration>>
  throttle_time_in_millis: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Refresh]]
=== Refresh

[pass]
++++
<pre>
++++
type Refresh = boolean | 'true' | 'false' | 'wait_for'
[pass]
++++
</pre>
++++

[discrete]
[[RefreshStats]]
=== RefreshStats

[pass]
++++
<pre>
++++
interface RefreshStats {
  external_total: <<long>>
  external_total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  listeners: <<long>>
  total: <<long>>
  total_time?: <<Duration>>
  total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RelationName]]
=== RelationName

[pass]
++++
<pre>
++++
type RelationName = string
[pass]
++++
</pre>
++++

[discrete]
[[RelocationFailureInfo]]
=== RelocationFailureInfo

[pass]
++++
<pre>
++++
interface RelocationFailureInfo {
  failed_attempts: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RequestBase]]
=== RequestBase

[pass]
++++
<pre>
++++
interface RequestBase extends <<SpecUtilsCommonQueryParameters>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[RequestCacheStats]]
=== RequestCacheStats

[pass]
++++
<pre>
++++
interface RequestCacheStats {
  evictions: <<long>>
  hit_count: <<long>>
  memory_size?: string
  memory_size_in_bytes: <<long>>
  miss_count: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Result]]
=== Result

[pass]
++++
<pre>
++++
type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
[pass]
++++
</pre>
++++

[discrete]
[[Retries]]
=== Retries

[pass]
++++
<pre>
++++
interface Retries {
  bulk: <<long>>
  search: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RetrieverBase]]
=== RetrieverBase

[pass]
++++
<pre>
++++
interface RetrieverBase {
  pass:[/**] @property filter <<Query>> to filter the documents that can match. */
  filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
  pass:[/**] @property min_score Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */
  min_score?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RetrieverContainer]]
=== RetrieverContainer

[pass]
++++
<pre>
++++
interface RetrieverContainer {
  pass:[/**] @property standard A retriever that replaces the functionality of a traditional query. */
  standard?: <<StandardRetriever>>
  pass:[/**] @property knn A retriever that replaces the functionality of a knn search. */
  knn?: <<KnnRetriever>>
  pass:[/**] @property rrf A retriever that produces top documents from reciprocal rank fusion (RRF). */
  rrf?: <<RRFRetriever>>
  pass:[/**] @property text_similarity_reranker A retriever that reranks the top documents based on a reranking model using the InferenceAPI */
  text_similarity_reranker?: <<TextSimilarityReranker>>
}
[pass]
++++
</pre>
++++
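
Each `RetrieverContainer` holds exactly one retriever variant, and composites such as RRF nest further containers. A minimal sketch of an RRF retriever combining a standard query with a kNN search (the index field names, vector values, and tuning numbers are hypothetical):

[source,ts]
----
// The shape follows RetrieverContainer / RRFRetriever above; an object like
// this would be passed as the `retriever` option of a search request.
const retriever = {
  rrf: {
    retrievers: [
      { standard: { query: { match: { title: 'mountain lake' } } } },
      { knn: { field: 'embedding', query_vector: [0.3, 0.1, -0.8], k: 10, num_candidates: 100 } }
    ],
    rank_constant: 60,     // blending constant for the RRF formula
    rank_window_size: 100  // per-query result set size
  }
}
----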

[discrete]
[[Routing]]
=== Routing

[pass]
++++
<pre>
++++
type Routing = string
[pass]
++++
</pre>
++++

[discrete]
[[RrfRank]]
=== RrfRank

[pass]
++++
<pre>
++++
interface RrfRank {
  pass:[/**] @property rank_constant How much influence documents in individual result sets per query have over the final ranked result set */
  rank_constant?: <<long>>
  pass:[/**] @property rank_window_size Size of the individual result sets per query */
  rank_window_size?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[ScalarValue]]
=== ScalarValue

[pass]
++++
<pre>
++++
type ScalarValue = <<long>> | <<double>> | string | boolean | null
[pass]
++++
</pre>
++++

[discrete]
[[ScoreSort]]
=== ScoreSort

[pass]
++++
<pre>
++++
interface ScoreSort {
  order?: <<SortOrder>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Script]]
=== Script

[pass]
++++
<pre>
++++
interface Script {
  pass:[/**] @property source The script source. */
  source?: string
  pass:[/**] @property id The `id` for a stored script. */
  id?: <<Id>>
  pass:[/**] @property params Specifies any named parameters that are passed into the script as variables. Use parameters instead of hard-coded values to decrease compile time. */
  params?: Record<string, any>
  pass:[/**] @property lang Specifies the language the script is written in. */
  lang?: <<ScriptLanguage>>
  options?: Record<string, string>
}
[pass]
++++
</pre>
++++
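
A `Script` is either inline (`source`) or stored (`id`), with runtime values passed through `params` so the compiled script can be cached and reused. A minimal sketch of an inline Painless script in an update request (the index, document id, and field names are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.update({
  index: 'my-index', // hypothetical
  id: '1',
  script: {
    lang: 'painless',
    // Parameterised so one compiled script serves every increment value.
    source: 'ctx._source.counter += params.amount',
    params: { amount: 4 }
  }
})
----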

[discrete]
[[ScriptField]]
=== ScriptField

[pass]
++++
<pre>
++++
interface ScriptField {
  script: <<Script>> | string
  ignore_failure?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[ScriptLanguage]]
=== ScriptLanguage

[pass]
++++
<pre>
++++
type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string
[pass]
++++
</pre>
++++

[discrete]
[[ScriptSort]]
=== ScriptSort

[pass]
++++
<pre>
++++
interface ScriptSort {
  order?: <<SortOrder>>
  script: <<Script>> | string
  type?: <<ScriptSortType>>
  mode?: <<SortMode>>
  nested?: <<NestedSortValue>>
}
[pass]
++++
</pre>
++++

[discrete]
[[ScriptSortType]]
=== ScriptSortType

[pass]
++++
<pre>
++++
type ScriptSortType = 'string' | 'number' | 'version'
[pass]
++++
</pre>
++++

[discrete]
[[ScriptTransform]]
=== ScriptTransform

[pass]
++++
<pre>
++++
interface ScriptTransform {
  lang?: string
  params?: Record<string, any>
  source?: string
  id?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[ScrollId]]
=== ScrollId

[pass]
++++
<pre>
++++
type ScrollId = string
[pass]
++++
</pre>
++++

[discrete]
[[ScrollIds]]
=== ScrollIds

[pass]
++++
<pre>
++++
type ScrollIds = <<ScrollId>> | <<ScrollId>>[]
[pass]
++++
</pre>
++++

[discrete]
[[SearchStats]]
=== SearchStats

[pass]
++++
<pre>
++++
interface SearchStats {
  fetch_current: <<long>>
  fetch_time?: <<Duration>>
  fetch_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  fetch_total: <<long>>
  open_contexts?: <<long>>
  query_current: <<long>>
  query_time?: <<Duration>>
  query_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  query_total: <<long>>
  scroll_current: <<long>>
  scroll_time?: <<Duration>>
  scroll_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  scroll_total: <<long>>
  suggest_current: <<long>>
  suggest_time?: <<Duration>>
  suggest_time_in_millis: <<DurationValue>><<<UnitMillis>>>
  suggest_total: <<long>>
  groups?: Record<string, <<SearchStats>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[SearchTransform]]
=== SearchTransform

[pass]
++++
<pre>
++++
interface SearchTransform {
  request: <<WatcherSearchInputRequestDefinition>>
  timeout: <<Duration>>
}
[pass]
++++
</pre>
++++

[discrete]
[[SearchType]]
=== SearchType

[pass]
++++
<pre>
++++
type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch'
[pass]
++++
</pre>
++++

[discrete]
[[SegmentsStats]]
=== SegmentsStats

[pass]
++++
<pre>
++++
interface SegmentsStats {
  pass:[/**] @property count Total number of segments across all shards assigned to selected nodes. */
  count: <<integer>>
  pass:[/**] @property doc_values_memory Total amount of memory used for doc values across all shards assigned to selected nodes. */
  doc_values_memory?: <<ByteSize>>
  pass:[/**] @property doc_values_memory_in_bytes Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */
  doc_values_memory_in_bytes: <<long>>
  pass:[/**] @property file_sizes This object is not populated by the cluster stats API. To get information on segment files, use the node stats API. */
  file_sizes: Record<string, IndicesStatsShardFileSizeInfo>
  pass:[/**] @property fixed_bit_set Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. Fixed bit sets are used for nested object field types and type filters for join fields. */
  fixed_bit_set?: <<ByteSize>>
  pass:[/**] @property fixed_bit_set_memory_in_bytes Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */
  fixed_bit_set_memory_in_bytes: <<long>>
  pass:[/**] @property index_writer_memory Total amount of memory used by all index writers across all shards assigned to selected nodes. */
  index_writer_memory?: <<ByteSize>>
  index_writer_max_memory_in_bytes?: <<long>>
  pass:[/**] @property index_writer_memory_in_bytes Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */
  index_writer_memory_in_bytes: <<long>>
  pass:[/**] @property max_unsafe_auto_id_timestamp Unix timestamp, in milliseconds, of the most recently retried indexing request. */
  max_unsafe_auto_id_timestamp: <<long>>
  pass:[/**] @property memory Total amount of memory used for segments across all shards assigned to selected nodes. */
  memory?: <<ByteSize>>
  pass:[/**] @property memory_in_bytes Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. */
  memory_in_bytes: <<long>>
  pass:[/**] @property norms_memory Total amount of memory used for normalization factors across all shards assigned to selected nodes. */
  norms_memory?: <<ByteSize>>
  pass:[/**] @property norms_memory_in_bytes Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */
  norms_memory_in_bytes: <<long>>
  pass:[/**] @property points_memory Total amount of memory used for points across all shards assigned to selected nodes. */
  points_memory?: <<ByteSize>>
  pass:[/**] @property points_memory_in_bytes Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */
  points_memory_in_bytes: <<long>>
  stored_memory?: <<ByteSize>>
  pass:[/**] @property stored_fields_memory_in_bytes Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */
  stored_fields_memory_in_bytes: <<long>>
  pass:[/**] @property terms_memory_in_bytes Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */
  terms_memory_in_bytes: <<long>>
  pass:[/**] @property terms_memory Total amount of memory used for terms across all shards assigned to selected nodes. */
  terms_memory?: <<ByteSize>>
  pass:[/**] @property term_vectory_memory Total amount of memory used for term vectors across all shards assigned to selected nodes. */
  term_vectory_memory?: <<ByteSize>>
  pass:[/**] @property term_vectors_memory_in_bytes Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */
  term_vectors_memory_in_bytes: <<long>>
  pass:[/**] @property version_map_memory Total amount of memory used by all version maps across all shards assigned to selected nodes. */
  version_map_memory?: <<ByteSize>>
  pass:[/**] @property version_map_memory_in_bytes Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */
  version_map_memory_in_bytes: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[SequenceNumber]]
=== SequenceNumber

[pass]
++++
<pre>
++++
type SequenceNumber = <<long>>
[pass]
++++
</pre>
++++

[discrete]
[[Service]]
=== Service

[pass]
++++
<pre>
++++
type Service = string
[pass]
++++
</pre>
++++

[discrete]
[[ShardFailure]]
=== ShardFailure

[pass]
++++
<pre>
++++
interface ShardFailure {
  index?: <<IndexName>>
  node?: string
  reason: <<ErrorCause>>
  shard: <<integer>>
  status?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[ShardStatistics]]
=== ShardStatistics

[pass]
++++
<pre>
++++
interface ShardStatistics {
  failed: <<uint>>
  pass:[/**] @property successful Indicates how many shards have successfully run the search. */
  successful: <<uint>>
  pass:[/**] @property total Indicates how many shards the search will run on overall. */
  total: <<uint>>
  failures?: <<ShardFailure>>[]
  skipped?: <<uint>>
}
[pass]
++++
</pre>
++++

[discrete]
[[ShardsOperationResponseBase]]
=== ShardsOperationResponseBase

[pass]
++++
<pre>
++++
interface ShardsOperationResponseBase {
  _shards?: <<ShardStatistics>>
}
[pass]
++++
</pre>
++++

[discrete]
[[SlicedScroll]]
=== SlicedScroll

[pass]
++++
<pre>
++++
interface SlicedScroll {
  field?: <<Field>>
  id: <<Id>>
  max: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[Slices]]
=== Slices

[pass]
++++
<pre>
++++
type Slices = <<integer>> | <<SlicesCalculation>>
[pass]
++++
</pre>
++++

[discrete]
[[SlicesCalculation]]
=== SlicesCalculation

[pass]
++++
<pre>
++++
type SlicesCalculation = 'auto'
[pass]
++++
</pre>
++++

[discrete]
[[Sort]]
=== Sort

[pass]
++++
<pre>
++++
type Sort = <<SortCombinations>> | <<SortCombinations>>[]
[pass]
++++
</pre>
++++

[discrete]
[[SortCombinations]]
=== SortCombinations

[pass]
++++
<pre>
++++
type SortCombinations = <<Field>> | <<SortOptions>>
[pass]
++++
</pre>
++++

[discrete]
[[SortMode]]
=== SortMode

[pass]
++++
<pre>
++++
type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median'
[pass]
++++
</pre>
++++

[discrete]
[[SortOptions]]
=== SortOptions

[pass]
++++
<pre>
++++
interface SortOptionsKeys {
  _score?: <<ScoreSort>>
  _doc?: <<ScoreSort>>
  _geo_distance?: <<GeoDistanceSort>>
  _script?: <<ScriptSort>>
}
type SortOptions = SortOptionsKeys
& { [property: string]: <<FieldSort>> | <<SortOrder>> | <<ScoreSort>> | <<GeoDistanceSort>> | <<ScriptSort>> }
[pass]
++++
</pre>
++++
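
`Sort` accepts a bare field name, a `SortOptions` object, or an array mixing both. A minimal sketch (the field names are hypothetical):

[source,ts]
----
// Equivalent ways of expressing sort criteria, from terse to explicit.
const byField = 'price'                        // Field shorthand, ascending
const byOption = { price: { order: 'desc' } }  // SortOptions
const combined = [                             // SortCombinations[]
  { _score: { order: 'desc' } },
  { created_at: { order: 'asc', format: 'strict_date_optional_time' } },
  'price'
]
----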
|
||
|
||
[discrete]
|
||
[[SortOrder]]
|
||
=== SortOrder
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type SortOrder = 'asc' | 'desc'
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[SortResults]]
|
||
=== SortResults
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type SortResults = <<FieldValue>>[]
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[StandardRetriever]]
|
||
=== StandardRetriever
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface StandardRetriever extends <<RetrieverBase>> {
|
||
pass:[/**] @property query Defines a query to retrieve a set of top documents. */
|
||
query?: <<QueryDslQueryContainer>>
|
||
pass:[/**] @property search_after Defines a search after object parameter used for pagination. */
|
||
search_after?: <<SortResults>>
|
||
pass:[/**] @property terminate_after Maximum number of documents to collect for each shard. */
|
||
terminate_after?: <<integer>>
|
||
pass:[/**] @property sort A sort object that that specifies the order of matching documents. */
|
||
sort?: <<Sort>>
|
||
pass:[/**] @property collapse Collapses the top documents by a specified key into a single top document per key. */
|
||
collapse?: <<SearchFieldCollapse>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
[[StoreStats]]
=== StoreStats

[pass]
++++
<pre>
++++
interface StoreStats {
  pass:[/**] @property size Total size of all shards assigned to selected nodes. */
  size?: <<ByteSize>>
  pass:[/**] @property size_in_bytes Total size, in bytes, of all shards assigned to selected nodes. */
  size_in_bytes: <<long>>
  pass:[/**] @property reserved A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */
  reserved?: <<ByteSize>>
  pass:[/**] @property reserved_in_bytes A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */
  reserved_in_bytes: <<long>>
  pass:[/**] @property total_data_set_size Total data set size of all shards assigned to selected nodes. This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */
  total_data_set_size?: <<ByteSize>>
  pass:[/**] @property total_data_set_size_in_bytes Total data set size, in bytes, of all shards assigned to selected nodes. This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */
  total_data_set_size_in_bytes?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[StoredScript]]
=== StoredScript

[pass]
++++
<pre>
++++
interface StoredScript {
  pass:[/**] @property lang Specifies the language the script is written in. */
  lang: <<ScriptLanguage>>
  options?: Record<string, string>
  pass:[/**] @property source The script source. */
  source: string
}
[pass]
++++
</pre>
++++

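`StoredScript` is the body used when storing a script in the cluster state. A sketch that stores a hypothetical Painless script under an illustrative id:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function storeScript () {
  // The `script` value is a StoredScript: lang plus source
  // (the options map is rarely needed).
  await client.putScript({
    id: 'discounted-price', // hypothetical script id
    script: {
      lang: 'painless',
      source: "doc['price'].value * params.discount"
    }
  })
}
----
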
[discrete]
[[SuggestMode]]
=== SuggestMode

[pass]
++++
<pre>
++++
type SuggestMode = 'missing' | 'popular' | 'always'
[pass]
++++
</pre>
++++

[discrete]
[[SuggestionName]]
=== SuggestionName

[pass]
++++
<pre>
++++
type SuggestionName = string
[pass]
++++
</pre>
++++

[discrete]
[[TaskFailure]]
=== TaskFailure

[pass]
++++
<pre>
++++
interface TaskFailure {
  task_id: <<long>>
  node_id: <<NodeId>>
  status: string
  reason: <<ErrorCause>>
}
[pass]
++++
</pre>
++++

[discrete]
[[TaskId]]
=== TaskId

[pass]
++++
<pre>
++++
type TaskId = string | <<integer>>
[pass]
++++
</pre>
++++

[discrete]
[[TextEmbedding]]
=== TextEmbedding

[pass]
++++
<pre>
++++
interface TextEmbedding {
  model_id: string
  model_text: string
}
[pass]
++++
</pre>
++++

[discrete]
[[TextSimilarityReranker]]
=== TextSimilarityReranker

[pass]
++++
<pre>
++++
interface TextSimilarityReranker extends <<RetrieverBase>> {
  pass:[/**] @property retriever The nested retriever which will produce the first-level results that will later be used for reranking. */
  retriever: <<RetrieverContainer>>
  pass:[/**] @property rank_window_size This value determines how many documents we will consider from the nested retriever. */
  rank_window_size?: <<integer>>
  pass:[/**] @property inference_id Unique identifier of the inference endpoint created using the inference API. */
  inference_id?: string
  pass:[/**] @property inference_text The text snippet used as the basis for similarity comparison. */
  inference_text?: string
  pass:[/**] @property field The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text. */
  field?: string
}
[pass]
++++
</pre>
++++

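A `text_similarity_reranker` retriever wraps another retriever and re-scores its top `rank_window_size` hits with an inference endpoint. A sketch, assuming a cluster version with retriever support and a hypothetical `my-rerank-model` endpoint already created through the inference API:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function rerankedSearch (userQuery: string) {
  const result = await client.search({
    index: 'articles', // hypothetical index
    retriever: {
      text_similarity_reranker: {
        // First-level results come from the nested standard retriever.
        retriever: { standard: { query: { match: { body: userQuery } } } },
        rank_window_size: 50,
        inference_id: 'my-rerank-model', // hypothetical inference endpoint
        inference_text: userQuery,
        field: 'body'
      }
    }
  })
  return result.hits.hits
}
----
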
[discrete]
[[ThreadType]]
=== ThreadType

[pass]
++++
<pre>
++++
type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem'
[pass]
++++
</pre>
++++

[discrete]
[[TimeOfDay]]
=== TimeOfDay

[pass]
++++
<pre>
++++
type TimeOfDay = string
[pass]
++++
</pre>
++++

[discrete]
[[TimeUnit]]
=== TimeUnit

[pass]
++++
<pre>
++++
type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd'
[pass]
++++
</pre>
++++

[discrete]
[[TimeZone]]
=== TimeZone

[pass]
++++
<pre>
++++
type TimeZone = string
[pass]
++++
</pre>
++++

[discrete]
[[TopLeftBottomRightGeoBounds]]
=== TopLeftBottomRightGeoBounds

[pass]
++++
<pre>
++++
interface TopLeftBottomRightGeoBounds {
  top_left: <<GeoLocation>>
  bottom_right: <<GeoLocation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[TopRightBottomLeftGeoBounds]]
=== TopRightBottomLeftGeoBounds

[pass]
++++
<pre>
++++
interface TopRightBottomLeftGeoBounds {
  top_right: <<GeoLocation>>
  bottom_left: <<GeoLocation>>
}
[pass]
++++
</pre>
++++

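The two geo-bounds shapes describe the same box from opposite corners; which one you use depends on which corners you have. A sketch of a `geo_bounding_box` filter using the top-left/bottom-right form, with hypothetical `shops` index and `location` field:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function searchWithinBox () {
  return client.search({
    index: 'shops', // hypothetical index
    query: {
      geo_bounding_box: {
        location: { // a TopLeftBottomRightGeoBounds
          top_left: { lat: 52.6, lon: 4.7 },
          bottom_right: { lat: 52.2, lon: 5.1 }
        }
      }
    }
  })
}
----
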
[discrete]
[[TransformContainer]]
=== TransformContainer

[pass]
++++
<pre>
++++
interface TransformContainer {
  chain?: <<TransformContainer>>[]
  script?: <<ScriptTransform>>
  search?: <<SearchTransform>>
}
[pass]
++++
</pre>
++++

[discrete]
[[TranslogStats]]
=== TranslogStats

[pass]
++++
<pre>
++++
interface TranslogStats {
  earliest_last_modified_age: <<long>>
  operations: <<long>>
  size?: string
  size_in_bytes: <<long>>
  uncommitted_operations: <<integer>>
  uncommitted_size?: string
  uncommitted_size_in_bytes: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[TransportAddress]]
=== TransportAddress

[pass]
++++
<pre>
++++
type TransportAddress = string
[pass]
++++
</pre>
++++

[discrete]
[[UnitFloatMillis]]
=== UnitFloatMillis

[pass]
++++
<pre>
++++
type UnitFloatMillis = <<double>>
[pass]
++++
</pre>
++++

[discrete]
[[UnitMillis]]
=== UnitMillis

[pass]
++++
<pre>
++++
type UnitMillis = <<long>>
[pass]
++++
</pre>
++++

[discrete]
[[UnitNanos]]
=== UnitNanos

[pass]
++++
<pre>
++++
type UnitNanos = <<long>>
[pass]
++++
</pre>
++++

[discrete]
[[UnitSeconds]]
=== UnitSeconds

[pass]
++++
<pre>
++++
type UnitSeconds = <<long>>
[pass]
++++
</pre>
++++

[discrete]
[[Username]]
=== Username

[pass]
++++
<pre>
++++
type Username = string
[pass]
++++
</pre>
++++

[discrete]
[[Uuid]]
=== Uuid

[pass]
++++
<pre>
++++
type Uuid = string
[pass]
++++
</pre>
++++

[discrete]
[[VersionNumber]]
=== VersionNumber

[pass]
++++
<pre>
++++
type VersionNumber = <<long>>
[pass]
++++
</pre>
++++

[discrete]
[[VersionString]]
=== VersionString

[pass]
++++
<pre>
++++
type VersionString = string
[pass]
++++
</pre>
++++

[discrete]
[[VersionType]]
=== VersionType

[pass]
++++
<pre>
++++
type VersionType = 'internal' | 'external' | 'external_gte' | 'force'
[pass]
++++
</pre>
++++

[discrete]
[[WaitForActiveShardOptions]]
=== WaitForActiveShardOptions

[pass]
++++
<pre>
++++
type WaitForActiveShardOptions = 'all' | 'index-setting'
[pass]
++++
</pre>
++++

[discrete]
[[WaitForActiveShards]]
=== WaitForActiveShards

[pass]
++++
<pre>
++++
type WaitForActiveShards = <<integer>> | <<WaitForActiveShardOptions>>
[pass]
++++
</pre>
++++

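`WaitForActiveShards` appears on write-style APIs: pass a shard count, or `'all'` to require every copy to be active before the operation proceeds. A sketch using index creation with a hypothetical index name:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function createStrictIndex () {
  await client.indices.create({
    index: 'audit-events',        // hypothetical index
    // A WaitForActiveShardOptions value; an integer shard count works too.
    wait_for_active_shards: 'all'
  })
}
----
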
[discrete]
[[WaitForEvents]]
=== WaitForEvents

[pass]
++++
<pre>
++++
type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid'
[pass]
++++
</pre>
++++

[discrete]
[[WarmerStats]]
=== WarmerStats

[pass]
++++
<pre>
++++
interface WarmerStats {
  current: <<long>>
  total: <<long>>
  total_time?: <<Duration>>
  total_time_in_millis: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[WktGeoBounds]]
=== WktGeoBounds

[pass]
++++
<pre>
++++
interface WktGeoBounds {
  wkt: string
}
[pass]
++++
</pre>
++++

[discrete]
[[WriteResponseBase]]
=== WriteResponseBase

[pass]
++++
<pre>
++++
interface WriteResponseBase {
  _id: <<Id>>
  _index: <<IndexName>>
  _primary_term?: <<long>>
  result: <<Result>>
  _seq_no?: <<SequenceNumber>>
  _shards: <<ShardStatistics>>
  _version: <<VersionNumber>>
  forced_refresh?: boolean
}
[pass]
++++
</pre>
++++

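Every single-document write (index, delete, update) resolves to a `WriteResponseBase`; `result` reports what actually happened, while `_seq_no` and `_primary_term` support optimistic concurrency control. A sketch with a hypothetical `articles` index:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function upsertDoc () {
  const response = await client.index({
    index: 'articles', // hypothetical index
    id: '1',
    document: { title: 'Shared types' }
  })
  // The response is a WriteResponseBase:
  // result is 'created' on first write, 'updated' afterwards.
  console.log(response.result, response._seq_no, response._primary_term)
}
----
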
[discrete]
[[byte]]
=== byte

[pass]
++++
<pre>
++++
type byte = number
[pass]
++++
</pre>
++++

[discrete]
[[double]]
=== double

[pass]
++++
<pre>
++++
type double = number
[pass]
++++
</pre>
++++

[discrete]
[[float]]
=== float

[pass]
++++
<pre>
++++
type float = number
[pass]
++++
</pre>
++++

[discrete]
[[integer]]
=== integer

[pass]
++++
<pre>
++++
type integer = number
[pass]
++++
</pre>
++++

[discrete]
[[long]]
=== long

[pass]
++++
<pre>
++++
type long = number
[pass]
++++
</pre>
++++

[discrete]
[[short]]
=== short

[pass]
++++
<pre>
++++
type short = number
[pass]
++++
</pre>
++++

[discrete]
[[uint]]
=== uint

[pass]
++++
<pre>
++++
type uint = number
[pass]
++++
</pre>
++++

[discrete]
[[ulong]]
=== ulong

[pass]
++++
<pre>
++++
type ulong = number
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsBaseNode]]
=== SpecUtilsBaseNode

[pass]
++++
<pre>
++++
interface SpecUtilsBaseNode {
  attributes: Record<string, string>
  host: <<Host>>
  ip: <<Ip>>
  name: <<Name>>
  roles?: <<NodeRoles>>
  transport_address: <<TransportAddress>>
}
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsNullValue]]
=== SpecUtilsNullValue

[pass]
++++
<pre>
++++
type SpecUtilsNullValue = null
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsPipeSeparatedFlags]]
=== SpecUtilsPipeSeparatedFlags

[pass]
++++
<pre>
++++
type SpecUtilsPipeSeparatedFlags<T = unknown> = T | string
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsStringified]]
=== SpecUtilsStringified

[pass]
++++
<pre>
++++
type SpecUtilsStringified<T = unknown> = T | string
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsVoid]]
=== SpecUtilsVoid

[pass]
++++
<pre>
++++

[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsWithNullValue]]
=== SpecUtilsWithNullValue

[pass]
++++
<pre>
++++
type SpecUtilsWithNullValue<T = unknown> = T | <<SpecUtilsNullValue>>
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsAdditionalProperties]]
=== SpecUtilsAdditionalProperties

[pass]
++++
<pre>
++++
interface SpecUtilsAdditionalProperties<TKey = unknown, TValue = unknown> {
}
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsAdditionalProperty]]
=== SpecUtilsAdditionalProperty

[pass]
++++
<pre>
++++
interface SpecUtilsAdditionalProperty<TKey = unknown, TValue = unknown> {
}
[pass]
++++
</pre>
++++

[discrete]
[[SpecUtilsCommonQueryParameters]]
=== SpecUtilsCommonQueryParameters

[pass]
++++
<pre>
++++
interface SpecUtilsCommonQueryParameters {
  pass:[/**] @property error_trace When set to `true` Elasticsearch will include the full stack trace of errors when they occur. */
  error_trace?: boolean
  pass:[/**] @property filter_path Comma-separated list of filters in dot notation which reduce the response returned by Elasticsearch. */
  filter_path?: string | string[]
  pass:[/**] @property human When set to `true` will return statistics in a format suitable for humans. For example `"exists_time": "1h"` for humans and `"exists_time_in_millis": 3600000` for computers. When disabled the human readable values will be omitted. This makes sense for responses being consumed only by machines. */
  human?: boolean
  pass:[/**] @property pretty If set to `true` the returned JSON will be "pretty-formatted". Use this option for debugging only. */
  pretty?: boolean
}
[pass]
++++
</pre>
++++

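These parameters are merged into every request type, so any API accepts them alongside its own options; `filter_path` in particular can shrink large responses dramatically. A sketch that trims a search response down to just the hit sources, using a hypothetical index:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function slimSearch () {
  const response = await client.search({
    index: 'articles', // hypothetical index
    query: { match_all: {} },
    filter_path: 'hits.hits._source', // only the sources come back
    human: true                        // durations/sizes in readable form
  })
  return response
}
----
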
[discrete]
[[SpecUtilsCommonCatQueryParameters]]
=== SpecUtilsCommonCatQueryParameters

[pass]
++++
<pre>
++++
interface SpecUtilsCommonCatQueryParameters {
  pass:[/**] @property format Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. */
  format?: string
  pass:[/**] @property h List of columns to appear in the response. Supports simple wildcards. */
  h?: <<Names>>
  pass:[/**] @property help When set to `true` will output available columns. This option can't be combined with any other query string option. */
  help?: boolean
  pass:[/**] @property local If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */
  local?: boolean
  pass:[/**] @property master_timeout Period to wait for a connection to the master node. */
  master_timeout?: <<Duration>>
  pass:[/**] @property s List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */
  s?: <<Names>>
  pass:[/**] @property v When set to `true` will enable verbose output. */
  v?: boolean
}
[pass]
++++
</pre>
++++

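The cat APIs share these parameters on top of the common ones. A sketch listing indices as JSON, selecting and sorting columns (column names here follow the cat indices API):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function listIndices () {
  const rows = await client.cat.indices({
    format: 'json',             // columnar data as JSON instead of text
    h: ['index', 'docs.count'], // columns to include
    s: 'docs.count:desc'        // sort spec; ascending unless :desc is appended
  })
  console.log(rows)
}
----
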
[discrete]
[[SpecUtilsOverloadOf]]
=== SpecUtilsOverloadOf

[pass]
++++
<pre>
++++
interface SpecUtilsOverloadOf<TDefinition = unknown> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAdjacencyMatrixAggregate]]
=== AggregationsAdjacencyMatrixAggregate

[pass]
++++
<pre>
++++
interface AggregationsAdjacencyMatrixAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsAdjacencyMatrixBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAdjacencyMatrixAggregation]]
=== AggregationsAdjacencyMatrixAggregation

[pass]
++++
<pre>
++++
interface AggregationsAdjacencyMatrixAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property filters Filters used to create buckets. At least one filter is required. */
  filters?: Record<string, <<QueryDslQueryContainer>>>
  pass:[/**] @property separator Separator used to concatenate filter names. Defaults to &. */
  separator?: string
}
[pass]
++++
</pre>
++++

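Each response bucket of an adjacency matrix corresponds to a named filter or to a pair of intersecting filters, with keys joined by `separator`. A sketch counting the overlap between two hypothetical tag filters:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function tagOverlap () {
  const result = await client.search({
    index: 'articles', // hypothetical index
    size: 0,
    aggs: {
      interactions: {
        adjacency_matrix: {
          filters: {
            ts: { term: { tags: 'typescript' } },
            es: { term: { tags: 'elasticsearch' } }
          }
        }
      }
    }
  })
  // Buckets arrive keyed 'ts', 'es', and 'ts&es' for the intersection.
  console.log(result.aggregations)
}
----
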
[discrete]
[[AggregationsAdjacencyMatrixBucket]]
=== AggregationsAdjacencyMatrixBucket

[pass]
++++
<pre>
++++
interface AggregationsAdjacencyMatrixBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: string
}
type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
& { [property: string]: <<AggregationsAggregate>> | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAggregate]]
=== AggregationsAggregate

[pass]
++++
<pre>
++++
type AggregationsAggregate = <<AggregationsCardinalityAggregate>> | <<AggregationsHdrPercentilesAggregate>> | <<AggregationsHdrPercentileRanksAggregate>> | <<AggregationsTDigestPercentilesAggregate>> | <<AggregationsTDigestPercentileRanksAggregate>> | <<AggregationsPercentilesBucketAggregate>> | <<AggregationsMedianAbsoluteDeviationAggregate>> | <<AggregationsMinAggregate>> | <<AggregationsMaxAggregate>> | <<AggregationsSumAggregate>> | <<AggregationsAvgAggregate>> | <<AggregationsWeightedAvgAggregate>> | <<AggregationsValueCountAggregate>> | <<AggregationsSimpleValueAggregate>> | <<AggregationsDerivativeAggregate>> | <<AggregationsBucketMetricValueAggregate>> | <<AggregationsStatsAggregate>> | <<AggregationsStatsBucketAggregate>> | <<AggregationsExtendedStatsAggregate>> | <<AggregationsExtendedStatsBucketAggregate>> | <<AggregationsGeoBoundsAggregate>> | <<AggregationsGeoCentroidAggregate>> | <<AggregationsHistogramAggregate>> | <<AggregationsDateHistogramAggregate>> | <<AggregationsAutoDateHistogramAggregate>> | <<AggregationsVariableWidthHistogramAggregate>> | <<AggregationsStringTermsAggregate>> | <<AggregationsLongTermsAggregate>> | <<AggregationsDoubleTermsAggregate>> | <<AggregationsUnmappedTermsAggregate>> | <<AggregationsLongRareTermsAggregate>> | <<AggregationsStringRareTermsAggregate>> | <<AggregationsUnmappedRareTermsAggregate>> | <<AggregationsMultiTermsAggregate>> | <<AggregationsMissingAggregate>> | <<AggregationsNestedAggregate>> | <<AggregationsReverseNestedAggregate>> | <<AggregationsGlobalAggregate>> | <<AggregationsFilterAggregate>> | <<AggregationsChildrenAggregate>> | <<AggregationsParentAggregate>> | <<AggregationsSamplerAggregate>> | <<AggregationsUnmappedSamplerAggregate>> | <<AggregationsGeoHashGridAggregate>> | <<AggregationsGeoTileGridAggregate>> | <<AggregationsGeoHexGridAggregate>> | <<AggregationsRangeAggregate>> | <<AggregationsDateRangeAggregate>> | <<AggregationsGeoDistanceAggregate>> | <<AggregationsIpRangeAggregate>> | <<AggregationsIpPrefixAggregate>> | <<AggregationsFiltersAggregate>> | <<AggregationsAdjacencyMatrixAggregate>> | <<AggregationsSignificantLongTermsAggregate>> | <<AggregationsSignificantStringTermsAggregate>> | <<AggregationsUnmappedSignificantTermsAggregate>> | <<AggregationsCompositeAggregate>> | <<AggregationsFrequentItemSetsAggregate>> | <<AggregationsTimeSeriesAggregate>> | <<AggregationsScriptedMetricAggregate>> | <<AggregationsTopHitsAggregate>> | <<AggregationsInferenceAggregate>> | <<AggregationsStringStatsAggregate>> | <<AggregationsBoxPlotAggregate>> | <<AggregationsTopMetricsAggregate>> | <<AggregationsTTestAggregate>> | <<AggregationsRateAggregate>> | <<AggregationsCumulativeCardinalityAggregate>> | <<AggregationsMatrixStatsAggregate>> | <<AggregationsGeoLineAggregate>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAggregateBase]]
=== AggregationsAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsAggregateBase {
  meta?: <<Metadata>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAggregateOrder]]
=== AggregationsAggregateOrder

[pass]
++++
<pre>
++++
type AggregationsAggregateOrder = Partial<Record<<<Field>>, <<SortOrder>>>> | Partial<Record<<<Field>>, <<SortOrder>>>>[]
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAggregation]]
=== AggregationsAggregation

[pass]
++++
<pre>
++++
interface AggregationsAggregation {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAggregationContainer]]
=== AggregationsAggregationContainer

[pass]
++++
<pre>
++++
interface AggregationsAggregationContainer {
  pass:[/**] @property aggregations Sub-aggregations for this aggregation. Only applies to bucket aggregations. */
  aggregations?: Record<string, <<AggregationsAggregationContainer>>>
  pass:[/**] @property aggs Sub-aggregations for this aggregation. Only applies to bucket aggregations. */
  aggs?: Record<string, <<AggregationsAggregationContainer>>>
  meta?: <<Metadata>>
  pass:[/**] @property adjacency_matrix A bucket aggregation returning a form of adjacency matrix. The request provides a collection of named filter expressions, similar to the `filters` aggregation. Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. */
  adjacency_matrix?: <<AggregationsAdjacencyMatrixAggregation>>
  pass:[/**] @property auto_date_histogram A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */
  auto_date_histogram?: <<AggregationsAutoDateHistogramAggregation>>
  pass:[/**] @property avg A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */
  avg?: <<AggregationsAverageAggregation>>
  pass:[/**] @property avg_bucket A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */
  avg_bucket?: <<AggregationsAverageBucketAggregation>>
  pass:[/**] @property boxplot A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */
  boxplot?: <<AggregationsBoxplotAggregation>>
  pass:[/**] @property bucket_script A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */
  bucket_script?: <<AggregationsBucketScriptAggregation>>
  pass:[/**] @property bucket_selector A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */
  bucket_selector?: <<AggregationsBucketSelectorAggregation>>
  pass:[/**] @property bucket_sort A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */
  bucket_sort?: <<AggregationsBucketSortAggregation>>
  pass:[/**] @property bucket_count_ks_test A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. */
  bucket_count_ks_test?: <<AggregationsBucketKsAggregation>>
  pass:[/**] @property bucket_correlation A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. */
  bucket_correlation?: <<AggregationsBucketCorrelationAggregation>>
  pass:[/**] @property cardinality A single-value metrics aggregation that calculates an approximate count of distinct values. */
  cardinality?: <<AggregationsCardinalityAggregation>>
  pass:[/**] @property categorize_text A multi-bucket aggregation that groups semi-structured text into buckets. */
  categorize_text?: <<AggregationsCategorizeTextAggregation>>
  pass:[/**] @property children A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */
  children?: <<AggregationsChildrenAggregation>>
  pass:[/**] @property composite A multi-bucket aggregation that creates composite buckets from different sources. Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */
  composite?: <<AggregationsCompositeAggregation>>
  pass:[/**] @property cumulative_cardinality A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */
  cumulative_cardinality?: <<AggregationsCumulativeCardinalityAggregation>>
  pass:[/**] @property cumulative_sum A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */
  cumulative_sum?: <<AggregationsCumulativeSumAggregation>>
  pass:[/**] @property date_histogram A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. */
  date_histogram?: <<AggregationsDateHistogramAggregation>>
  pass:[/**] @property date_range A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */
  date_range?: <<AggregationsDateRangeAggregation>>
  pass:[/**] @property derivative A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */
  derivative?: <<AggregationsDerivativeAggregation>>
  pass:[/**] @property diversified_sampler A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */
  diversified_sampler?: <<AggregationsDiversifiedSamplerAggregation>>
  pass:[/**] @property extended_stats A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */
  extended_stats?: <<AggregationsExtendedStatsAggregation>>
  pass:[/**] @property extended_stats_bucket A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */
  extended_stats_bucket?: <<AggregationsExtendedStatsBucketAggregation>>
  pass:[/**] @property frequent_item_sets A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */
  frequent_item_sets?: <<AggregationsFrequentItemSetsAggregation>>
  pass:[/**] @property filter A single bucket aggregation that narrows the set of documents to those that match a query. */
  filter?: <<QueryDslQueryContainer>>
  pass:[/**] @property filters A multi-bucket aggregation where each bucket contains the documents that match a query. */
  filters?: <<AggregationsFiltersAggregation>>
  pass:[/**] @property geo_bounds A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */
  geo_bounds?: <<AggregationsGeoBoundsAggregation>>
  pass:[/**] @property geo_centroid A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */
  geo_centroid?: <<AggregationsGeoCentroidAggregation>>
  pass:[/**] @property geo_distance A multi-bucket aggregation that works on `geo_point` fields. Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */
  geo_distance?: <<AggregationsGeoDistanceAggregation>>
  pass:[/**] @property geohash_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell is labeled using a geohash which is of user-definable precision. */
  geohash_grid?: <<AggregationsGeoHashGridAggregation>>
  pass:[/**] @property geo_line Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */
  geo_line?: <<AggregationsGeoLineAggregation>>
  pass:[/**] @property geotile_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell corresponds to a map tile as used by many online map sites. */
  geotile_grid?: <<AggregationsGeoTileGridAggregation>>
  pass:[/**] @property geohex_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. */
  geohex_grid?: <<AggregationsGeohexGridAggregation>>
  pass:[/**] @property global Defines a single bucket of all the documents within the search execution context. This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */
  global?: <<AggregationsGlobalAggregation>>
  pass:[/**] @property histogram A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. */
  histogram?: <<AggregationsHistogramAggregation>>
  pass:[/**] @property ip_range A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */
  ip_range?: <<AggregationsIpRangeAggregation>>
  pass:[/**] @property ip_prefix A bucket aggregation that groups documents based on the network or sub-network of an IP address. */
  ip_prefix?: <<AggregationsIpPrefixAggregation>>
  pass:[/**] @property inference A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */
  inference?: <<AggregationsInferenceAggregation>>
  line?: <<AggregationsGeoLineAggregation>>
  pass:[/**] @property matrix_stats A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`. */
  matrix_stats?: <<AggregationsMatrixStatsAggregation>>
  pass:[/**] @property max A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */
  max?: <<AggregationsMaxAggregation>>
  pass:[/**] @property max_bucket A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */
  max_bucket?: <<AggregationsMaxBucketAggregation>>
  pass:[/**] @property median_absolute_deviation A single-value aggregation that approximates the median absolute deviation of its search results. */
  median_absolute_deviation?: <<AggregationsMedianAbsoluteDeviationAggregation>>
  pass:[/**] @property min A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */
  min?: <<AggregationsMinAggregation>>
  pass:[/**] @property min_bucket A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */
  min_bucket?: <<AggregationsMinBucketAggregation>>
  pass:[/**] @property missing A field data based single bucket aggregation, that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */
  missing?: <<AggregationsMissingAggregation>>
  moving_avg?: <<AggregationsMovingAverageAggregation>>
  pass:[/**] @property moving_percentiles Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */
  moving_percentiles?: <<AggregationsMovingPercentilesAggregation>>
  pass:[/**] @property moving_fn Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */
  moving_fn?: <<AggregationsMovingFunctionAggregation>>
  pass:[/**] @property multi_terms A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */
  multi_terms?: <<AggregationsMultiTermsAggregation>>
  pass:[/**] @property nested A special single bucket aggregation that enables aggregating nested documents. */
  nested?: <<AggregationsNestedAggregation>>
  pass:[/**] @property normalize A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */
  normalize?: <<AggregationsNormalizeAggregation>>
  pass:[/**] @property parent A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */
  parent?: <<AggregationsParentAggregation>>
  pass:[/**] @property percentile_ranks A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. */
  percentile_ranks?: <<AggregationsPercentileRanksAggregation>>
  pass:[/**] @property percentiles A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */
  percentiles?: <<AggregationsPercentilesAggregation>>
  pass:[/**] @property percentiles_bucket A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. */
  percentiles_bucket?: <<AggregationsPercentilesBucketAggregation>>
  pass:[/**] @property range A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */
  range?: <<AggregationsRangeAggregation>>
  pass:[/**] @property rare_terms A multi-bucket value source based aggregation which finds "rare" terms — terms that are at the long-tail of the distribution and are not frequent. */
  rare_terms?: <<AggregationsRareTermsAggregation>>
  pass:[/**] @property rate Calculates a rate of documents or a field in each bucket. Can only be used inside a `date_histogram` or `composite` aggregation. */
  rate?: <<AggregationsRateAggregation>>
  pass:[/**] @property reverse_nested A special single bucket aggregation that enables aggregating on parent documents from nested documents. Should only be defined inside a `nested` aggregation. */
  reverse_nested?: <<AggregationsReverseNestedAggregation>>
  pass:[/**] @property random_sampler A single bucket aggregation that randomly includes documents in the aggregated results. Sampling provides significant speed improvement at the cost of accuracy. */
  random_sampler?: <<AggregationsRandomSamplerAggregation>>
  pass:[/**] @property sampler A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */
  sampler?: <<AggregationsSamplerAggregation>>
  pass:[/**] @property scripted_metric A metric aggregation that uses scripts to provide a metric output. */
  scripted_metric?: <<AggregationsScriptedMetricAggregation>>
  pass:[/**] @property serial_diff An aggregation that subtracts values in a time series from themselves at different time lags or periods. */
  serial_diff?: <<AggregationsSerialDifferencingAggregation>>
  pass:[/**] @property significant_terms Returns interesting or unusual occurrences of terms in a set. */
  significant_terms?: <<AggregationsSignificantTermsAggregation>>
  pass:[/**] @property significant_text Returns interesting or unusual occurrences of free-text terms in a set. */
  significant_text?: <<AggregationsSignificantTextAggregation>>
  pass:[/**] @property stats A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */
  stats?: <<AggregationsStatsAggregation>>
  pass:[/**] @property stats_bucket A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */
  stats_bucket?: <<AggregationsStatsBucketAggregation>>
  pass:[/**] @property string_stats A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */
  string_stats?: <<AggregationsStringStatsAggregation>>
  pass:[/**] @property sum A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */
  sum?: <<AggregationsSumAggregation>>
  pass:[/**] @property sum_bucket A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */
  sum_bucket?: <<AggregationsSumBucketAggregation>>
  pass:[/**] @property terms A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */
  terms?: <<AggregationsTermsAggregation>>
  pass:[/**] @property time_series The time series aggregation queries data created using a time series index. This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. */
  time_series?: <<AggregationsTimeSeriesAggregation>>
  pass:[/**] @property top_hits A metric aggregation that returns the top matching documents per bucket. */
  top_hits?: <<AggregationsTopHitsAggregation>>
  pass:[/**] @property t_test A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */
  t_test?: <<AggregationsTTestAggregation>>
  pass:[/**] @property top_metrics A metric aggregation that selects metrics from the document with the largest or smallest sort value. */
  top_metrics?: <<AggregationsTopMetricsAggregation>>
  pass:[/**] @property value_count A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */
  value_count?: <<AggregationsValueCountAggregation>>
  pass:[/**] @property weighted_avg A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */
  weighted_avg?: <<AggregationsWeightedAverageAggregation>>
  pass:[/**] @property variable_width_histogram A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */
  variable_width_histogram?: <<AggregationsVariableWidthHistogramAggregation>>
}
[pass]
++++
</pre>
++++

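The container is recursive: any bucket aggregation can carry an `aggs` map of further containers, which is how sub-aggregations nest. A sketch computing an average inside a terms aggregation, with hypothetical index and field names:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function avgPricePerCategory () {
  const result = await client.search({
    index: 'products', // hypothetical index
    size: 0,
    aggs: {            // a map of AggregationsAggregationContainer
      categories: {
        terms: { field: 'category' },
        aggs: {        // nested container, evaluated once per bucket
          avg_price: { avg: { field: 'price' } }
        }
      }
    }
  })
  console.log(result.aggregations)
}
----
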
[discrete]
[[AggregationsAggregationRange]]
=== AggregationsAggregationRange

[pass]
++++
<pre>
++++
interface AggregationsAggregationRange {
  pass:[/**] @property from Start of the range (inclusive). */
  from?: <<double>> | null
  pass:[/**] @property key Custom key to return the range with. */
  key?: string
  pass:[/**] @property to End of the range (exclusive). */
  to?: <<double>> | null
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsArrayPercentilesItem]]
=== AggregationsArrayPercentilesItem

[pass]
++++
<pre>
++++
interface AggregationsArrayPercentilesItem {
  key: string
  value: <<double>> | null
  value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAutoDateHistogramAggregate]]
=== AggregationsAutoDateHistogramAggregate

[pass]
++++
<pre>
++++
interface AggregationsAutoDateHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsDateHistogramBucket>>> {
  interval: <<DurationLarge>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAutoDateHistogramAggregation]]
=== AggregationsAutoDateHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsAutoDateHistogramAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property buckets The target number of buckets. */
  buckets?: <<integer>>
  pass:[/**] @property field The field on which to run the aggregation. */
  field?: <<Field>>
  pass:[/**] @property format The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. */
  format?: string
  pass:[/**] @property minimum_interval The minimum rounding interval. This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */
  minimum_interval?: <<AggregationsMinimumInterval>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<DateTime>>
  pass:[/**] @property offset Time zone specified as an ISO 8601 UTC offset. */
  offset?: string
  params?: Record<string, any>
  script?: <<Script>> | string
  pass:[/**] @property time_zone Time zone ID. */
  time_zone?: <<TimeZone>>
}
[pass]
++++
</pre>
++++

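With `auto_date_histogram` you fix the bucket count and let the server pick the interval; the chosen interval then comes back on the aggregate. A sketch, assuming a hypothetical `logs` index with an `@timestamp` field:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function trafficOverTime () {
  const result = await client.search({
    index: 'logs', // hypothetical index
    size: 0,
    aggs: {
      over_time: {
        auto_date_histogram: {
          field: '@timestamp',
          buckets: 20,                // target bucket count, not an interval
          minimum_interval: 'minute'  // never round finer than one minute
        }
      }
    }
  })
  console.log(result.aggregations)
}
----
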
[discrete]
[[AggregationsAverageAggregation]]
=== AggregationsAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsAverageAggregation extends <<AggregationsFormatMetricAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAverageBucketAggregation]]
=== AggregationsAverageBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsAverageBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsAvgAggregate]]
=== AggregationsAvgAggregate

[pass]
++++
<pre>
++++
interface AggregationsAvgAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBoxPlotAggregate]]
=== AggregationsBoxPlotAggregate

[pass]
++++
<pre>
++++
interface AggregationsBoxPlotAggregate extends <<AggregationsAggregateBase>> {
  min: <<double>>
  max: <<double>>
  q1: <<double>>
  q2: <<double>>
  q3: <<double>>
  lower: <<double>>
  upper: <<double>>
  min_as_string?: string
  max_as_string?: string
  q1_as_string?: string
  q2_as_string?: string
  q3_as_string?: string
  lower_as_string?: string
  upper_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBoxplotAggregation]]
=== AggregationsBoxplotAggregation

[pass]
++++
<pre>
++++
interface AggregationsBoxplotAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property compression Limits the maximum number of nodes used by the underlying <<TDigest>> algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
  compression?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketAggregationBase]]
=== AggregationsBucketAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsBucketAggregationBase {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketCorrelationAggregation]]
=== AggregationsBucketCorrelationAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketCorrelationAggregation extends <<AggregationsBucketPathAggregation>> {
  pass:[/**] @property function The correlation function to execute. */
  function: <<AggregationsBucketCorrelationFunction>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketCorrelationFunction]]
=== AggregationsBucketCorrelationFunction

[pass]
++++
<pre>
++++
interface AggregationsBucketCorrelationFunction {
  pass:[/**] @property count_correlation The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. */
  count_correlation: <<AggregationsBucketCorrelationFunctionCountCorrelation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketCorrelationFunctionCountCorrelation]]
=== AggregationsBucketCorrelationFunctionCountCorrelation

[pass]
++++
<pre>
++++
interface AggregationsBucketCorrelationFunctionCountCorrelation {
  pass:[/**] @property indicator The indicator with which to correlate the configured `bucket_path` values. */
  indicator: <<AggregationsBucketCorrelationFunctionCountCorrelationIndicator>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketCorrelationFunctionCountCorrelationIndicator]]
=== AggregationsBucketCorrelationFunctionCountCorrelationIndicator

[pass]
++++
<pre>
++++
interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator {
  pass:[/**] @property doc_count The total number of documents that initially created the expectations. It’s required to be greater than or equal to the sum of all values in the buckets_path as this is the originating superset of data to which the term values are correlated. */
  doc_count: <<integer>>
  pass:[/**] @property expectations An array of numbers with which to correlate the configured `bucket_path` values. The length of this value must always equal the number of buckets returned by the `bucket_path`. */
  expectations: <<double>>[]
  pass:[/**] @property fractions An array of fractions to use when averaging and calculating variance. This should be used if the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, must equal expectations. */
  fractions?: <<double>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketKsAggregation]]
=== AggregationsBucketKsAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketKsAggregation extends <<AggregationsBucketPathAggregation>> {
  pass:[/**] @property alternative A list of string values indicating which K-S test alternative to calculate. The valid values are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used when calculating the K-S test. Default value is all possible alternative hypotheses. */
  alternative?: string[]
  pass:[/**] @property fractions A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a metric to define the bucket end points. */
  fractions?: <<double>>[]
  pass:[/**] @property sampling_method Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, and `lower_tail`. */
  sampling_method?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketMetricValueAggregate]]
=== AggregationsBucketMetricValueAggregate

[pass]
++++
<pre>
++++
interface AggregationsBucketMetricValueAggregate extends <<AggregationsSingleMetricAggregateBase>> {
  keys: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketPathAggregation]]
=== AggregationsBucketPathAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketPathAggregation {
  pass:[/**] @property buckets_path Path to the buckets that contain one set of values to correlate. */
  buckets_path?: <<AggregationsBucketsPath>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketScriptAggregation]]
=== AggregationsBucketScriptAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketScriptAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property script The script to run for this aggregation. */
  script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketSelectorAggregation]]
=== AggregationsBucketSelectorAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketSelectorAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property script The script to run for this aggregation. */
  script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketSortAggregation]]
=== AggregationsBucketSortAggregation

[pass]
++++
<pre>
++++
interface AggregationsBucketSortAggregation {
  pass:[/**] @property from Buckets in positions prior to `from` will be truncated. */
  from?: <<integer>>
  pass:[/**] @property gap_policy The policy to apply when gaps are found in the data. */
  gap_policy?: <<AggregationsGapPolicy>>
  pass:[/**] @property size The number of buckets to return. Defaults to all buckets of the parent aggregation. */
  size?: <<integer>>
  pass:[/**] @property sort The list of fields to sort on. */
  sort?: <<Sort>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBuckets]]
=== AggregationsBuckets

[pass]
++++
<pre>
++++
type AggregationsBuckets<TBucket = unknown> = Record<string, TBucket> | TBucket[]
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsBucketsPath]]
=== AggregationsBucketsPath

[pass]
++++
<pre>
++++
type AggregationsBucketsPath = string | string[] | Record<string, string>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCalendarInterval]]
=== AggregationsCalendarInterval

[pass]
++++
<pre>
++++
type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCardinalityAggregate]]
=== AggregationsCardinalityAggregate

[pass]
++++
<pre>
++++
interface AggregationsCardinalityAggregate extends <<AggregationsAggregateBase>> {
  value: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCardinalityAggregation]]
=== AggregationsCardinalityAggregation

[pass]
++++
<pre>
++++
interface AggregationsCardinalityAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property precision_threshold A unique count below which counts are expected to be close to accurate. This allows you to trade memory for accuracy. */
  precision_threshold?: <<integer>>
  rehash?: boolean
  pass:[/**] @property execution_hint Mechanism by which the cardinality aggregation is run. */
  execution_hint?: <<AggregationsCardinalityExecutionMode>>
}
[pass]
++++
</pre>
++++

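`precision_threshold` trades memory for accuracy in the approximate distinct count behind `cardinality`: counts below the threshold stay near-exact at the cost of more memory per shard. A sketch counting distinct users, with hypothetical index and field names:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function distinctUsers () {
  const result = await client.search({
    index: 'logs', // hypothetical index
    size: 0,
    aggs: {
      unique_users: {
        cardinality: {
          field: 'user.id',
          precision_threshold: 3000 // near-exact below this many distinct values
        }
      }
    }
  })
  console.log(result.aggregations)
}
----
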
[discrete]
|
||
[[AggregationsCardinalityExecutionMode]]
|
||
=== AggregationsCardinalityExecutionMode
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic'
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++

[discrete]
[[AggregationsCategorizeTextAggregation]]
=== AggregationsCategorizeTextAggregation

[pass]
++++
<pre>
++++
interface AggregationsCategorizeTextAggregation {
  pass:[/**] @property field The semi-structured text field to categorize. */
  field: <<Field>>
  pass:[/**] @property max_unique_tokens The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. Smaller values use less memory and create fewer categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. */
  max_unique_tokens?: <<integer>>
  pass:[/**] @property max_matched_tokens The maximum number of token positions to match on before attempting to merge categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. */
  max_matched_tokens?: <<integer>>
  pass:[/**] @property similarity_threshold The minimum percentage of tokens that must match for text to be added to the category bucket. Must be between 1 and 100. The larger the value, the narrower the categories. Larger values will increase memory usage and create narrower categories. */
  similarity_threshold?: <<integer>>
  pass:[/**] @property categorization_filters This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine-tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */
  categorization_filters?: string[]
  pass:[/**] @property categorization_analyzer The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. The syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property cannot be used at the same time as categorization_filters. */
  categorization_analyzer?: <<AggregationsCategorizeTextAnalyzer>>
  pass:[/**] @property shard_size The number of categorization buckets to return from each shard before merging all the results. */
  shard_size?: <<integer>>
  pass:[/**] @property size The number of buckets to return. */
  size?: <<integer>>
  pass:[/**] @property min_doc_count The minimum number of documents in a bucket for it to be returned in the results. */
  min_doc_count?: <<integer>>
  pass:[/**] @property shard_min_doc_count The minimum number of documents in a bucket to be returned from the shard before merging. */
  shard_min_doc_count?: <<integer>>
}
[pass]
++++
</pre>
++++
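
A minimal `categorize_text` request might look like this sketch (the `app-logs` index and `message` field are assumptions):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Group free-form log messages into categories; the regular expression
// in `categorization_filters` strips bracketed prefixes before tokenization.
const result = await client.search({
  index: 'app-logs',
  size: 0,
  aggs: {
    log_categories: {
      categorize_text: {
        field: 'message',
        similarity_threshold: 70,
        categorization_filters: ['\\[.*?\\]']
      }
    }
  }
})
----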

[discrete]
[[AggregationsCategorizeTextAnalyzer]]
=== AggregationsCategorizeTextAnalyzer

[pass]
++++
<pre>
++++
type AggregationsCategorizeTextAnalyzer = string | <<AggregationsCustomCategorizeTextAnalyzer>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsChiSquareHeuristic]]
=== AggregationsChiSquareHeuristic

[pass]
++++
<pre>
++++
interface AggregationsChiSquareHeuristic {
  pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */
  background_is_superset: boolean
  pass:[/**] @property include_negatives Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */
  include_negatives: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsChildrenAggregate]]
=== AggregationsChildrenAggregate

[pass]
++++
<pre>
++++
interface AggregationsChildrenAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys
& { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsChildrenAggregation]]
=== AggregationsChildrenAggregation

[pass]
++++
<pre>
++++
interface AggregationsChildrenAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property type The child type that should be selected. */
  type?: <<RelationName>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeAggregate]]
=== AggregationsCompositeAggregate

[pass]
++++
<pre>
++++
interface AggregationsCompositeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsCompositeBucket>>> {
  after_key?: <<AggregationsCompositeAggregateKey>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeAggregateKey]]
=== AggregationsCompositeAggregateKey

[pass]
++++
<pre>
++++
type AggregationsCompositeAggregateKey = Record<<<Field>>, <<FieldValue>>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeAggregation]]
=== AggregationsCompositeAggregation

[pass]
++++
<pre>
++++
interface AggregationsCompositeAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property after When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */
  after?: <<AggregationsCompositeAggregateKey>>
  pass:[/**] @property size The number of composite buckets that should be returned. */
  size?: <<integer>>
  pass:[/**] @property sources The value sources used to build composite buckets. Keys are returned in the order of the `sources` definition. */
  sources?: Record<string, <<AggregationsCompositeAggregationSource>>>[]
}
[pass]
++++
</pre>
++++
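
The `after` / `after_key` pair is what drives pagination. A sketch of paging through all composite buckets (the index and source fields are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch composite buckets page by page: feed each response's
// `after_key` back in as `after` until no more buckets arrive.
let after: Record<string, unknown> | undefined
do {
  const page = await client.search({
    index: 'web-logs',
    size: 0,
    aggs: {
      by_user_and_day: {
        composite: {
          size: 100,
          ...(after && { after }),
          sources: [
            { user: { terms: { field: 'user.keyword' } } },
            { day: { date_histogram: { field: '@timestamp', calendar_interval: '1d' } } }
          ]
        }
      }
    }
  })
  const agg: any = page.aggregations?.by_user_and_day
  // ... process agg.buckets ...
  after = agg?.after_key
} while (after)
----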

[discrete]
[[AggregationsCompositeAggregationBase]]
=== AggregationsCompositeAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsCompositeAggregationBase {
  pass:[/**] @property field Either `field` or `script` must be present */
  field?: <<Field>>
  missing_bucket?: boolean
  missing_order?: <<AggregationsMissingOrder>>
  pass:[/**] @property script Either `field` or `script` must be present */
  script?: <<Script>> | string
  value_type?: <<AggregationsValueType>>
  order?: <<SortOrder>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeAggregationSource]]
=== AggregationsCompositeAggregationSource

[pass]
++++
<pre>
++++
interface AggregationsCompositeAggregationSource {
  pass:[/**] @property terms A terms aggregation. */
  terms?: <<AggregationsCompositeTermsAggregation>>
  pass:[/**] @property histogram A histogram aggregation. */
  histogram?: <<AggregationsCompositeHistogramAggregation>>
  pass:[/**] @property date_histogram A date histogram aggregation. */
  date_histogram?: <<AggregationsCompositeDateHistogramAggregation>>
  pass:[/**] @property geotile_grid A geotile grid aggregation. */
  geotile_grid?: <<AggregationsCompositeGeoTileGridAggregation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeBucket]]
=== AggregationsCompositeBucket

[pass]
++++
<pre>
++++
interface AggregationsCompositeBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: <<AggregationsCompositeAggregateKey>>
}
type AggregationsCompositeBucket = AggregationsCompositeBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<AggregationsCompositeAggregateKey>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeDateHistogramAggregation]]
=== AggregationsCompositeDateHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsCompositeDateHistogramAggregation extends <<AggregationsCompositeAggregationBase>> {
  format?: string
  pass:[/**] @property calendar_interval Either `calendar_interval` or `fixed_interval` must be present */
  calendar_interval?: <<DurationLarge>>
  pass:[/**] @property fixed_interval Either `calendar_interval` or `fixed_interval` must be present */
  fixed_interval?: <<DurationLarge>>
  offset?: <<Duration>>
  time_zone?: <<TimeZone>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeGeoTileGridAggregation]]
=== AggregationsCompositeGeoTileGridAggregation

[pass]
++++
<pre>
++++
interface AggregationsCompositeGeoTileGridAggregation extends <<AggregationsCompositeAggregationBase>> {
  precision?: <<integer>>
  bounds?: <<GeoBounds>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeHistogramAggregation]]
=== AggregationsCompositeHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsCompositeHistogramAggregation extends <<AggregationsCompositeAggregationBase>> {
  interval: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCompositeTermsAggregation]]
=== AggregationsCompositeTermsAggregation

[pass]
++++
<pre>
++++
interface AggregationsCompositeTermsAggregation extends <<AggregationsCompositeAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCumulativeCardinalityAggregate]]
=== AggregationsCumulativeCardinalityAggregate

[pass]
++++
<pre>
++++
interface AggregationsCumulativeCardinalityAggregate extends <<AggregationsAggregateBase>> {
  value: <<long>>
  value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCumulativeCardinalityAggregation]]
=== AggregationsCumulativeCardinalityAggregation

[pass]
++++
<pre>
++++
interface AggregationsCumulativeCardinalityAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCumulativeSumAggregation]]
=== AggregationsCumulativeSumAggregation

[pass]
++++
<pre>
++++
interface AggregationsCumulativeSumAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsCustomCategorizeTextAnalyzer]]
=== AggregationsCustomCategorizeTextAnalyzer

[pass]
++++
<pre>
++++
interface AggregationsCustomCategorizeTextAnalyzer {
  char_filter?: string[]
  tokenizer?: string
  filter?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDateHistogramAggregate]]
=== AggregationsDateHistogramAggregate

[pass]
++++
<pre>
++++
interface AggregationsDateHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsDateHistogramBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDateHistogramAggregation]]
=== AggregationsDateHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsDateHistogramAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property calendar_interval Calendar-aware interval. Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */
  calendar_interval?: <<AggregationsCalendarInterval>>
  pass:[/**] @property extended_bounds Enables extending the bounds of the histogram beyond the data itself. */
  extended_bounds?: <<AggregationsExtendedBounds>><<<AggregationsFieldDateMath>>>
  pass:[/**] @property hard_bounds Limits the histogram to specified bounds. */
  hard_bounds?: <<AggregationsExtendedBounds>><<<AggregationsFieldDateMath>>>
  pass:[/**] @property field The date field whose values are used to build a histogram. */
  field?: <<Field>>
  pass:[/**] @property fixed_interval Fixed intervals: a fixed number of SI units that never deviate, regardless of where they fall on the calendar. */
  fixed_interval?: <<Duration>>
  pass:[/**] @property format The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. */
  format?: string
  interval?: <<Duration>>
  pass:[/**] @property min_doc_count Only returns buckets that have `min_doc_count` number of documents. By default, all buckets between the first bucket that matches documents and the last one are returned. */
  min_doc_count?: <<integer>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<DateTime>>
  pass:[/**] @property offset Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */
  offset?: <<Duration>>
  pass:[/**] @property order The sort order of the returned buckets. */
  order?: <<AggregationsAggregateOrder>>
  params?: Record<string, any>
  script?: <<Script>> | string
  pass:[/**] @property time_zone Time zone used for bucketing and rounding. Defaults to Coordinated Universal Time (UTC). */
  time_zone?: <<TimeZone>>
  pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
}
[pass]
++++
</pre>
++++
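
For example, a calendar-aware monthly histogram (the `web-logs` index is a hypothetical name):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Bucket documents by calendar month; `min_doc_count: 0` keeps
// empty months so the series has no gaps.
const result = await client.search({
  index: 'web-logs',
  size: 0,
  aggs: {
    per_month: {
      date_histogram: {
        field: '@timestamp',
        calendar_interval: 'month',
        min_doc_count: 0,
        time_zone: 'UTC',
        format: 'yyyy-MM'
      }
    }
  }
})
----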

[discrete]
[[AggregationsDateHistogramBucket]]
=== AggregationsDateHistogramBucket

[pass]
++++
<pre>
++++
interface AggregationsDateHistogramBucketKeys extends <<AggregationsMultiBucketBase>> {
  key_as_string?: string
  key: <<EpochTime>><<<UnitMillis>>>
}
type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys
& { [property: string]: <<AggregationsAggregate>> | string | <<EpochTime>><<<UnitMillis>>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDateRangeAggregate]]
=== AggregationsDateRangeAggregate

[pass]
++++
<pre>
++++
interface AggregationsDateRangeAggregate extends <<AggregationsRangeAggregate>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDateRangeAggregation]]
=== AggregationsDateRangeAggregation

[pass]
++++
<pre>
++++
interface AggregationsDateRangeAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field The date field whose values are used to build ranges. */
  field?: <<Field>>
  pass:[/**] @property format The date format used to format `from` and `to` in the response. */
  format?: string
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<AggregationsMissing>>
  pass:[/**] @property ranges Array of date ranges. */
  ranges?: <<AggregationsDateRangeExpression>>[]
  pass:[/**] @property time_zone Time zone used to convert dates from another time zone to UTC. */
  time_zone?: <<TimeZone>>
  pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDateRangeExpression]]
=== AggregationsDateRangeExpression

[pass]
++++
<pre>
++++
interface AggregationsDateRangeExpression {
  pass:[/**] @property from Start of the range (inclusive). */
  from?: <<AggregationsFieldDateMath>>
  pass:[/**] @property key Custom key to return the range with. */
  key?: string
  pass:[/**] @property to End of the range (exclusive). */
  to?: <<AggregationsFieldDateMath>>
}
[pass]
++++
</pre>
++++
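
Ranges accept date-math expressions, so relative windows are easy to express. A sketch (index and field names assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Two buckets split at "30 days ago": `to` is exclusive, `from` inclusive.
const result = await client.search({
  index: 'web-logs',
  size: 0,
  aggs: {
    recency: {
      date_range: {
        field: '@timestamp',
        format: 'yyyy-MM-dd',
        ranges: [
          { key: 'older', to: 'now-30d/d' },
          { key: 'last_30_days', from: 'now-30d/d' }
        ]
      }
    }
  }
})
----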

[discrete]
[[AggregationsDerivativeAggregate]]
=== AggregationsDerivativeAggregate

[pass]
++++
<pre>
++++
interface AggregationsDerivativeAggregate extends <<AggregationsSingleMetricAggregateBase>> {
  normalized_value?: <<double>>
  normalized_value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDerivativeAggregation]]
=== AggregationsDerivativeAggregation

[pass]
++++
<pre>
++++
interface AggregationsDerivativeAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDiversifiedSamplerAggregation]]
=== AggregationsDiversifiedSamplerAggregation

[pass]
++++
<pre>
++++
interface AggregationsDiversifiedSamplerAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property execution_hint The type of value used for de-duplication. */
  execution_hint?: <<AggregationsSamplerAggregationExecutionHint>>
  pass:[/**] @property max_docs_per_value Limits how many documents are permitted per choice of de-duplicating value. */
  max_docs_per_value?: <<integer>>
  script?: <<Script>> | string
  pass:[/**] @property shard_size Limits how many top-scoring documents are collected in the sample processed on each shard. */
  shard_size?: <<integer>>
  pass:[/**] @property field The field used to provide values used for de-duplication. */
  field?: <<Field>>
}
[pass]
++++
</pre>
++++
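
A diversified sampler is typically wrapped around other aggregations to keep the sample from being dominated by a single value. A sketch (the index and field names are assumptions):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Sample at most 3 documents per author, then run a nested
// significant_terms aggregation over the diversified sample.
const result = await client.search({
  index: 'articles',
  size: 0,
  aggs: {
    sample: {
      diversified_sampler: {
        field: 'author.keyword',
        max_docs_per_value: 3,
        shard_size: 200
      },
      aggs: {
        keywords: { significant_terms: { field: 'tags.keyword' } }
      }
    }
  }
})
----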

[discrete]
[[AggregationsDoubleTermsAggregate]]
=== AggregationsDoubleTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsDoubleTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsDoubleTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsDoubleTermsBucket]]
=== AggregationsDoubleTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsDoubleTermsBucketKeys extends <<AggregationsTermsBucketBase>> {
  key: <<double>>
  key_as_string?: string
}
type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsEwmaModelSettings]]
=== AggregationsEwmaModelSettings

[pass]
++++
<pre>
++++
interface AggregationsEwmaModelSettings {
  alpha?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsEwmaMovingAverageAggregation]]
=== AggregationsEwmaMovingAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsEwmaMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> {
  model: 'ewma'
  settings: <<AggregationsEwmaModelSettings>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsExtendedBounds]]
=== AggregationsExtendedBounds

[pass]
++++
<pre>
++++
interface AggregationsExtendedBounds<T = unknown> {
  pass:[/**] @property max Maximum value for the bound. */
  max?: T
  pass:[/**] @property min Minimum value for the bound. */
  min?: T
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsExtendedStatsAggregate]]
=== AggregationsExtendedStatsAggregate

[pass]
++++
<pre>
++++
interface AggregationsExtendedStatsAggregate extends <<AggregationsStatsAggregate>> {
  sum_of_squares: <<double>> | null
  variance: <<double>> | null
  variance_population: <<double>> | null
  variance_sampling: <<double>> | null
  std_deviation: <<double>> | null
  std_deviation_population: <<double>> | null
  std_deviation_sampling: <<double>> | null
  std_deviation_bounds?: <<AggregationsStandardDeviationBounds>>
  sum_of_squares_as_string?: string
  variance_as_string?: string
  variance_population_as_string?: string
  variance_sampling_as_string?: string
  std_deviation_as_string?: string
  std_deviation_bounds_as_string?: <<AggregationsStandardDeviationBoundsAsString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsExtendedStatsAggregation]]
=== AggregationsExtendedStatsAggregation

[pass]
++++
<pre>
++++
interface AggregationsExtendedStatsAggregation extends <<AggregationsFormatMetricAggregationBase>> {
  pass:[/**] @property sigma The number of standard deviations above/below the mean to display. */
  sigma?: <<double>>
}
[pass]
++++
</pre>
++++
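
For instance, widening the standard-deviation bounds to three sigma (the index and numeric field are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Extended stats over a latency field; `sigma: 3` makes
// `std_deviation_bounds` report the mean +/- 3 standard deviations.
const result = await client.search({
  index: 'web-logs',
  size: 0,
  aggs: {
    latency_stats: {
      extended_stats: { field: 'latency_ms', sigma: 3 }
    }
  }
})
----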

[discrete]
[[AggregationsExtendedStatsBucketAggregate]]
=== AggregationsExtendedStatsBucketAggregate

[pass]
++++
<pre>
++++
interface AggregationsExtendedStatsBucketAggregate extends <<AggregationsExtendedStatsAggregate>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsExtendedStatsBucketAggregation]]
=== AggregationsExtendedStatsBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsExtendedStatsBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property sigma The number of standard deviations above/below the mean to display. */
  sigma?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFieldDateMath]]
=== AggregationsFieldDateMath

[pass]
++++
<pre>
++++
type AggregationsFieldDateMath = <<DateMath>> | <<double>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFilterAggregate]]
=== AggregationsFilterAggregate

[pass]
++++
<pre>
++++
interface AggregationsFilterAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsFilterAggregate = AggregationsFilterAggregateKeys
& { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFiltersAggregate]]
=== AggregationsFiltersAggregate

[pass]
++++
<pre>
++++
interface AggregationsFiltersAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsFiltersBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFiltersAggregation]]
=== AggregationsFiltersAggregation

[pass]
++++
<pre>
++++
interface AggregationsFiltersAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property filters Collection of queries from which to build buckets. */
  filters?: <<AggregationsBuckets>><<<QueryDslQueryContainer>>>
  pass:[/**] @property other_bucket Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */
  other_bucket?: boolean
  pass:[/**] @property other_bucket_key The key with which the other bucket is returned. */
  other_bucket_key?: string
  pass:[/**] @property keyed By default, the named filters aggregation returns the buckets as an object. Set to `false` to return the buckets as an array of objects. */
  keyed?: boolean
}
[pass]
++++
</pre>
++++
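
With named filters the buckets come back keyed by name; setting `other_bucket_key` also enables the catch-all bucket. A sketch (field names assumed):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// One bucket per named filter, plus an `other` bucket for
// documents matching neither filter.
const result = await client.search({
  index: 'app-logs',
  size: 0,
  aggs: {
    log_levels: {
      filters: {
        other_bucket_key: 'other',
        filters: {
          errors: { term: { 'level.keyword': 'ERROR' } },
          warnings: { term: { 'level.keyword': 'WARN' } }
        }
      }
    }
  }
})
----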

[discrete]
[[AggregationsFiltersBucket]]
=== AggregationsFiltersBucket

[pass]
++++
<pre>
++++
interface AggregationsFiltersBucketKeys extends <<AggregationsMultiBucketBase>> {
}
type AggregationsFiltersBucket = AggregationsFiltersBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFormatMetricAggregationBase]]
=== AggregationsFormatMetricAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsFormatMetricAggregationBase extends <<AggregationsMetricAggregationBase>> {
  format?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFormattableMetricAggregation]]
=== AggregationsFormattableMetricAggregation

[pass]
++++
<pre>
++++
interface AggregationsFormattableMetricAggregation extends <<AggregationsMetricAggregationBase>> {
  format?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFrequentItemSetsAggregate]]
=== AggregationsFrequentItemSetsAggregate

[pass]
++++
<pre>
++++
interface AggregationsFrequentItemSetsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsFrequentItemSetsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFrequentItemSetsAggregation]]
=== AggregationsFrequentItemSetsAggregation

[pass]
++++
<pre>
++++
interface AggregationsFrequentItemSetsAggregation {
  pass:[/**] @property fields <<Fields>> to analyze. */
  fields: <<AggregationsFrequentItemSetsField>>[]
  pass:[/**] @property minimum_set_size The minimum size of one item set. */
  minimum_set_size?: <<integer>>
  pass:[/**] @property minimum_support The minimum support of one item set. */
  minimum_support?: <<double>>
  pass:[/**] @property size The number of top item sets to return. */
  size?: <<integer>>
  pass:[/**] @property filter <<Query>> that filters documents from analysis. */
  filter?: <<QueryDslQueryContainer>>
}
[pass]
++++
</pre>
++++
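
A market-basket style sketch, assuming a hypothetical `sales` index with keyword fields:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Find combinations of product category and city that occur together
// in at least 1% of documents.
const result = await client.search({
  index: 'sales',
  size: 0,
  aggs: {
    bought_together: {
      frequent_item_sets: {
        minimum_set_size: 2,
        minimum_support: 0.01,
        size: 10,
        fields: [
          { field: 'category.keyword' },
          { field: 'city.keyword' }
        ]
      }
    }
  }
})
----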

[discrete]
[[AggregationsFrequentItemSetsBucket]]
=== AggregationsFrequentItemSetsBucket

[pass]
++++
<pre>
++++
interface AggregationsFrequentItemSetsBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: Record<<<Field>>, string[]>
  support: <<double>>
}
type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys
& { [property: string]: <<AggregationsAggregate>> | Record<<<Field>>, string[]> | <<double>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsFrequentItemSetsField]]
=== AggregationsFrequentItemSetsField

[pass]
++++
<pre>
++++
interface AggregationsFrequentItemSetsField {
  field: <<Field>>
  pass:[/**] @property exclude Values to exclude. Can be regular expression strings or arrays of strings of exact terms. */
  exclude?: <<AggregationsTermsExclude>>
  pass:[/**] @property include Values to include. Can be regular expression strings or arrays of strings of exact terms. */
  include?: <<AggregationsTermsInclude>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGapPolicy]]
=== AggregationsGapPolicy

[pass]
++++
<pre>
++++
type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoBoundsAggregate]]
=== AggregationsGeoBoundsAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoBoundsAggregate extends <<AggregationsAggregateBase>> {
  bounds?: <<GeoBounds>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoBoundsAggregation]]
=== AggregationsGeoBoundsAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoBoundsAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property wrap_longitude Specifies whether the bounding box should be allowed to overlap the international date line. */
  wrap_longitude?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoCentroidAggregate]]
=== AggregationsGeoCentroidAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoCentroidAggregate extends <<AggregationsAggregateBase>> {
  count: <<long>>
  location?: <<GeoLocation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoCentroidAggregation]]
=== AggregationsGeoCentroidAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoCentroidAggregation extends <<AggregationsMetricAggregationBase>> {
  count?: <<long>>
  location?: <<GeoLocation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoDistanceAggregate]]
=== AggregationsGeoDistanceAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoDistanceAggregate extends <<AggregationsRangeAggregate>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoDistanceAggregation]]
=== AggregationsGeoDistanceAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoDistanceAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property distance_type The distance calculation type. */
  distance_type?: <<GeoDistanceType>>
  pass:[/**] @property field A field of type `geo_point` used to evaluate the distance. */
  field?: <<Field>>
  pass:[/**] @property origin The origin used to evaluate the distance. */
  origin?: <<GeoLocation>>
  pass:[/**] @property ranges An array of ranges used to bucket documents. */
  ranges?: <<AggregationsAggregationRange>>[]
  pass:[/**] @property unit The distance unit. */
  unit?: <<DistanceUnit>>
}
[pass]
++++
</pre>
++++
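
For example, bucketing documents into distance rings around a fixed origin (the index, field, and coordinates are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Ranges are half-open: `to` is exclusive, `from` inclusive.
const result = await client.search({
  index: 'shops',
  size: 0,
  aggs: {
    rings: {
      geo_distance: {
        field: 'location',
        origin: { lat: 52.37, lon: 4.89 },
        unit: 'km',
        ranges: [{ to: 10 }, { from: 10, to: 50 }, { from: 50 }]
      }
    }
  }
})
----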

[discrete]
[[AggregationsGeoHashGridAggregate]]
=== AggregationsGeoHashGridAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoHashGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoHashGridBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoHashGridAggregation]]
=== AggregationsGeoHashGridAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoHashGridAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property bounds The bounding box to filter the points in each bucket. */
  bounds?: <<GeoBounds>>
  pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geohash_grid` aggregates all array values. */
  field?: <<Field>>
  pass:[/**] @property precision The string length of the geohashes used to define cells/buckets in the results. */
  precision?: <<GeoHashPrecision>>
  pass:[/**] @property shard_size Allows for more accurate counting of the top cells returned in the final result of the aggregation. Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */
  shard_size?: <<integer>>
  pass:[/**] @property size The maximum number of geohash buckets to return. */
  size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoHashGridBucket]]
=== AggregationsGeoHashGridBucket

[pass]
++++
<pre>
++++
interface AggregationsGeoHashGridBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: <<GeoHash>>
}
type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<GeoHash>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoHexGridAggregate]]
=== AggregationsGeoHexGridAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoHexGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoHexGridBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoHexGridBucket]]
=== AggregationsGeoHexGridBucket

[pass]
++++
<pre>
++++
interface AggregationsGeoHexGridBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: <<GeoHexCell>>
}
type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<GeoHexCell>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoLineAggregate]]
=== AggregationsGeoLineAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoLineAggregate extends <<AggregationsAggregateBase>> {
  type: string
  geometry: <<GeoLine>>
  properties: any
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoLineAggregation]]
=== AggregationsGeoLineAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoLineAggregation {
  pass:[/**] @property point The name of the geo_point field. */
  point: <<AggregationsGeoLinePoint>>
  pass:[/**] @property sort The name of the numeric field to use as the sort key for ordering the points. When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in an error. */
  sort: <<AggregationsGeoLineSort>>
  pass:[/**] @property include_sort When `true`, returns an additional array of the sort values in the feature properties. */
  include_sort?: boolean
  pass:[/**] @property sort_order The order in which the line is sorted (ascending or descending). */
  sort_order?: <<SortOrder>>
  pass:[/**] @property size The maximum length of the line represented in the aggregation. Valid sizes are between 1 and 10000. */
  size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoLinePoint]]
=== AggregationsGeoLinePoint

[pass]
++++
<pre>
++++
interface AggregationsGeoLinePoint {
  pass:[/**] @property field The name of the geo_point field. */
  field: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoLineSort]]
=== AggregationsGeoLineSort

[pass]
++++
<pre>
++++
interface AggregationsGeoLineSort {
  pass:[/**] @property field The name of the numeric field to use as the sort key for ordering the points. */
  field: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoTileGridAggregate]]
=== AggregationsGeoTileGridAggregate

[pass]
++++
<pre>
++++
interface AggregationsGeoTileGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoTileGridBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeoTileGridAggregation]]
=== AggregationsGeoTileGridAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeoTileGridAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geotile_grid` aggregates all array values. */
  field?: <<Field>>
  pass:[/**] @property precision Integer zoom of the key used to define cells/buckets in the results. Values outside of the range [0,29] will be rejected. */
  precision?: <<GeoTilePrecision>>
  pass:[/**] @property shard_size Allows for more accurate counting of the top cells returned in the final result of the aggregation. Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */
  shard_size?: <<integer>>
  pass:[/**] @property size The maximum number of buckets to return. */
  size?: <<integer>>
  pass:[/**] @property bounds A bounding box to filter the geo-points or geo-shapes in each bucket. */
  bounds?: <<GeoBounds>>
}
[pass]
++++
</pre>
++++
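
Grid keys are map tiles in `zoom/x/y` form, so the result plugs directly into slippy-map tiling schemes. A sketch (the index and field are assumptions):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Bucket points into map tiles at zoom level 8; keys look like "8/131/84".
const result = await client.search({
  index: 'shops',
  size: 0,
  aggs: {
    tiles: {
      geotile_grid: { field: 'location', precision: 8, size: 1000 }
    }
  }
})
----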

[discrete]
[[AggregationsGeoTileGridBucket]]
=== AggregationsGeoTileGridBucket

[pass]
++++
<pre>
++++
interface AggregationsGeoTileGridBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: <<GeoTile>>
}
type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
& { [property: string]: <<AggregationsAggregate>> | <<GeoTile>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGeohexGridAggregation]]
=== AggregationsGeohexGridAggregation

[pass]
++++
<pre>
++++
interface AggregationsGeohexGridAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geohex_grid` aggregates all array values. */
  field: <<Field>>
  pass:[/**] @property precision Integer zoom of the key used to define cells or buckets in the results. Value should be between 0 and 15. */
  precision?: <<integer>>
  pass:[/**] @property bounds Bounding box used to filter the geo-points in each bucket. */
  bounds?: <<GeoBounds>>
  pass:[/**] @property size Maximum number of buckets to return. */
  size?: <<integer>>
  pass:[/**] @property shard_size Number of buckets returned from each shard. */
  shard_size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGlobalAggregate]]
=== AggregationsGlobalAggregate

[pass]
++++
<pre>
++++
interface AggregationsGlobalAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys
& { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGlobalAggregation]]
=== AggregationsGlobalAggregation

[pass]
++++
<pre>
++++
interface AggregationsGlobalAggregation extends <<AggregationsBucketAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsGoogleNormalizedDistanceHeuristic]]
=== AggregationsGoogleNormalizedDistanceHeuristic

[pass]
++++
<pre>
++++
interface AggregationsGoogleNormalizedDistanceHeuristic {
  pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */
  background_is_superset?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHdrMethod]]
=== AggregationsHdrMethod

[pass]
++++
<pre>
++++
interface AggregationsHdrMethod {
  pass:[/**] @property number_of_significant_value_digits Specifies the resolution of values for the histogram in number of significant digits. */
  number_of_significant_value_digits?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHdrPercentileRanksAggregate]]
=== AggregationsHdrPercentileRanksAggregate

[pass]
++++
<pre>
++++
interface AggregationsHdrPercentileRanksAggregate extends <<AggregationsPercentilesAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHdrPercentilesAggregate]]
=== AggregationsHdrPercentilesAggregate

[pass]
++++
<pre>
++++
interface AggregationsHdrPercentilesAggregate extends <<AggregationsPercentilesAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHistogramAggregate]]
=== AggregationsHistogramAggregate

[pass]
++++
<pre>
++++
interface AggregationsHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsHistogramBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHistogramAggregation]]
=== AggregationsHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsHistogramAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property extended_bounds Enables extending the bounds of the histogram beyond the data itself. */
  extended_bounds?: <<AggregationsExtendedBounds>><<<double>>>
  pass:[/**] @property hard_bounds Limits the range of buckets in the histogram. It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */
  hard_bounds?: <<AggregationsExtendedBounds>><<<double>>>
  pass:[/**] @property field The name of the field to aggregate on. */
  field?: <<Field>>
  pass:[/**] @property interval The interval for the buckets. Must be a positive decimal. */
  interval?: <<double>>
  pass:[/**] @property min_doc_count Only returns buckets that have `min_doc_count` number of documents. By default, the response will fill gaps in the histogram with empty buckets. */
  min_doc_count?: <<integer>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<double>>
  pass:[/**] @property offset By default, the bucket keys start with 0 and then continue in even spaced steps of `interval`. The bucket boundaries can be shifted by using the `offset` option. */
  offset?: <<double>>
  pass:[/**] @property order The sort order of the returned buckets. By default, the returned buckets are sorted by their key ascending. */
  order?: <<AggregationsAggregateOrder>>
  script?: <<Script>> | string
  format?: string
  pass:[/**] @property keyed If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */
  keyed?: boolean
}
[pass]
++++
</pre>
++++
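
For example, fixed-width price buckets shifted by an offset (the index and field are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Interval 50 with offset 25 produces buckets [25, 75), [75, 125), ...
// `extended_bounds` with `min_doc_count: 0` keeps empty buckets in range.
const result = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    price_buckets: {
      histogram: {
        field: 'price',
        interval: 50,
        offset: 25,
        min_doc_count: 0,
        extended_bounds: { min: 0, max: 500 }
      }
    }
  }
})
----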

[discrete]
[[AggregationsHistogramBucket]]
=== AggregationsHistogramBucket

[pass]
++++
<pre>
++++
interface AggregationsHistogramBucketKeys extends <<AggregationsMultiBucketBase>> {
  key_as_string?: string
  key: <<double>>
}
type AggregationsHistogramBucket = AggregationsHistogramBucketKeys
& { [property: string]: <<AggregationsAggregate>> | string | <<double>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHoltLinearModelSettings]]
=== AggregationsHoltLinearModelSettings

[pass]
++++
<pre>
++++
interface AggregationsHoltLinearModelSettings {
  alpha?: <<float>>
  beta?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHoltMovingAverageAggregation]]
=== AggregationsHoltMovingAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsHoltMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> {
  model: 'holt'
  settings: <<AggregationsHoltLinearModelSettings>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHoltWintersModelSettings]]
=== AggregationsHoltWintersModelSettings

[pass]
++++
<pre>
++++
interface AggregationsHoltWintersModelSettings {
  alpha?: <<float>>
  beta?: <<float>>
  gamma?: <<float>>
  pad?: boolean
  period?: <<integer>>
  type?: <<AggregationsHoltWintersType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHoltWintersMovingAverageAggregation]]
=== AggregationsHoltWintersMovingAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsHoltWintersMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> {
  model: 'holt_winters'
  settings: <<AggregationsHoltWintersModelSettings>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsHoltWintersType]]
=== AggregationsHoltWintersType

[pass]
++++
<pre>
++++
type AggregationsHoltWintersType = 'add' | 'mult'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceAggregate]]
=== AggregationsInferenceAggregate

[pass]
++++
<pre>
++++
interface AggregationsInferenceAggregateKeys extends <<AggregationsAggregateBase>> {
  value?: <<FieldValue>>
  feature_importance?: <<AggregationsInferenceFeatureImportance>>[]
  top_classes?: <<AggregationsInferenceTopClassEntry>>[]
  warning?: string
}
type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys
& { [property: string]: any }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceAggregation]]
=== AggregationsInferenceAggregation

[pass]
++++
<pre>
++++
interface AggregationsInferenceAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property model_id The ID or alias for the trained model. */
  model_id: <<Name>>
  pass:[/**] @property inference_config Contains the inference type and its options. */
  inference_config?: <<AggregationsInferenceConfigContainer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceClassImportance]]
=== AggregationsInferenceClassImportance

[pass]
++++
<pre>
++++
interface AggregationsInferenceClassImportance {
  class_name: string
  importance: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceConfigContainer]]
=== AggregationsInferenceConfigContainer

[pass]
++++
<pre>
++++
interface AggregationsInferenceConfigContainer {
  pass:[/**] @property regression Regression configuration for inference. */
  regression?: <<MlRegressionInferenceOptions>>
  pass:[/**] @property classification Classification configuration for inference. */
  classification?: <<MlClassificationInferenceOptions>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceFeatureImportance]]
=== AggregationsInferenceFeatureImportance

[pass]
++++
<pre>
++++
interface AggregationsInferenceFeatureImportance {
  feature_name: string
  importance?: <<double>>
  classes?: <<AggregationsInferenceClassImportance>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsInferenceTopClassEntry]]
=== AggregationsInferenceTopClassEntry

[pass]
++++
<pre>
++++
interface AggregationsInferenceTopClassEntry {
  class_name: <<FieldValue>>
  class_probability: <<double>>
  class_score: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsIpPrefixAggregate]]
=== AggregationsIpPrefixAggregate

[pass]
++++
<pre>
++++
interface AggregationsIpPrefixAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsIpPrefixBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsIpPrefixAggregation]]
=== AggregationsIpPrefixAggregation

[pass]
++++
<pre>
++++
interface AggregationsIpPrefixAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field The IP address field to aggregate on. The field mapping type must be `ip`. */
  field: <<Field>>
  pass:[/**] @property prefix_length Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. For IPv6 addresses the accepted range is [0, 128]. */
  prefix_length: <<integer>>
  pass:[/**] @property is_ipv6 Defines whether the prefix applies to IPv6 addresses. */
  is_ipv6?: boolean
  pass:[/**] @property append_prefix_length Defines whether the prefix length is appended to IP address keys in the response. */
  append_prefix_length?: boolean
  pass:[/**] @property keyed Defines whether buckets are returned as a hash rather than an array in the response. */
  keyed?: boolean
  pass:[/**] @property min_doc_count Minimum number of documents in a bucket for it to be included in the response. */
  min_doc_count?: <<long>>
}
[pass]
++++
</pre>
++++
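
A sketch that rolls IPv4 addresses up into /24 networks (the `source.ip` field name is an assumption; it must be mapped as `ip`):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// One bucket per /24 subnet, e.g. "192.168.1.0".
const result = await client.search({
  index: 'network-logs',
  size: 0,
  aggs: {
    subnets: {
      ip_prefix: { field: 'source.ip', prefix_length: 24 }
    }
  }
})
----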

[discrete]
[[AggregationsIpPrefixBucket]]
=== AggregationsIpPrefixBucket

[pass]
++++
<pre>
++++
interface AggregationsIpPrefixBucketKeys extends <<AggregationsMultiBucketBase>> {
  is_ipv6: boolean
  key: string
  prefix_length: <<integer>>
  netmask?: string
}
type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys
& { [property: string]: <<AggregationsAggregate>> | boolean | string | <<integer>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsIpRangeAggregate]]
=== AggregationsIpRangeAggregate

[pass]
++++
<pre>
++++
interface AggregationsIpRangeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsIpRangeBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsIpRangeAggregation]]
=== AggregationsIpRangeAggregation

[pass]
++++
<pre>
++++
interface AggregationsIpRangeAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field The IP address field whose values are used to build ranges. */
  field?: <<Field>>
  pass:[/**] @property ranges Array of IP ranges. */
  ranges?: <<AggregationsIpRangeAggregationRange>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsIpRangeAggregationRange]]
=== AggregationsIpRangeAggregationRange

[pass]
++++
<pre>
++++
interface AggregationsIpRangeAggregationRange {
  pass:[/**] @property from Start of the range. */
  from?: string | null
  pass:[/**] @property mask IP range defined as a CIDR mask. */
  mask?: string
  pass:[/**] @property to End of the range. */
  to?: string | null
}
[pass]
++++
</pre>
++++
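
Ranges can be given as explicit `from`/`to` bounds or as a CIDR `mask`. A sketch (the field and addresses are hypothetical):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Mix explicit bounds with a CIDR mask; `to` is exclusive.
const result = await client.search({
  index: 'network-logs',
  size: 0,
  aggs: {
    ip_buckets: {
      ip_range: {
        field: 'source.ip',
        ranges: [
          { to: '10.0.0.128' },
          { from: '10.0.0.128' },
          { mask: '10.0.1.0/24' }
        ]
      }
    }
  }
})
----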
|
||
|
||
[discrete]
|
||
[[AggregationsIpRangeBucket]]
|
||
=== AggregationsIpRangeBucket
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsIpRangeBucketKeys extends <<AggregationsMultiBucketBase>> {
|
||
key?: string
|
||
from?: string
|
||
to?: string
|
||
}
|
||
type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys
|
||
& { [property: string]: <<AggregationsAggregate>> | string | <<long>> }
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsKeyedPercentiles]]
|
||
=== AggregationsKeyedPercentiles
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type AggregationsKeyedPercentiles = Record<string, string | <<long>> | null>
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsLinearMovingAverageAggregation]]
|
||
=== AggregationsLinearMovingAverageAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsLinearMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> {
|
||
model: 'linear'
|
||
settings: <<EmptyObject>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsLongRareTermsAggregate]]
|
||
=== AggregationsLongRareTermsAggregate
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsLongRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsLongRareTermsBucket>>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsLongRareTermsBucket]]
|
||
=== AggregationsLongRareTermsBucket
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsLongRareTermsBucketKeys extends <<AggregationsMultiBucketBase>> {
|
||
key: <<long>>
|
||
key_as_string?: string
|
||
}
|
||
type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys
|
||
& { [property: string]: <<AggregationsAggregate>> | <<long>> | string }
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsLongTermsAggregate]]
|
||
=== AggregationsLongTermsAggregate
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsLongTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsLongTermsBucket>>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsLongTermsBucket]]
|
||
=== AggregationsLongTermsBucket
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsLongTermsBucketKeys extends <<AggregationsTermsBucketBase>> {
|
||
key: <<long>>
|
||
key_as_string?: string
|
||
}
|
||
type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys
|
||
& { [property: string]: <<AggregationsAggregate>> | <<long>> | string }
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMatrixAggregation]]
|
||
=== AggregationsMatrixAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMatrixAggregation {
|
||
pass:[/**] @property fields An array of fields for computing the statistics. */
|
||
fields?: <<Fields>>
|
||
pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
|
||
missing?: Record<<<Field>>, <<double>>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMatrixStatsAggregate]]
|
||
=== AggregationsMatrixStatsAggregate
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMatrixStatsAggregate extends <<AggregationsAggregateBase>> {
|
||
doc_count: <<long>>
|
||
fields?: <<AggregationsMatrixStatsFields>>[]
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMatrixStatsAggregation]]
|
||
=== AggregationsMatrixStatsAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMatrixStatsAggregation extends <<AggregationsMatrixAggregation>> {
|
||
pass:[/**] @property mode Array value the aggregation will use for array or multi-valued fields. */
|
||
mode?: <<SortMode>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMatrixStatsFields]]
|
||
=== AggregationsMatrixStatsFields
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMatrixStatsFields {
|
||
name: <<Field>>
|
||
count: <<long>>
|
||
mean: <<double>>
|
||
variance: <<double>>
|
||
skewness: <<double>>
|
||
kurtosis: <<double>>
|
||
covariance: Record<<<Field>>, <<double>>>
|
||
correlation: Record<<<Field>>, <<double>>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMaxAggregate]]
|
||
=== AggregationsMaxAggregate
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMaxAggregate extends <<AggregationsSingleMetricAggregateBase>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMaxAggregation]]
|
||
=== AggregationsMaxAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMaxAggregation extends <<AggregationsFormatMetricAggregationBase>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMaxBucketAggregation]]
|
||
=== AggregationsMaxBucketAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMaxBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMedianAbsoluteDeviationAggregate]]
|
||
=== AggregationsMedianAbsoluteDeviationAggregate
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMedianAbsoluteDeviationAggregate extends <<AggregationsSingleMetricAggregateBase>> {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AggregationsMedianAbsoluteDeviationAggregation]]
|
||
=== AggregationsMedianAbsoluteDeviationAggregation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AggregationsMedianAbsoluteDeviationAggregation extends <<AggregationsFormatMetricAggregationBase>> {
|
||
pass:[/**] @property compression Limits the maximum number of nodes used by the underlying <<TDigest>> algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
|
||
compression?: <<double>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++

[discrete]
[[AggregationsMetricAggregationBase]]
=== AggregationsMetricAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsMetricAggregationBase {
  pass:[/**] @property field The field on which to run the aggregation. */
  field?: <<Field>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<AggregationsMissing>>
  script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMinAggregate]]
=== AggregationsMinAggregate

[pass]
++++
<pre>
++++
interface AggregationsMinAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMinAggregation]]
=== AggregationsMinAggregation

[pass]
++++
<pre>
++++
interface AggregationsMinAggregation extends <<AggregationsFormatMetricAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMinBucketAggregation]]
=== AggregationsMinBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsMinBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMinimumInterval]]
=== AggregationsMinimumInterval

[pass]
++++
<pre>
++++
type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMissing]]
=== AggregationsMissing

[pass]
++++
<pre>
++++
type AggregationsMissing = string | <<integer>> | <<double>> | boolean
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMissingAggregate]]
=== AggregationsMissingAggregate

[pass]
++++
<pre>
++++
interface AggregationsMissingAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsMissingAggregate = AggregationsMissingAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMissingAggregation]]
=== AggregationsMissingAggregation

[pass]
++++
<pre>
++++
interface AggregationsMissingAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field The name of the field. */
  field?: <<Field>>
  missing?: <<AggregationsMissing>>
}
[pass]
++++
</pre>
++++
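
As a usage sketch, assuming a hypothetical `products` index in which some documents lack a `price` field, a `missing` aggregation counts those documents:

[pass]
++++
<pre>
++++
// Buckets every document that has no value for `price`.
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    products_without_a_price: {
      missing: { field: 'price' }
    }
  }
})
[pass]
++++
</pre>
++++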

[discrete]
[[AggregationsMissingOrder]]
=== AggregationsMissingOrder

[pass]
++++
<pre>
++++
type AggregationsMissingOrder = 'first' | 'last' | 'default'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMovingAverageAggregation]]
=== AggregationsMovingAverageAggregation

[pass]
++++
<pre>
++++
type AggregationsMovingAverageAggregation = <<AggregationsLinearMovingAverageAggregation>> | <<AggregationsSimpleMovingAverageAggregation>> | <<AggregationsEwmaMovingAverageAggregation>> | <<AggregationsHoltMovingAverageAggregation>> | <<AggregationsHoltWintersMovingAverageAggregation>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMovingAverageAggregationBase]]
=== AggregationsMovingAverageAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsMovingAverageAggregationBase extends <<AggregationsPipelineAggregationBase>> {
  minimize?: boolean
  predict?: <<integer>>
  window?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMovingFunctionAggregation]]
=== AggregationsMovingFunctionAggregation

[pass]
++++
<pre>
++++
interface AggregationsMovingFunctionAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property script The script that should be executed on each window of data. */
  script?: string
  pass:[/**] @property shift By default, the window consists of the last n values excluding the current bucket. Increasing `shift` by 1 moves the starting window position by 1 to the right. */
  shift?: <<integer>>
  pass:[/**] @property window The size of the window to "slide" across the histogram. */
  window?: <<integer>>
}
[pass]
++++
</pre>
++++
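
A sketch of a `moving_fn` pipeline, assuming a hypothetical `sales` index with `date` and `price` fields; a ten-bucket window slides over a monthly date histogram and averages the sibling `sum` metric:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'sales',
  size: 0,
  aggs: {
    sales_per_month: {
      date_histogram: { field: 'date', calendar_interval: 'month' },
      aggs: {
        total: { sum: { field: 'price' } },
        moving_avg: {
          moving_fn: {
            buckets_path: 'total', // the sibling metric to slide over
            window: 10,
            script: 'MovingFunctions.unweightedAvg(values)'
          }
        }
      }
    }
  }
})
[pass]
++++
</pre>
++++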

[discrete]
[[AggregationsMovingPercentilesAggregation]]
=== AggregationsMovingPercentilesAggregation

[pass]
++++
<pre>
++++
interface AggregationsMovingPercentilesAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property window The size of the window to "slide" across the histogram. */
  window?: <<integer>>
  pass:[/**] @property shift By default, the window consists of the last n values excluding the current bucket. Increasing `shift` by 1 moves the starting window position by 1 to the right. */
  shift?: <<integer>>
  keyed?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMultiBucketAggregateBase]]
=== AggregationsMultiBucketAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsMultiBucketAggregateBase<TBucket = unknown> extends <<AggregationsAggregateBase>> {
  buckets: <<AggregationsBuckets>><TBucket>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMultiBucketBase]]
=== AggregationsMultiBucketBase

[pass]
++++
<pre>
++++
interface AggregationsMultiBucketBase {
  doc_count: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMultiTermLookup]]
=== AggregationsMultiTermLookup

[pass]
++++
<pre>
++++
interface AggregationsMultiTermLookup {
  pass:[/**] @property field The field from which to retrieve terms. */
  field: <<Field>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<AggregationsMissing>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMultiTermsAggregate]]
=== AggregationsMultiTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsMultiTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsMultiTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMultiTermsAggregation]]
=== AggregationsMultiTermsAggregation

[pass]
++++
<pre>
++++
interface AggregationsMultiTermsAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property collect_mode Specifies the strategy for data collection. */
  collect_mode?: <<AggregationsTermsAggregationCollectMode>>
  pass:[/**] @property order Specifies the sort order of the buckets. Defaults to sorting by descending document count. */
  order?: <<AggregationsAggregateOrder>>
  pass:[/**] @property min_doc_count The minimum number of documents in a bucket for it to be returned. */
  min_doc_count?: <<long>>
  pass:[/**] @property shard_min_doc_count The minimum number of documents in a bucket on each shard for it to be returned. */
  shard_min_doc_count?: <<long>>
  pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: <<integer>>
  pass:[/**] @property show_term_doc_count_error Calculates the doc count error on a per-term basis. */
  show_term_doc_count_error?: boolean
  pass:[/**] @property size The number of term buckets that should be returned out of the overall terms list. */
  size?: <<integer>>
  pass:[/**] @property terms The field from which to generate sets of terms. */
  terms: <<AggregationsMultiTermLookup>>[]
}
[pass]
++++
</pre>
++++
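
For example, a `multi_terms` aggregation can bucket on the combination of two fields (a hypothetical `genre` and `product` pair), sorted by descending document count by default:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    genres_and_products: {
      multi_terms: {
        // Each bucket key is the combination of both field values.
        terms: [{ field: 'genre' }, { field: 'product' }]
      }
    }
  }
})
[pass]
++++
</pre>
++++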

[discrete]
[[AggregationsMultiTermsBucket]]
=== AggregationsMultiTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsMultiTermsBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: <<FieldValue>>[]
  key_as_string?: string
  doc_count_error_upper_bound?: <<long>>
}
type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | <<FieldValue>>[] | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsMutualInformationHeuristic]]
=== AggregationsMutualInformationHeuristic

[pass]
++++
<pre>
++++
interface AggregationsMutualInformationHeuristic {
  pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */
  background_is_superset?: boolean
  pass:[/**] @property include_negatives Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */
  include_negatives?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsNestedAggregate]]
=== AggregationsNestedAggregate

[pass]
++++
<pre>
++++
interface AggregationsNestedAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsNestedAggregate = AggregationsNestedAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsNestedAggregation]]
=== AggregationsNestedAggregation

[pass]
++++
<pre>
++++
interface AggregationsNestedAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property path The path to the field of type `nested`. */
  path?: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsNormalizeAggregation]]
=== AggregationsNormalizeAggregation

[pass]
++++
<pre>
++++
interface AggregationsNormalizeAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property method The specific method to apply. */
  method?: <<AggregationsNormalizeMethod>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsNormalizeMethod]]
=== AggregationsNormalizeMethod

[pass]
++++
<pre>
++++
type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsParentAggregate]]
=== AggregationsParentAggregate

[pass]
++++
<pre>
++++
interface AggregationsParentAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsParentAggregate = AggregationsParentAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsParentAggregation]]
=== AggregationsParentAggregation

[pass]
++++
<pre>
++++
interface AggregationsParentAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property type The child type that should be selected. */
  type?: <<RelationName>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentageScoreHeuristic]]
=== AggregationsPercentageScoreHeuristic

[pass]
++++
<pre>
++++
interface AggregationsPercentageScoreHeuristic {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentileRanksAggregation]]
=== AggregationsPercentileRanksAggregation

[pass]
++++
<pre>
++++
interface AggregationsPercentileRanksAggregation extends <<AggregationsFormatMetricAggregationBase>> {
  pass:[/**] @property keyed By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. */
  keyed?: boolean
  pass:[/**] @property values An array of values for which to calculate the percentile ranks. */
  values?: <<double>>[] | null
  pass:[/**] @property hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */
  hdr?: <<AggregationsHdrMethod>>
  pass:[/**] @property tdigest Sets parameters for the default <<TDigest>> algorithm used to calculate percentile ranks. */
  tdigest?: <<AggregationsTDigest>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentiles]]
=== AggregationsPercentiles

[pass]
++++
<pre>
++++
type AggregationsPercentiles = <<AggregationsKeyedPercentiles>> | <<AggregationsArrayPercentilesItem>>[]
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentilesAggregateBase]]
=== AggregationsPercentilesAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsPercentilesAggregateBase extends <<AggregationsAggregateBase>> {
  values: <<AggregationsPercentiles>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentilesAggregation]]
=== AggregationsPercentilesAggregation

[pass]
++++
<pre>
++++
interface AggregationsPercentilesAggregation extends <<AggregationsFormatMetricAggregationBase>> {
  pass:[/**] @property keyed By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. */
  keyed?: boolean
  pass:[/**] @property percents The percentiles to calculate. */
  percents?: <<double>>[]
  pass:[/**] @property hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */
  hdr?: <<AggregationsHdrMethod>>
  pass:[/**] @property tdigest Sets parameters for the default <<TDigest>> algorithm used to calculate percentiles. */
  tdigest?: <<AggregationsTDigest>>
}
[pass]
++++
</pre>
++++
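
A minimal sketch, assuming a hypothetical `latency` index with a numeric `load_time` field:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'latency',
  size: 0,
  aggs: {
    load_time_outlier: {
      percentiles: {
        field: 'load_time',
        percents: [50, 95, 99],       // restrict the default percentile set
        tdigest: { compression: 200 } // trade memory for accuracy
      }
    }
  }
})
[pass]
++++
</pre>
++++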

[discrete]
[[AggregationsPercentilesBucketAggregate]]
=== AggregationsPercentilesBucketAggregate

[pass]
++++
<pre>
++++
interface AggregationsPercentilesBucketAggregate extends <<AggregationsPercentilesAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPercentilesBucketAggregation]]
=== AggregationsPercentilesBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsPercentilesBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property percents The list of percentiles to calculate. */
  percents?: <<double>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsPipelineAggregationBase]]
=== AggregationsPipelineAggregationBase

[pass]
++++
<pre>
++++
interface AggregationsPipelineAggregationBase extends <<AggregationsBucketPathAggregation>> {
  pass:[/**] @property format `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */
  format?: string
  pass:[/**] @property gap_policy <<Policy>> to apply when gaps are found in the data. */
  gap_policy?: <<AggregationsGapPolicy>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRandomSamplerAggregation]]
=== AggregationsRandomSamplerAggregation

[pass]
++++
<pre>
++++
interface AggregationsRandomSamplerAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property probability The probability that a document will be included in the aggregated data. Must be greater than 0, less than 0.5, or exactly 1. The lower the probability, the fewer documents are matched. */
  probability: <<double>>
  pass:[/**] @property seed The seed to generate the random sampling of documents. When a seed is provided, the random subset of documents is the same between calls. */
  seed?: <<integer>>
  pass:[/**] @property shard_seed When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same. */
  shard_seed?: <<integer>>
}
[pass]
++++
</pre>
++++
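
A sketch of a `random_sampler` wrapper, assuming a hypothetical `logs` index; the inner metric runs over roughly 10% of the matching documents, and the fixed `seed` makes the sample repeatable between calls:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'logs',
  size: 0,
  aggs: {
    sampling: {
      random_sampler: { probability: 0.1, seed: 42 },
      aggs: {
        price_percentiles: { percentiles: { field: 'price' } }
      }
    }
  }
})
[pass]
++++
</pre>
++++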

[discrete]
[[AggregationsRangeAggregate]]
=== AggregationsRangeAggregate

[pass]
++++
<pre>
++++
interface AggregationsRangeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsRangeBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRangeAggregation]]
=== AggregationsRangeAggregation

[pass]
++++
<pre>
++++
interface AggregationsRangeAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property field The field whose values are used to build ranges. */
  field?: <<Field>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<integer>>
  pass:[/**] @property ranges An array of ranges used to bucket documents. */
  ranges?: <<AggregationsAggregationRange>>[]
  script?: <<Script>> | string
  pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
  format?: string
}
[pass]
++++
</pre>
++++
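
For instance, assuming a hypothetical `products` index, a `range` aggregation buckets documents by `price` into half-open intervals:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    price_ranges: {
      range: {
        field: 'price',
        // `from` is inclusive, `to` is exclusive.
        ranges: [{ to: 100 }, { from: 100, to: 200 }, { from: 200 }]
      }
    }
  }
})
[pass]
++++
</pre>
++++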

[discrete]
[[AggregationsRangeBucket]]
=== AggregationsRangeBucket

[pass]
++++
<pre>
++++
interface AggregationsRangeBucketKeys extends <<AggregationsMultiBucketBase>> {
  from?: <<double>>
  to?: <<double>>
  from_as_string?: string
  to_as_string?: string
  key?: string
}
type AggregationsRangeBucket = AggregationsRangeBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRareTermsAggregation]]
=== AggregationsRareTermsAggregation

[pass]
++++
<pre>
++++
interface AggregationsRareTermsAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property exclude Terms that should be excluded from the aggregation. */
  exclude?: <<AggregationsTermsExclude>>
  pass:[/**] @property field The field from which to return rare terms. */
  field?: <<Field>>
  pass:[/**] @property include Terms that should be included in the aggregation. */
  include?: <<AggregationsTermsInclude>>
  pass:[/**] @property max_doc_count The maximum number of documents a term should appear in. */
  max_doc_count?: <<long>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<AggregationsMissing>>
  pass:[/**] @property precision The precision of the internal CuckooFilters. Smaller precision leads to better approximation, but higher memory usage. */
  precision?: <<double>>
  value_type?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRateAggregate]]
=== AggregationsRateAggregate

[pass]
++++
<pre>
++++
interface AggregationsRateAggregate extends <<AggregationsAggregateBase>> {
  value: <<double>>
  value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRateAggregation]]
=== AggregationsRateAggregation

[pass]
++++
<pre>
++++
interface AggregationsRateAggregation extends <<AggregationsFormatMetricAggregationBase>> {
  pass:[/**] @property unit The interval used to calculate the rate. By default, the interval of the `date_histogram` is used. */
  unit?: <<AggregationsCalendarInterval>>
  pass:[/**] @property mode How the rate is calculated. */
  mode?: <<AggregationsRateMode>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsRateMode]]
=== AggregationsRateMode

[pass]
++++
<pre>
++++
type AggregationsRateMode = 'sum' | 'value_count'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsReverseNestedAggregate]]
=== AggregationsReverseNestedAggregate

[pass]
++++
<pre>
++++
interface AggregationsReverseNestedAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsReverseNestedAggregation]]
=== AggregationsReverseNestedAggregation

[pass]
++++
<pre>
++++
interface AggregationsReverseNestedAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property path Defines the nested object field that should be joined back to. The default is empty, which means that it joins back to the root/main document level. */
  path?: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSamplerAggregate]]
=== AggregationsSamplerAggregate

[pass]
++++
<pre>
++++
interface AggregationsSamplerAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSamplerAggregation]]
=== AggregationsSamplerAggregation

[pass]
++++
<pre>
++++
interface AggregationsSamplerAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property shard_size Limits how many top-scoring documents are collected in the sample processed on each shard. */
  shard_size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSamplerAggregationExecutionHint]]
=== AggregationsSamplerAggregationExecutionHint

[pass]
++++
<pre>
++++
type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsScriptedHeuristic]]
=== AggregationsScriptedHeuristic

[pass]
++++
<pre>
++++
interface AggregationsScriptedHeuristic {
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsScriptedMetricAggregate]]
=== AggregationsScriptedMetricAggregate

[pass]
++++
<pre>
++++
interface AggregationsScriptedMetricAggregate extends <<AggregationsAggregateBase>> {
  value: any
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsScriptedMetricAggregation]]
=== AggregationsScriptedMetricAggregation

[pass]
++++
<pre>
++++
interface AggregationsScriptedMetricAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property combine_script Runs once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from each shard. */
  combine_script?: <<Script>> | string
  pass:[/**] @property init_script Runs prior to any collection of documents. Allows the aggregation to set up any initial state. */
  init_script?: <<Script>> | string
  pass:[/**] @property map_script Runs once per document collected. If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */
  map_script?: <<Script>> | string
  pass:[/**] @property params A global object with script parameters for `init`, `map` and `combine` scripts. It is shared between the scripts. */
  params?: Record<string, any>
  pass:[/**] @property reduce_script Runs once on the coordinating node after all shards have returned their results. The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */
  reduce_script?: <<Script>> | string
}
[pass]
++++
</pre>
++++
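
A condensed sketch of the four script phases, assuming a hypothetical `sales` index with a numeric `amount` field; the aggregation sums the field by hand:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'sales',
  size: 0,
  aggs: {
    total_amount: {
      scripted_metric: {
        init_script: 'state.sum = 0',                // per shard, before collection
        map_script: 'state.sum += doc.amount.value', // per document
        combine_script: 'return state.sum',          // per shard, after collection
        // Once on the coordinating node; `states` holds each shard's combined value.
        reduce_script: 'double total = 0; for (s in states) { total += s } return total'
      }
    }
  }
})
[pass]
++++
</pre>
++++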

[discrete]
[[AggregationsSerialDifferencingAggregation]]
=== AggregationsSerialDifferencingAggregation

[pass]
++++
<pre>
++++
interface AggregationsSerialDifferencingAggregation extends <<AggregationsPipelineAggregationBase>> {
  pass:[/**] @property lag The historical bucket to subtract from the current value. Must be a positive, non-zero <<integer>>. */
  lag?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantLongTermsAggregate]]
=== AggregationsSignificantLongTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsSignificantLongTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><<<AggregationsSignificantLongTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantLongTermsBucket]]
=== AggregationsSignificantLongTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsSignificantLongTermsBucketKeys extends <<AggregationsSignificantTermsBucketBase>> {
  key: <<long>>
  key_as_string?: string
}
type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | string | <<double>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantStringTermsAggregate]]
=== AggregationsSignificantStringTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsSignificantStringTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><<<AggregationsSignificantStringTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantStringTermsBucket]]
=== AggregationsSignificantStringTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsSignificantStringTermsBucketKeys extends <<AggregationsSignificantTermsBucketBase>> {
  key: string
}
type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | string | <<double>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantTermsAggregateBase]]
=== AggregationsSignificantTermsAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsSignificantTermsAggregateBase<T = unknown> extends <<AggregationsMultiBucketAggregateBase>><T> {
  bg_count?: <<long>>
  doc_count?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantTermsAggregation]]
=== AggregationsSignificantTermsAggregation

[pass]
++++
<pre>
++++
interface AggregationsSignificantTermsAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property background_filter A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */
  background_filter?: <<QueryDslQueryContainer>>
  pass:[/**] @property chi_square Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */
  chi_square?: <<AggregationsChiSquareHeuristic>>
  pass:[/**] @property exclude Terms to exclude. */
  exclude?: <<AggregationsTermsExclude>>
  pass:[/**] @property execution_hint Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */
  execution_hint?: <<AggregationsTermsAggregationExecutionHint>>
  pass:[/**] @property field The field from which to return significant terms. */
  field?: <<Field>>
  pass:[/**] @property gnd Use Google normalized distance as described in "The Google Similarity <<Distance>>", Cilibrasi and Vitanyi, 2007, as the significance score. */
  gnd?: <<AggregationsGoogleNormalizedDistanceHeuristic>>
  pass:[/**] @property include Terms to include. */
  include?: <<AggregationsTermsInclude>>
  pass:[/**] @property jlh Use JLH score as the significance score. */
  jlh?: <<EmptyObject>>
  pass:[/**] @property min_doc_count Only return terms that are found in more than `min_doc_count` hits. */
  min_doc_count?: <<long>>
  pass:[/**] @property mutual_information Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */
  mutual_information?: <<AggregationsMutualInformationHeuristic>>
  pass:[/**] @property percentage A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */
  percentage?: <<AggregationsPercentageScoreHeuristic>>
  pass:[/**] @property script_heuristic Customized score, implemented via a script. */
  script_heuristic?: <<AggregationsScriptedHeuristic>>
  pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: <<long>>
  pass:[/**] @property shard_size Can be used to control the volumes of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: <<integer>>
  pass:[/**] @property size The number of buckets returned out of the overall terms list. */
  size?: <<integer>>
}
[pass]
++++
</pre>
++++
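
A sketch, assuming a hypothetical `reports` index with `force` and `crime_type` keyword fields; the query narrows the foreground set, and the aggregation surfaces the crime types that are unusually common in it relative to the whole index:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'reports',
  size: 0,
  query: { terms: { force: ['British Transport Police'] } },
  aggs: {
    significant_crime_types: {
      significant_terms: { field: 'crime_type' }
    }
  }
})
[pass]
++++
</pre>
++++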

[discrete]
[[AggregationsSignificantTermsBucketBase]]
=== AggregationsSignificantTermsBucketBase

[pass]
++++
<pre>
++++
interface AggregationsSignificantTermsBucketBase extends <<AggregationsMultiBucketBase>> {
  score: <<double>>
  bg_count: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSignificantTextAggregation]]
=== AggregationsSignificantTextAggregation

[pass]
++++
<pre>
++++
interface AggregationsSignificantTextAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property background_filter A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */
  background_filter?: <<QueryDslQueryContainer>>
  pass:[/**] @property chi_square Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */
  chi_square?: <<AggregationsChiSquareHeuristic>>
  pass:[/**] @property exclude Values to exclude. */
  exclude?: <<AggregationsTermsExclude>>
  pass:[/**] @property execution_hint Determines whether the aggregation will use field values directly or global ordinals. */
  execution_hint?: <<AggregationsTermsAggregationExecutionHint>>
  pass:[/**] @property field The field from which to return significant text. */
  field?: <<Field>>
  pass:[/**] @property filter_duplicate_text Whether to filter out duplicate text to deal with noisy data. */
  filter_duplicate_text?: boolean
  pass:[/**] @property gnd Use Google normalized distance as described in "The Google Similarity <<Distance>>", Cilibrasi and Vitanyi, 2007, as the significance score. */
  gnd?: <<AggregationsGoogleNormalizedDistanceHeuristic>>
  pass:[/**] @property include Values to include. */
  include?: <<AggregationsTermsInclude>>
  pass:[/**] @property jlh Use JLH score as the significance score. */
  jlh?: <<EmptyObject>>
  pass:[/**] @property min_doc_count Only return values that are found in more than `min_doc_count` hits. */
  min_doc_count?: <<long>>
  pass:[/**] @property mutual_information Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */
  mutual_information?: <<AggregationsMutualInformationHeuristic>>
  pass:[/**] @property percentage A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */
  percentage?: <<AggregationsPercentageScoreHeuristic>>
  pass:[/**] @property script_heuristic Customized score, implemented via a script. */
  script_heuristic?: <<AggregationsScriptedHeuristic>>
  pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count. Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: <<long>>
  pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: <<integer>>
  pass:[/**] @property size The number of buckets returned out of the overall terms list. */
  size?: <<integer>>
  pass:[/**] @property source_fields Overrides the JSON `_source` fields from which text will be analyzed. */
  source_fields?: <<Fields>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSimpleMovingAverageAggregation]]
=== AggregationsSimpleMovingAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsSimpleMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> {
  model: 'simple'
  settings: <<EmptyObject>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSimpleValueAggregate]]
=== AggregationsSimpleValueAggregate

[pass]
++++
<pre>
++++
interface AggregationsSimpleValueAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSingleBucketAggregateBase]]
=== AggregationsSingleBucketAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsSingleBucketAggregateBase extends <<AggregationsAggregateBase>> {
  doc_count: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSingleMetricAggregateBase]]
=== AggregationsSingleMetricAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsSingleMetricAggregateBase extends <<AggregationsAggregateBase>> {
  pass:[/**] @property value The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. */
  value: <<double>> | null
  value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStandardDeviationBounds]]
=== AggregationsStandardDeviationBounds

[pass]
++++
<pre>
++++
interface AggregationsStandardDeviationBounds {
  upper: <<double>> | null
  lower: <<double>> | null
  upper_population: <<double>> | null
  lower_population: <<double>> | null
  upper_sampling: <<double>> | null
  lower_sampling: <<double>> | null
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStandardDeviationBoundsAsString]]
=== AggregationsStandardDeviationBoundsAsString

[pass]
++++
<pre>
++++
interface AggregationsStandardDeviationBoundsAsString {
  upper: string
  lower: string
  upper_population: string
  lower_population: string
  upper_sampling: string
  lower_sampling: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStatsAggregate]]
=== AggregationsStatsAggregate

[pass]
++++
<pre>
++++
interface AggregationsStatsAggregate extends <<AggregationsAggregateBase>> {
  count: <<long>>
  min: <<double>> | null
  max: <<double>> | null
  avg: <<double>> | null
  sum: <<double>>
  min_as_string?: string
  max_as_string?: string
  avg_as_string?: string
  sum_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStatsAggregation]]
=== AggregationsStatsAggregation

[pass]
++++
<pre>
++++
interface AggregationsStatsAggregation extends <<AggregationsFormatMetricAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStatsBucketAggregate]]
=== AggregationsStatsBucketAggregate

[pass]
++++
<pre>
++++
interface AggregationsStatsBucketAggregate extends <<AggregationsStatsAggregate>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStatsBucketAggregation]]
=== AggregationsStatsBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsStatsBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringRareTermsAggregate]]
=== AggregationsStringRareTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsStringRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsStringRareTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringRareTermsBucket]]
=== AggregationsStringRareTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsStringRareTermsBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: string
}
type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringStatsAggregate]]
=== AggregationsStringStatsAggregate

[pass]
++++
<pre>
++++
interface AggregationsStringStatsAggregate extends <<AggregationsAggregateBase>> {
  count: <<long>>
  min_length: <<integer>> | null
  max_length: <<integer>> | null
  avg_length: <<double>> | null
  entropy: <<double>> | null
  distribution?: Record<string, <<double>>> | null
  min_length_as_string?: string
  max_length_as_string?: string
  avg_length_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringStatsAggregation]]
=== AggregationsStringStatsAggregation

[pass]
++++
<pre>
++++
interface AggregationsStringStatsAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property show_distribution Shows the probability distribution for all characters. */
  show_distribution?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringTermsAggregate]]
=== AggregationsStringTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsStringTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsStringTermsBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsStringTermsBucket]]
=== AggregationsStringTermsBucket

[pass]
++++
<pre>
++++
interface AggregationsStringTermsBucketKeys extends <<AggregationsTermsBucketBase>> {
  key: <<FieldValue>>
}
type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | <<FieldValue>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSumAggregate]]
=== AggregationsSumAggregate

[pass]
++++
<pre>
++++
interface AggregationsSumAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSumAggregation]]
=== AggregationsSumAggregation

[pass]
++++
<pre>
++++
interface AggregationsSumAggregation extends <<AggregationsFormatMetricAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsSumBucketAggregation]]
=== AggregationsSumBucketAggregation

[pass]
++++
<pre>
++++
interface AggregationsSumBucketAggregation extends <<AggregationsPipelineAggregationBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTDigest]]
=== AggregationsTDigest

[pass]
++++
<pre>
++++
interface AggregationsTDigest {
  pass:[/**] @property compression Limits the maximum number of nodes used by the underlying <<TDigest>> algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
  compression?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTDigestPercentileRanksAggregate]]
=== AggregationsTDigestPercentileRanksAggregate

[pass]
++++
<pre>
++++
interface AggregationsTDigestPercentileRanksAggregate extends <<AggregationsPercentilesAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTDigestPercentilesAggregate]]
=== AggregationsTDigestPercentilesAggregate

[pass]
++++
<pre>
++++
interface AggregationsTDigestPercentilesAggregate extends <<AggregationsPercentilesAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTTestAggregate]]
=== AggregationsTTestAggregate

[pass]
++++
<pre>
++++
interface AggregationsTTestAggregate extends <<AggregationsAggregateBase>> {
  value: <<double>> | null
  value_as_string?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTTestAggregation]]
=== AggregationsTTestAggregation

[pass]
++++
<pre>
++++
interface AggregationsTTestAggregation {
  pass:[/**] @property a Test population A. */
  a?: <<AggregationsTestPopulation>>
  pass:[/**] @property b Test population B. */
  b?: <<AggregationsTestPopulation>>
  pass:[/**] @property type The type of test. */
  type?: <<AggregationsTTestType>>
}
[pass]
++++
</pre>
++++
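
A short sketch, assuming a hypothetical `node-upgrade` index where `startup_time_before` and `startup_time_after` are numeric fields on the same documents, so a paired test applies:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'node-upgrade',
  size: 0,
  aggs: {
    startup_time_ttest: {
      t_test: {
        a: { field: 'startup_time_before' },
        b: { field: 'startup_time_after' },
        type: 'paired' // both populations come from the same documents
      }
    }
  }
})
[pass]
++++
</pre>
++++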

[discrete]
[[AggregationsTTestType]]
=== AggregationsTTestType

[pass]
++++
<pre>
++++
type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsAggregateBase]]
=== AggregationsTermsAggregateBase

[pass]
++++
<pre>
++++
interface AggregationsTermsAggregateBase<TBucket = unknown> extends <<AggregationsMultiBucketAggregateBase>><TBucket> {
  doc_count_error_upper_bound?: <<long>>
  sum_other_doc_count?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsAggregation]]
=== AggregationsTermsAggregation

[pass]
++++
<pre>
++++
interface AggregationsTermsAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property collect_mode Determines how child aggregations should be calculated: breadth-first or depth-first. */
  collect_mode?: <<AggregationsTermsAggregationCollectMode>>
  pass:[/**] @property exclude Values to exclude. Accepts regular expressions and partitions. */
  exclude?: <<AggregationsTermsExclude>>
  pass:[/**] @property execution_hint Determines whether the aggregation will use field values directly or global ordinals. */
  execution_hint?: <<AggregationsTermsAggregationExecutionHint>>
  pass:[/**] @property field The field from which to return terms. */
  field?: <<Field>>
  pass:[/**] @property include Values to include. Accepts regular expressions and partitions. */
  include?: <<AggregationsTermsInclude>>
  pass:[/**] @property min_doc_count Only return values that are found in more than `min_doc_count` hits. */
  min_doc_count?: <<integer>>
  pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */
  missing?: <<AggregationsMissing>>
  missing_order?: <<AggregationsMissingOrder>>
  missing_bucket?: boolean
  pass:[/**] @property value_type Coerces unmapped fields into the specified type. */
  value_type?: string
  pass:[/**] @property order Specifies the sort order of the buckets. Defaults to sorting by descending document count. */
  order?: <<AggregationsAggregateOrder>>
  script?: <<Script>> | string
  pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */
  shard_min_doc_count?: <<long>>
  pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */
  shard_size?: <<integer>>
  pass:[/**] @property show_term_doc_count_error Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */
  show_term_doc_count_error?: boolean
  pass:[/**] @property size The number of buckets returned out of the overall terms list. */
  size?: <<integer>>
  format?: string
}
[pass]
++++
</pre>
++++
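
The most common form is a plain `terms` aggregation over a keyword field, sketched here against a hypothetical `products` index:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'products',
  size: 0,
  aggs: {
    genres: {
      terms: {
        field: 'genre',
        size: 10,                 // top ten buckets
        order: { _count: 'desc' } // descending document count (the default)
      }
    }
  }
})
[pass]
++++
</pre>
++++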

[discrete]
[[AggregationsTermsAggregationCollectMode]]
=== AggregationsTermsAggregationCollectMode

[pass]
++++
<pre>
++++
type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsAggregationExecutionHint]]
=== AggregationsTermsAggregationExecutionHint

[pass]
++++
<pre>
++++
type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsBucketBase]]
=== AggregationsTermsBucketBase

[pass]
++++
<pre>
++++
interface AggregationsTermsBucketBase extends <<AggregationsMultiBucketBase>> {
  doc_count_error_upper_bound?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsExclude]]
=== AggregationsTermsExclude

[pass]
++++
<pre>
++++
type AggregationsTermsExclude = string | string[]
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsInclude]]
=== AggregationsTermsInclude

[pass]
++++
<pre>
++++
type AggregationsTermsInclude = string | string[] | <<AggregationsTermsPartition>>
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTermsPartition]]
=== AggregationsTermsPartition

[pass]
++++
<pre>
++++
interface AggregationsTermsPartition {
  pass:[/**] @property num_partitions The number of partitions. */
  num_partitions: <<long>>
  pass:[/**] @property partition The partition number for this request. */
  partition: <<long>>
}
[pass]
++++
</pre>
++++
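
Partitions are supplied through the `include` property of a terms aggregation to page through a very large set of unique terms; a sketch against a hypothetical `accounts` index:

[pass]
++++
<pre>
++++
// Processes partition 0 of 20; running the same request once per
// partition (0..19) covers every unique account id exactly once.
const response = await client.search({
  index: 'accounts',
  size: 0,
  aggs: {
    expired_sessions: {
      terms: {
        field: 'account_id',
        include: { num_partitions: 20, partition: 0 },
        size: 10000
      }
    }
  }
})
[pass]
++++
</pre>
++++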

[discrete]
[[AggregationsTestPopulation]]
=== AggregationsTestPopulation

[pass]
++++
<pre>
++++
interface AggregationsTestPopulation {
  pass:[/**] @property field The field to aggregate. */
  field: <<Field>>
  script?: <<Script>> | string
  pass:[/**] @property filter A filter used to define a set of records to run unpaired t-test on. */
  filter?: <<QueryDslQueryContainer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTimeSeriesAggregate]]
=== AggregationsTimeSeriesAggregate

[pass]
++++
<pre>
++++
interface AggregationsTimeSeriesAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsTimeSeriesBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTimeSeriesAggregation]]
=== AggregationsTimeSeriesAggregation

[pass]
++++
<pre>
++++
interface AggregationsTimeSeriesAggregation extends <<AggregationsBucketAggregationBase>> {
  pass:[/**] @property size The maximum number of results to return. */
  size?: <<integer>>
  pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */
  keyed?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTimeSeriesBucket]]
=== AggregationsTimeSeriesBucket

[pass]
++++
<pre>
++++
interface AggregationsTimeSeriesBucketKeys extends <<AggregationsMultiBucketBase>> {
  key: Record<<<Field>>, <<FieldValue>>>
}
type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | Record<<<Field>>, <<FieldValue>>> | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTopHitsAggregate]]
=== AggregationsTopHitsAggregate

[pass]
++++
<pre>
++++
interface AggregationsTopHitsAggregate extends <<AggregationsAggregateBase>> {
  hits: <<SearchHitsMetadata>><any>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTopHitsAggregation]]
=== AggregationsTopHitsAggregation

[pass]
++++
<pre>
++++
interface AggregationsTopHitsAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property docvalue_fields <<Fields>> for which to return doc values. */
  docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[]
  pass:[/**] @property explain If `true`, returns detailed information about score computation as part of a hit. */
  explain?: boolean
  pass:[/**] @property fields Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */
  fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[]
  pass:[/**] @property from Starting document offset. */
  from?: <<integer>>
  pass:[/**] @property highlight Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */
  highlight?: <<SearchHighlight>>
  pass:[/**] @property script_fields Returns the result of one or more script evaluations for each hit. */
  script_fields?: Record<string, <<ScriptField>>>
  pass:[/**] @property size The maximum number of top matching hits to return per bucket. */
  size?: <<integer>>
  pass:[/**] @property sort <<Sort>> order of the top matching hits. By default, the hits are sorted by the score of the main query. */
  sort?: <<Sort>>
  pass:[/**] @property _source Selects the fields of the source that are returned. */
  _source?: <<SearchSourceConfig>>
  pass:[/**] @property stored_fields Returns values for the specified stored fields (fields that use the `store` mapping option). */
  stored_fields?: <<Fields>>
  pass:[/**] @property track_scores If `true`, calculates and returns document scores, even if the scores are not used for sorting. */
  track_scores?: boolean
  pass:[/**] @property version If `true`, returns document version as part of a hit. */
  version?: boolean
  pass:[/**] @property seq_no_primary_term If `true`, returns sequence number and primary term of the last modification of each hit. */
  seq_no_primary_term?: boolean
}
[pass]
++++
</pre>
++++
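
`top_hits` is usually nested under a bucketing aggregation to surface the best-matching documents per bucket; a sketch against a hypothetical `sales` index:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'sales',
  size: 0,
  aggs: {
    top_tags: {
      terms: { field: 'type' },
      aggs: {
        top_sales_hits: {
          top_hits: {
            sort: [{ date: { order: 'desc' } }],      // latest sale first
            _source: { includes: ['date', 'price'] }, // trim the returned source
            size: 1
          }
        }
      }
    }
  }
})
[pass]
++++
</pre>
++++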

[discrete]
[[AggregationsTopMetrics]]
=== AggregationsTopMetrics

[pass]
++++
<pre>
++++
interface AggregationsTopMetrics {
  sort: (<<FieldValue>> | null)[]
  metrics: Record<string, <<FieldValue>> | null>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTopMetricsAggregate]]
=== AggregationsTopMetricsAggregate

[pass]
++++
<pre>
++++
interface AggregationsTopMetricsAggregate extends <<AggregationsAggregateBase>> {
  top: <<AggregationsTopMetrics>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsTopMetricsAggregation]]
=== AggregationsTopMetricsAggregation

[pass]
++++
<pre>
++++
interface AggregationsTopMetricsAggregation extends <<AggregationsMetricAggregationBase>> {
  pass:[/**] @property metrics The fields of the top document to return. */
  metrics?: <<AggregationsTopMetricsValue>> | <<AggregationsTopMetricsValue>>[]
  pass:[/**] @property size The number of top documents from which to return metrics. */
  size?: <<integer>>
  pass:[/**] @property sort The sort order of the documents. */
  sort?: <<Sort>>
}
[pass]
++++
</pre>
++++
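
A sketch, assuming a hypothetical `sensor-readings` index: `top_metrics` returns the `temperature` of the document with the latest `timestamp`:

[pass]
++++
<pre>
++++
const response = await client.search({
  index: 'sensor-readings',
  size: 0,
  aggs: {
    latest_reading: {
      top_metrics: {
        metrics: { field: 'temperature' },
        sort: { timestamp: 'desc' }, // pick the most recent document
        size: 1
      }
    }
  }
})
[pass]
++++
</pre>
++++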
|
||
[discrete]
[[AggregationsTopMetricsValue]]
=== AggregationsTopMetricsValue

[pass]
++++
<pre>
++++
interface AggregationsTopMetricsValue {
  pass:[/**] @property field A field to return as a metric. */
  field: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsUnmappedRareTermsAggregate]]
=== AggregationsUnmappedRareTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsUnmappedRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><void> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsUnmappedSamplerAggregate]]
=== AggregationsUnmappedSamplerAggregate

[pass]
++++
<pre>
++++
interface AggregationsUnmappedSamplerAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {
}
type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys
  & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsUnmappedSignificantTermsAggregate]]
=== AggregationsUnmappedSignificantTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsUnmappedSignificantTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><void> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsUnmappedTermsAggregate]]
=== AggregationsUnmappedTermsAggregate

[pass]
++++
<pre>
++++
interface AggregationsUnmappedTermsAggregate extends <<AggregationsTermsAggregateBase>><void> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsValueCountAggregate]]
=== AggregationsValueCountAggregate

[pass]
++++
<pre>
++++
interface AggregationsValueCountAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsValueCountAggregation]]
=== AggregationsValueCountAggregation

[pass]
++++
<pre>
++++
interface AggregationsValueCountAggregation extends <<AggregationsFormattableMetricAggregation>> {
}
[pass]
++++
</pre>
++++

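A minimal `value_count` sketch, assuming the same `client` as the first example; the field name `tags` is hypothetical.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Count how many documents supply a value for `tags`. `value_count`
// accepts the fields of AggregationsFormattableMetricAggregation,
// e.g. `field` and an optional `format`.
const response = await client.search({
  index: 'my-index-000001',
  size: 0,
  aggs: {
    tag_values: {
      value_count: { field: 'tags' }
    }
  }
})
----
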
[discrete]
[[AggregationsValueType]]
=== AggregationsValueType

[pass]
++++
<pre>
++++
type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsVariableWidthHistogramAggregate]]
=== AggregationsVariableWidthHistogramAggregate

[pass]
++++
<pre>
++++
interface AggregationsVariableWidthHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsVariableWidthHistogramBucket>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsVariableWidthHistogramAggregation]]
=== AggregationsVariableWidthHistogramAggregation

[pass]
++++
<pre>
++++
interface AggregationsVariableWidthHistogramAggregation {
  pass:[/**] @property field The name of the field. */
  field?: <<Field>>
  pass:[/**] @property buckets The target number of buckets. */
  buckets?: <<integer>>
  pass:[/**] @property shard_size The number of buckets that the coordinating node will request from each shard. Defaults to `buckets * 50`. */
  shard_size?: <<integer>>
  pass:[/**] @property initial_buffer Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. Defaults to `min(10 * shard_size, 50000)`. */
  initial_buffer?: <<integer>>
  script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

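A sketch of a `variable_width_histogram` request under the same assumptions as the earlier examples (illustrative index and field names).

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Cluster numeric values of `price` into a target number of
// variable-width buckets.
const response = await client.search({
  index: 'my-index-000001',
  size: 0,
  aggs: {
    price_clusters: {
      variable_width_histogram: { field: 'price', buckets: 5 }
    }
  }
})
----
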
[discrete]
[[AggregationsVariableWidthHistogramBucket]]
=== AggregationsVariableWidthHistogramBucket

[pass]
++++
<pre>
++++
interface AggregationsVariableWidthHistogramBucketKeys extends <<AggregationsMultiBucketBase>> {
  min: <<double>>
  key: <<double>>
  max: <<double>>
  min_as_string?: string
  key_as_string?: string
  max_as_string?: string
}
type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys
  & { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> }
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsWeightedAverageAggregation]]
=== AggregationsWeightedAverageAggregation

[pass]
++++
<pre>
++++
interface AggregationsWeightedAverageAggregation {
  pass:[/**] @property format A numeric response formatter. */
  format?: string
  pass:[/**] @property value Configuration for the field that provides the values. */
  value?: <<AggregationsWeightedAverageValue>>
  value_type?: <<AggregationsValueType>>
  pass:[/**] @property weight Configuration for the field or script that provides the weights. */
  weight?: <<AggregationsWeightedAverageValue>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AggregationsWeightedAverageValue]]
=== AggregationsWeightedAverageValue

[pass]
++++
<pre>
++++
interface AggregationsWeightedAverageValue {
  pass:[/**] @property field The field from which to extract the values or weights. */
  field?: <<Field>>
  pass:[/**] @property missing A value or weight to use if the field is missing. */
  missing?: <<double>>
  script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

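A `weighted_avg` sketch tying the two types above together; the fields `grade` and `weight` are assumptions for the example.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// `value` and `weight` are each an AggregationsWeightedAverageValue.
const response = await client.search({
  index: 'my-index-000001',
  size: 0,
  aggs: {
    weighted_grade: {
      weighted_avg: {
        value: { field: 'grade', missing: 0 },
        weight: { field: 'weight' }
      }
    }
  }
})
----
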
[discrete]
[[AggregationsWeightedAvgAggregate]]
=== AggregationsWeightedAvgAggregate

[pass]
++++
<pre>
++++
interface AggregationsWeightedAvgAggregate extends <<AggregationsSingleMetricAggregateBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisAnalyzer]]
=== AnalysisAnalyzer

[pass]
++++
<pre>
++++
type AnalysisAnalyzer = <<AnalysisCustomAnalyzer>> | <<AnalysisFingerprintAnalyzer>> | <<AnalysisKeywordAnalyzer>> | <<AnalysisLanguageAnalyzer>> | <<AnalysisNoriAnalyzer>> | <<AnalysisPatternAnalyzer>> | <<AnalysisSimpleAnalyzer>> | <<AnalysisStandardAnalyzer>> | <<AnalysisStopAnalyzer>> | <<AnalysisWhitespaceAnalyzer>> | <<AnalysisIcuAnalyzer>> | <<AnalysisKuromojiAnalyzer>> | <<AnalysisSnowballAnalyzer>> | <<AnalysisArabicAnalyzer>> | <<AnalysisArmenianAnalyzer>> | <<AnalysisBasqueAnalyzer>> | <<AnalysisBengaliAnalyzer>> | <<AnalysisBrazilianAnalyzer>> | <<AnalysisBulgarianAnalyzer>> | <<AnalysisCatalanAnalyzer>> | <<AnalysisChineseAnalyzer>> | <<AnalysisCjkAnalyzer>> | <<AnalysisCzechAnalyzer>> | <<AnalysisDanishAnalyzer>> | <<AnalysisDutchAnalyzer>> | <<AnalysisEnglishAnalyzer>> | <<AnalysisEstonianAnalyzer>> | <<AnalysisFinnishAnalyzer>> | <<AnalysisFrenchAnalyzer>> | <<AnalysisGalicianAnalyzer>> | <<AnalysisGermanAnalyzer>> | <<AnalysisGreekAnalyzer>> | <<AnalysisHindiAnalyzer>> | <<AnalysisHungarianAnalyzer>> | <<AnalysisIndonesianAnalyzer>> | <<AnalysisIrishAnalyzer>> | <<AnalysisItalianAnalyzer>> | <<AnalysisLatvianAnalyzer>> | <<AnalysisLithuanianAnalyzer>> | <<AnalysisNorwegianAnalyzer>> | <<AnalysisPersianAnalyzer>> | <<AnalysisPortugueseAnalyzer>> | <<AnalysisRomanianAnalyzer>> | <<AnalysisRussianAnalyzer>> | <<AnalysisSerbianAnalyzer>> | <<AnalysisSoraniAnalyzer>> | <<AnalysisSpanishAnalyzer>> | <<AnalysisSwedishAnalyzer>> | <<AnalysisTurkishAnalyzer>> | <<AnalysisThaiAnalyzer>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisArabicAnalyzer]]
=== AnalysisArabicAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisArabicAnalyzer {
  type: 'arabic'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisArmenianAnalyzer]]
=== AnalysisArmenianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisArmenianAnalyzer {
  type: 'armenian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisAsciiFoldingTokenFilter]]
=== AnalysisAsciiFoldingTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisAsciiFoldingTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'asciifolding'
  preserve_original?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisBasqueAnalyzer]]
=== AnalysisBasqueAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisBasqueAnalyzer {
  type: 'basque'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisBengaliAnalyzer]]
=== AnalysisBengaliAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisBengaliAnalyzer {
  type: 'bengali'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisBrazilianAnalyzer]]
=== AnalysisBrazilianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisBrazilianAnalyzer {
  type: 'brazilian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisBulgarianAnalyzer]]
=== AnalysisBulgarianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisBulgarianAnalyzer {
  type: 'bulgarian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCatalanAnalyzer]]
=== AnalysisCatalanAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisCatalanAnalyzer {
  type: 'catalan'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCharFilter]]
=== AnalysisCharFilter

[pass]
++++
<pre>
++++
type AnalysisCharFilter = string | <<AnalysisCharFilterDefinition>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCharFilterBase]]
=== AnalysisCharFilterBase

[pass]
++++
<pre>
++++
interface AnalysisCharFilterBase {
  version?: <<VersionString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCharFilterDefinition]]
=== AnalysisCharFilterDefinition

[pass]
++++
<pre>
++++
type AnalysisCharFilterDefinition = <<AnalysisHtmlStripCharFilter>> | <<AnalysisMappingCharFilter>> | <<AnalysisPatternReplaceCharFilter>> | <<AnalysisIcuNormalizationCharFilter>> | <<AnalysisKuromojiIterationMarkCharFilter>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCharGroupTokenizer]]
=== AnalysisCharGroupTokenizer

[pass]
++++
<pre>
++++
interface AnalysisCharGroupTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'char_group'
  tokenize_on_chars: string[]
  max_token_length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisChineseAnalyzer]]
=== AnalysisChineseAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisChineseAnalyzer {
  type: 'chinese'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCjkAnalyzer]]
=== AnalysisCjkAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisCjkAnalyzer {
  type: 'cjk'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisClassicTokenizer]]
=== AnalysisClassicTokenizer

[pass]
++++
<pre>
++++
interface AnalysisClassicTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'classic'
  max_token_length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCommonGramsTokenFilter]]
=== AnalysisCommonGramsTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisCommonGramsTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'common_grams'
  common_words?: string[]
  common_words_path?: string
  ignore_case?: boolean
  query_mode?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCompoundWordTokenFilterBase]]
=== AnalysisCompoundWordTokenFilterBase

[pass]
++++
<pre>
++++
interface AnalysisCompoundWordTokenFilterBase extends <<AnalysisTokenFilterBase>> {
  hyphenation_patterns_path?: string
  max_subword_size?: <<integer>>
  min_subword_size?: <<integer>>
  min_word_size?: <<integer>>
  only_longest_match?: boolean
  word_list?: string[]
  word_list_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisConditionTokenFilter]]
=== AnalysisConditionTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisConditionTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'condition'
  filter: string[]
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++

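A sketch of registering a `condition` token filter at index creation. The filter and analyzer names are hypothetical, and the predicate script is only an illustration of the `<<Script>> | string` shape; consult the Elasticsearch analysis docs for the exact predicate API.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Apply `lowercase` only to tokens longer than five characters.
await client.indices.create({
  index: 'my-index-000002',
  settings: {
    analysis: {
      filter: {
        long_tokens_only: {
          type: 'condition',
          filter: ['lowercase'],
          script: { source: 'token.getTerm().length() > 5' }
        }
      },
      analyzer: {
        conditional: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['long_tokens_only']
        }
      }
    }
  }
})
----
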
[discrete]
[[AnalysisCustomAnalyzer]]
=== AnalysisCustomAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisCustomAnalyzer {
  type: 'custom'
  char_filter?: string | string[]
  filter?: string | string[]
  position_increment_gap?: <<integer>>
  position_offset_gap?: <<integer>>
  tokenizer: string
}
[pass]
++++
</pre>
++++

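A minimal custom-analyzer sketch (index and analyzer names are illustrative): only `tokenizer` is required, while `char_filter` and `filter` reference other analysis components by name.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'my-index-000003',
  settings: {
    analysis: {
      analyzer: {
        folded: {
          type: 'custom',
          tokenizer: 'standard',
          char_filter: ['html_strip'],
          filter: ['lowercase', 'asciifolding']
        }
      }
    }
  }
})
----
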
[discrete]
[[AnalysisCustomNormalizer]]
=== AnalysisCustomNormalizer

[pass]
++++
<pre>
++++
interface AnalysisCustomNormalizer {
  type: 'custom'
  char_filter?: string[]
  filter?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisCzechAnalyzer]]
=== AnalysisCzechAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisCzechAnalyzer {
  type: 'czech'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisDanishAnalyzer]]
=== AnalysisDanishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisDanishAnalyzer {
  type: 'danish'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisDelimitedPayloadEncoding]]
=== AnalysisDelimitedPayloadEncoding

[pass]
++++
<pre>
++++
type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisDelimitedPayloadTokenFilter]]
=== AnalysisDelimitedPayloadTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisDelimitedPayloadTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'delimited_payload'
  delimiter?: string
  encoding?: <<AnalysisDelimitedPayloadEncoding>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisDictionaryDecompounderTokenFilter]]
=== AnalysisDictionaryDecompounderTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisDictionaryDecompounderTokenFilter extends <<AnalysisCompoundWordTokenFilterBase>> {
  type: 'dictionary_decompounder'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisDutchAnalyzer]]
=== AnalysisDutchAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisDutchAnalyzer {
  type: 'dutch'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisEdgeNGramSide]]
=== AnalysisEdgeNGramSide

[pass]
++++
<pre>
++++
type AnalysisEdgeNGramSide = 'front' | 'back'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisEdgeNGramTokenFilter]]
=== AnalysisEdgeNGramTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisEdgeNGramTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'edge_ngram'
  max_gram?: <<integer>>
  min_gram?: <<integer>>
  side?: <<AnalysisEdgeNGramSide>>
  preserve_original?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisEdgeNGramTokenizer]]
=== AnalysisEdgeNGramTokenizer

[pass]
++++
<pre>
++++
interface AnalysisEdgeNGramTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'edge_ngram'
  custom_token_chars?: string
  max_gram?: <<integer>>
  min_gram?: <<integer>>
  token_chars?: <<AnalysisTokenChar>>[]
}
[pass]
++++
</pre>
++++

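An autocomplete-style sketch using the `edge_ngram` tokenizer (tokenizer and analyzer names are assumptions): `token_chars` narrows which character classes may appear inside a token.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'my-index-000004',
  settings: {
    analysis: {
      tokenizer: {
        autocomplete: {
          type: 'edge_ngram',
          min_gram: 2,
          max_gram: 10,
          token_chars: ['letter', 'digit']
        }
      },
      analyzer: {
        autocomplete: {
          type: 'custom',
          tokenizer: 'autocomplete',
          filter: ['lowercase']
        }
      }
    }
  }
})
----
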
[discrete]
[[AnalysisElisionTokenFilter]]
=== AnalysisElisionTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisElisionTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'elision'
  articles?: string[]
  articles_path?: string
  articles_case?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisEnglishAnalyzer]]
=== AnalysisEnglishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisEnglishAnalyzer {
  type: 'english'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisEstonianAnalyzer]]
=== AnalysisEstonianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisEstonianAnalyzer {
  type: 'estonian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisFingerprintAnalyzer]]
=== AnalysisFingerprintAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisFingerprintAnalyzer {
  type: 'fingerprint'
  version?: <<VersionString>>
  max_output_size: <<integer>>
  preserve_original: boolean
  separator: string
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisFingerprintTokenFilter]]
=== AnalysisFingerprintTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisFingerprintTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'fingerprint'
  max_output_size?: <<integer>>
  separator?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisFinnishAnalyzer]]
=== AnalysisFinnishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisFinnishAnalyzer {
  type: 'finnish'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisFrenchAnalyzer]]
=== AnalysisFrenchAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisFrenchAnalyzer {
  type: 'french'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisGalicianAnalyzer]]
=== AnalysisGalicianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisGalicianAnalyzer {
  type: 'galician'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisGermanAnalyzer]]
=== AnalysisGermanAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisGermanAnalyzer {
  type: 'german'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisGreekAnalyzer]]
=== AnalysisGreekAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisGreekAnalyzer {
  type: 'greek'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisHindiAnalyzer]]
=== AnalysisHindiAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisHindiAnalyzer {
  type: 'hindi'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisHtmlStripCharFilter]]
=== AnalysisHtmlStripCharFilter

[pass]
++++
<pre>
++++
interface AnalysisHtmlStripCharFilter extends <<AnalysisCharFilterBase>> {
  type: 'html_strip'
  escaped_tags?: string[]
}
[pass]
++++
</pre>
++++

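Char filters can be inspected without creating an index. A sketch using the analyze API and the built-in `html_strip` char filter:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Strip markup before tokenization and print the resulting tokens.
const result = await client.indices.analyze({
  char_filter: ['html_strip'],
  tokenizer: 'standard',
  text: '<p>Some <b>bold</b> text</p>'
})
console.log(result.tokens?.map(t => t.token))
----
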
[discrete]
[[AnalysisHungarianAnalyzer]]
=== AnalysisHungarianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisHungarianAnalyzer {
  type: 'hungarian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisHunspellTokenFilter]]
=== AnalysisHunspellTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisHunspellTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'hunspell'
  dedup?: boolean
  dictionary?: string
  locale: string
  longest_only?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisHyphenationDecompounderTokenFilter]]
=== AnalysisHyphenationDecompounderTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisHyphenationDecompounderTokenFilter extends <<AnalysisCompoundWordTokenFilterBase>> {
  type: 'hyphenation_decompounder'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuAnalyzer]]
=== AnalysisIcuAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisIcuAnalyzer {
  type: 'icu_analyzer'
  method: <<AnalysisIcuNormalizationType>>
  mode: <<AnalysisIcuNormalizationMode>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuCollationAlternate]]
=== AnalysisIcuCollationAlternate

[pass]
++++
<pre>
++++
type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuCollationCaseFirst]]
=== AnalysisIcuCollationCaseFirst

[pass]
++++
<pre>
++++
type AnalysisIcuCollationCaseFirst = 'lower' | 'upper'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuCollationDecomposition]]
=== AnalysisIcuCollationDecomposition

[pass]
++++
<pre>
++++
type AnalysisIcuCollationDecomposition = 'no' | 'identical'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuCollationStrength]]
=== AnalysisIcuCollationStrength

[pass]
++++
<pre>
++++
type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuCollationTokenFilter]]
=== AnalysisIcuCollationTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisIcuCollationTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'icu_collation'
  alternate?: <<AnalysisIcuCollationAlternate>>
  case_first?: <<AnalysisIcuCollationCaseFirst>>
  case_level?: boolean
  country?: string
  decomposition?: <<AnalysisIcuCollationDecomposition>>
  hiragana_quaternary_mode?: boolean
  language?: string
  numeric?: boolean
  rules?: string
  strength?: <<AnalysisIcuCollationStrength>>
  variable_top?: string
  variant?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuFoldingTokenFilter]]
=== AnalysisIcuFoldingTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisIcuFoldingTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'icu_folding'
  unicode_set_filter: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuNormalizationCharFilter]]
=== AnalysisIcuNormalizationCharFilter

[pass]
++++
<pre>
++++
interface AnalysisIcuNormalizationCharFilter extends <<AnalysisCharFilterBase>> {
  type: 'icu_normalizer'
  mode?: <<AnalysisIcuNormalizationMode>>
  name?: <<AnalysisIcuNormalizationType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuNormalizationMode]]
=== AnalysisIcuNormalizationMode

[pass]
++++
<pre>
++++
type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuNormalizationTokenFilter]]
=== AnalysisIcuNormalizationTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisIcuNormalizationTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'icu_normalizer'
  name: <<AnalysisIcuNormalizationType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuNormalizationType]]
=== AnalysisIcuNormalizationType

[pass]
++++
<pre>
++++
type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuTokenizer]]
=== AnalysisIcuTokenizer

[pass]
++++
<pre>
++++
interface AnalysisIcuTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'icu_tokenizer'
  rule_files: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuTransformDirection]]
=== AnalysisIcuTransformDirection

[pass]
++++
<pre>
++++
type AnalysisIcuTransformDirection = 'forward' | 'reverse'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIcuTransformTokenFilter]]
=== AnalysisIcuTransformTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisIcuTransformTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'icu_transform'
  dir?: <<AnalysisIcuTransformDirection>>
  id: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIndonesianAnalyzer]]
=== AnalysisIndonesianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisIndonesianAnalyzer {
  type: 'indonesian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisIrishAnalyzer]]
=== AnalysisIrishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisIrishAnalyzer {
  type: 'irish'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisItalianAnalyzer]]
=== AnalysisItalianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisItalianAnalyzer {
  type: 'italian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKStemTokenFilter]]
=== AnalysisKStemTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKStemTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'kstem'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeepTypesMode]]
=== AnalysisKeepTypesMode

[pass]
++++
<pre>
++++
type AnalysisKeepTypesMode = 'include' | 'exclude'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeepTypesTokenFilter]]
=== AnalysisKeepTypesTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKeepTypesTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'keep_types'
  mode?: <<AnalysisKeepTypesMode>>
  types?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeepWordsTokenFilter]]
=== AnalysisKeepWordsTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKeepWordsTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'keep'
  keep_words?: string[]
  keep_words_case?: boolean
  keep_words_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeywordAnalyzer]]
=== AnalysisKeywordAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisKeywordAnalyzer {
  type: 'keyword'
  version?: <<VersionString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeywordMarkerTokenFilter]]
=== AnalysisKeywordMarkerTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKeywordMarkerTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'keyword_marker'
  ignore_case?: boolean
  keywords?: string | string[]
  keywords_path?: string
  keywords_pattern?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKeywordTokenizer]]
=== AnalysisKeywordTokenizer

[pass]
++++
<pre>
++++
interface AnalysisKeywordTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'keyword'
  buffer_size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiAnalyzer]]
=== AnalysisKuromojiAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisKuromojiAnalyzer {
  type: 'kuromoji'
  mode: <<AnalysisKuromojiTokenizationMode>>
  user_dictionary?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiIterationMarkCharFilter]]
=== AnalysisKuromojiIterationMarkCharFilter

[pass]
++++
<pre>
++++
interface AnalysisKuromojiIterationMarkCharFilter extends <<AnalysisCharFilterBase>> {
  type: 'kuromoji_iteration_mark'
  normalize_kana: boolean
  normalize_kanji: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiPartOfSpeechTokenFilter]]
=== AnalysisKuromojiPartOfSpeechTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKuromojiPartOfSpeechTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'kuromoji_part_of_speech'
  stoptags: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiReadingFormTokenFilter]]
=== AnalysisKuromojiReadingFormTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKuromojiReadingFormTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'kuromoji_readingform'
  use_romaji: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiStemmerTokenFilter]]
=== AnalysisKuromojiStemmerTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisKuromojiStemmerTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'kuromoji_stemmer'
  minimum_length: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiTokenizationMode]]
=== AnalysisKuromojiTokenizationMode

[pass]
++++
<pre>
++++
type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisKuromojiTokenizer]]
=== AnalysisKuromojiTokenizer

[pass]
++++
<pre>
++++
interface AnalysisKuromojiTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'kuromoji_tokenizer'
  discard_punctuation?: boolean
  mode: <<AnalysisKuromojiTokenizationMode>>
  nbest_cost?: <<integer>>
  nbest_examples?: string
  user_dictionary?: string
  user_dictionary_rules?: string[]
  discard_compound_token?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLanguage]]
=== AnalysisLanguage

[pass]
++++
<pre>
++++
type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLanguageAnalyzer]]
=== AnalysisLanguageAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisLanguageAnalyzer {
  type: 'language'
  version?: <<VersionString>>
  language: <<AnalysisLanguage>>
  stem_exclusion: string[]
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLatvianAnalyzer]]
=== AnalysisLatvianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisLatvianAnalyzer {
  type: 'latvian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLengthTokenFilter]]
=== AnalysisLengthTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisLengthTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'length'
  max?: <<integer>>
  min?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLetterTokenizer]]
=== AnalysisLetterTokenizer

[pass]
++++
<pre>
++++
interface AnalysisLetterTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'letter'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLimitTokenCountTokenFilter]]
=== AnalysisLimitTokenCountTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisLimitTokenCountTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'limit'
  consume_all_tokens?: boolean
  max_token_count?: <<SpecUtilsStringified>><<<integer>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLithuanianAnalyzer]]
=== AnalysisLithuanianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisLithuanianAnalyzer {
  type: 'lithuanian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLowercaseNormalizer]]
=== AnalysisLowercaseNormalizer

[pass]
++++
<pre>
++++
interface AnalysisLowercaseNormalizer {
  type: 'lowercase'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLowercaseTokenFilter]]
=== AnalysisLowercaseTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisLowercaseTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'lowercase'
  language?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisLowercaseTokenizer]]
=== AnalysisLowercaseTokenizer

[pass]
++++
<pre>
++++
interface AnalysisLowercaseTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'lowercase'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisMappingCharFilter]]
=== AnalysisMappingCharFilter

[pass]
++++
<pre>
++++
interface AnalysisMappingCharFilter extends <<AnalysisCharFilterBase>> {
  type: 'mapping'
  mappings?: string[]
  mappings_path?: string
}
[pass]
++++
</pre>
++++

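A `mapping` char filter sketch, again via the analyze API; each entry in `mappings` uses the `key => value` syntax, and the sample text is illustrative.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Rewrite emoticons to words before tokenization.
const result = await client.indices.analyze({
  char_filter: [{ type: 'mapping', mappings: [':) => happy', ':( => sad'] }],
  tokenizer: 'standard',
  text: 'I feel :) today'
})
console.log(result.tokens?.map(t => t.token))
----
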
[discrete]
[[AnalysisMultiplexerTokenFilter]]
=== AnalysisMultiplexerTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisMultiplexerTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'multiplexer'
  filters: string[]
  preserve_original?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNGramTokenFilter]]
=== AnalysisNGramTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisNGramTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'ngram'
  max_gram?: <<integer>>
  min_gram?: <<integer>>
  preserve_original?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNGramTokenizer]]
=== AnalysisNGramTokenizer

[pass]
++++
<pre>
++++
interface AnalysisNGramTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'ngram'
  custom_token_chars?: string
  max_gram?: <<integer>>
  min_gram?: <<integer>>
  token_chars?: <<AnalysisTokenChar>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNoriAnalyzer]]
=== AnalysisNoriAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisNoriAnalyzer {
  type: 'nori'
  version?: <<VersionString>>
  decompound_mode?: <<AnalysisNoriDecompoundMode>>
  stoptags?: string[]
  user_dictionary?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNoriDecompoundMode]]
=== AnalysisNoriDecompoundMode

[pass]
++++
<pre>
++++
type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNoriPartOfSpeechTokenFilter]]
=== AnalysisNoriPartOfSpeechTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisNoriPartOfSpeechTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'nori_part_of_speech'
  stoptags?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNoriTokenizer]]
=== AnalysisNoriTokenizer

[pass]
++++
<pre>
++++
interface AnalysisNoriTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'nori_tokenizer'
  decompound_mode?: <<AnalysisNoriDecompoundMode>>
  discard_punctuation?: boolean
  user_dictionary?: string
  user_dictionary_rules?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNormalizer]]
=== AnalysisNormalizer

[pass]
++++
<pre>
++++
type AnalysisNormalizer = <<AnalysisLowercaseNormalizer>> | <<AnalysisCustomNormalizer>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisNorwegianAnalyzer]]
=== AnalysisNorwegianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisNorwegianAnalyzer {
  type: 'norwegian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPathHierarchyTokenizer]]
=== AnalysisPathHierarchyTokenizer

[pass]
++++
<pre>
++++
interface AnalysisPathHierarchyTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'path_hierarchy'
  buffer_size?: <<SpecUtilsStringified>><<<integer>>>
  delimiter?: string
  replacement?: string
  reverse?: <<SpecUtilsStringified>><boolean>
  skip?: <<SpecUtilsStringified>><<<integer>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPatternAnalyzer]]
=== AnalysisPatternAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisPatternAnalyzer {
  type: 'pattern'
  version?: <<VersionString>>
  flags?: string
  lowercase?: boolean
  pattern: string
  stopwords?: <<AnalysisStopWords>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPatternCaptureTokenFilter]]
=== AnalysisPatternCaptureTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisPatternCaptureTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'pattern_capture'
  patterns: string[]
  preserve_original?: <<SpecUtilsStringified>><boolean>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPatternReplaceCharFilter]]
=== AnalysisPatternReplaceCharFilter

[pass]
++++
<pre>
++++
interface AnalysisPatternReplaceCharFilter extends <<AnalysisCharFilterBase>> {
  type: 'pattern_replace'
  flags?: string
  pattern: string
  replacement?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPatternReplaceTokenFilter]]
=== AnalysisPatternReplaceTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisPatternReplaceTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'pattern_replace'
  all?: boolean
  flags?: string
  pattern: string
  replacement?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPatternTokenizer]]
=== AnalysisPatternTokenizer

[pass]
++++
<pre>
++++
interface AnalysisPatternTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'pattern'
  flags?: string
  group?: <<integer>>
  pattern?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPersianAnalyzer]]
=== AnalysisPersianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisPersianAnalyzer {
  type: 'persian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPhoneticEncoder]]
=== AnalysisPhoneticEncoder

[pass]
++++
<pre>
++++
type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPhoneticLanguage]]
=== AnalysisPhoneticLanguage

[pass]
++++
<pre>
++++
type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPhoneticNameType]]
=== AnalysisPhoneticNameType

[pass]
++++
<pre>
++++
type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPhoneticRuleType]]
=== AnalysisPhoneticRuleType

[pass]
++++
<pre>
++++
type AnalysisPhoneticRuleType = 'approx' | 'exact'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPhoneticTokenFilter]]
=== AnalysisPhoneticTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisPhoneticTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'phonetic'
  encoder: <<AnalysisPhoneticEncoder>>
  languageset?: <<AnalysisPhoneticLanguage>> | <<AnalysisPhoneticLanguage>>[]
  max_code_len?: <<integer>>
  name_type?: <<AnalysisPhoneticNameType>>
  replace?: boolean
  rule_type?: <<AnalysisPhoneticRuleType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPorterStemTokenFilter]]
=== AnalysisPorterStemTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisPorterStemTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'porter_stem'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPortugueseAnalyzer]]
=== AnalysisPortugueseAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisPortugueseAnalyzer {
  type: 'portuguese'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisPredicateTokenFilter]]
=== AnalysisPredicateTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisPredicateTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'predicate_token_filter'
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisRemoveDuplicatesTokenFilter]]
=== AnalysisRemoveDuplicatesTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisRemoveDuplicatesTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'remove_duplicates'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisReverseTokenFilter]]
=== AnalysisReverseTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisReverseTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'reverse'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisRomanianAnalyzer]]
=== AnalysisRomanianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisRomanianAnalyzer {
  type: 'romanian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisRussianAnalyzer]]
=== AnalysisRussianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisRussianAnalyzer {
  type: 'russian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSerbianAnalyzer]]
=== AnalysisSerbianAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSerbianAnalyzer {
  type: 'serbian'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisShingleTokenFilter]]
=== AnalysisShingleTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisShingleTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'shingle'
  filler_token?: string
  max_shingle_size?: <<integer>> | string
  min_shingle_size?: <<integer>> | string
  output_unigrams?: boolean
  output_unigrams_if_no_shingles?: boolean
  token_separator?: string
}
[pass]
++++
</pre>
++++

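A `shingle` filter sketch via the analyze API; note that, per the type above, the shingle sizes may be given as integers or strings.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Emit word bigrams and trigrams for the sample phrase.
const result = await client.indices.analyze({
  tokenizer: 'whitespace',
  filter: [{ type: 'shingle', min_shingle_size: 2, max_shingle_size: 3 }],
  text: 'quick brown fox'
})
----
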
[discrete]
[[AnalysisSimpleAnalyzer]]
=== AnalysisSimpleAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSimpleAnalyzer {
  type: 'simple'
  version?: <<VersionString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSimplePatternSplitTokenizer]]
=== AnalysisSimplePatternSplitTokenizer

[pass]
++++
<pre>
++++
interface AnalysisSimplePatternSplitTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'simple_pattern_split'
  pattern?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSimplePatternTokenizer]]
=== AnalysisSimplePatternTokenizer

[pass]
++++
<pre>
++++
interface AnalysisSimplePatternTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'simple_pattern'
  pattern?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSnowballAnalyzer]]
=== AnalysisSnowballAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSnowballAnalyzer {
  type: 'snowball'
  version?: <<VersionString>>
  language: <<AnalysisSnowballLanguage>>
  stopwords?: <<AnalysisStopWords>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSnowballLanguage]]
=== AnalysisSnowballLanguage

[pass]
++++
<pre>
++++
type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSnowballTokenFilter]]
=== AnalysisSnowballTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisSnowballTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'snowball'
  language?: <<AnalysisSnowballLanguage>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSoraniAnalyzer]]
=== AnalysisSoraniAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSoraniAnalyzer {
  type: 'sorani'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSpanishAnalyzer]]
=== AnalysisSpanishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSpanishAnalyzer {
  type: 'spanish'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStandardAnalyzer]]
=== AnalysisStandardAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisStandardAnalyzer {
  type: 'standard'
  max_token_length?: <<integer>>
  stopwords?: <<AnalysisStopWords>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStandardTokenizer]]
=== AnalysisStandardTokenizer

[pass]
++++
<pre>
++++
interface AnalysisStandardTokenizer extends <<AnalysisTokenizerBase>> {
  type: 'standard'
  max_token_length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStemmerOverrideTokenFilter]]
=== AnalysisStemmerOverrideTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisStemmerOverrideTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'stemmer_override'
  rules?: string[]
  rules_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStemmerTokenFilter]]
=== AnalysisStemmerTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisStemmerTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'stemmer'
  language?: string
  name?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStopAnalyzer]]
=== AnalysisStopAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisStopAnalyzer {
  type: 'stop'
  version?: <<VersionString>>
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStopTokenFilter]]
=== AnalysisStopTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisStopTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'stop'
  ignore_case?: boolean
  remove_trailing?: boolean
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisStopWords]]
=== AnalysisStopWords

[pass]
++++
<pre>
++++
type AnalysisStopWords = string | string[]
[pass]
++++
</pre>
++++

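A sketch showing both shapes of `AnalysisStopWords`: a predefined list name such as `_english_`, or an explicit array of words. The index and analyzer names are assumptions.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'my-index-000005',
  settings: {
    analysis: {
      analyzer: {
        // string form: a predefined stopword list
        english_stops: { type: 'standard', stopwords: '_english_' },
        // string[] form: an explicit list
        custom_stops: { type: 'standard', stopwords: ['and', 'or', 'the'] }
      }
    }
  }
})
----
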
[discrete]
[[AnalysisSwedishAnalyzer]]
=== AnalysisSwedishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisSwedishAnalyzer {
  type: 'swedish'
  stopwords?: <<AnalysisStopWords>>
  stopwords_path?: string
  stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSynonymFormat]]
=== AnalysisSynonymFormat

[pass]
++++
<pre>
++++
type AnalysisSynonymFormat = 'solr' | 'wordnet'
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisSynonymGraphTokenFilter]]
=== AnalysisSynonymGraphTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisSynonymGraphTokenFilter extends <<AnalysisTokenFilterBase>> {
  type: 'synonym_graph'
  expand?: boolean
  format?: <<AnalysisSynonymFormat>>
  lenient?: boolean
  synonyms?: string[]
  synonyms_path?: string
  synonyms_set?: string
  tokenizer?: string
  updateable?: boolean
}
[pass]
++++
</pre>
++++

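A `synonym_graph` sketch: the filter is registered inline with Solr-format `synonyms` and wired into a custom analyzer. Filter, analyzer, and index names are illustrative.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'my-index-000006',
  settings: {
    analysis: {
      filter: {
        my_synonyms: {
          type: 'synonym_graph',
          synonyms: ['tv, television', 'laptop => notebook']
        }
      },
      analyzer: {
        with_synonyms: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['lowercase', 'my_synonyms']
        }
      }
    }
  }
})
----
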
|
||
[discrete]
|
||
[[AnalysisSynonymTokenFilter]]
|
||
=== AnalysisSynonymTokenFilter
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AnalysisSynonymTokenFilter extends <<AnalysisTokenFilterBase>> {
|
||
type: 'synonym'
|
||
expand?: boolean
|
||
format?: <<AnalysisSynonymFormat>>
|
||
lenient?: boolean
|
||
synonyms?: string[]
|
||
synonyms_path?: string
|
||
synonyms_set?: string
|
||
tokenizer?: string
|
||
updateable?: boolean
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AnalysisThaiAnalyzer]]
|
||
=== AnalysisThaiAnalyzer
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AnalysisThaiAnalyzer {
|
||
type: 'thai'
|
||
stopwords?: <<AnalysisStopWords>>
|
||
stopwords_path?: string
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AnalysisThaiTokenizer]]
|
||
=== AnalysisThaiTokenizer
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AnalysisThaiTokenizer extends <<AnalysisTokenizerBase>> {
|
||
type: 'thai'
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AnalysisTokenChar]]
|
||
=== AnalysisTokenChar
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AnalysisTokenFilter]]
|
||
=== AnalysisTokenFilter
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type AnalysisTokenFilter = string | <<AnalysisTokenFilterDefinition>>
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[AnalysisTokenFilterBase]]
|
||
=== AnalysisTokenFilterBase
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface AnalysisTokenFilterBase {
|
||
version?: <<VersionString>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
[[AnalysisTokenFilterDefinition]]
=== AnalysisTokenFilterDefinition

[pass]
++++
<pre>
++++
type AnalysisTokenFilterDefinition = <<AnalysisAsciiFoldingTokenFilter>> | <<AnalysisCommonGramsTokenFilter>> | <<AnalysisConditionTokenFilter>> | <<AnalysisDelimitedPayloadTokenFilter>> | <<AnalysisEdgeNGramTokenFilter>> | <<AnalysisElisionTokenFilter>> | <<AnalysisFingerprintTokenFilter>> | <<AnalysisHunspellTokenFilter>> | <<AnalysisHyphenationDecompounderTokenFilter>> | <<AnalysisKeepTypesTokenFilter>> | <<AnalysisKeepWordsTokenFilter>> | <<AnalysisKeywordMarkerTokenFilter>> | <<AnalysisKStemTokenFilter>> | <<AnalysisLengthTokenFilter>> | <<AnalysisLimitTokenCountTokenFilter>> | <<AnalysisLowercaseTokenFilter>> | <<AnalysisMultiplexerTokenFilter>> | <<AnalysisNGramTokenFilter>> | <<AnalysisNoriPartOfSpeechTokenFilter>> | <<AnalysisPatternCaptureTokenFilter>> | <<AnalysisPatternReplaceTokenFilter>> | <<AnalysisPorterStemTokenFilter>> | <<AnalysisPredicateTokenFilter>> | <<AnalysisRemoveDuplicatesTokenFilter>> | <<AnalysisReverseTokenFilter>> | <<AnalysisShingleTokenFilter>> | <<AnalysisSnowballTokenFilter>> | <<AnalysisStemmerOverrideTokenFilter>> | <<AnalysisStemmerTokenFilter>> | <<AnalysisStopTokenFilter>> | <<AnalysisSynonymGraphTokenFilter>> | <<AnalysisSynonymTokenFilter>> | <<AnalysisTrimTokenFilter>> | <<AnalysisTruncateTokenFilter>> | <<AnalysisUniqueTokenFilter>> | <<AnalysisUppercaseTokenFilter>> | <<AnalysisWordDelimiterGraphTokenFilter>> | <<AnalysisWordDelimiterTokenFilter>> | <<AnalysisKuromojiStemmerTokenFilter>> | <<AnalysisKuromojiReadingFormTokenFilter>> | <<AnalysisKuromojiPartOfSpeechTokenFilter>> | <<AnalysisIcuCollationTokenFilter>> | <<AnalysisIcuFoldingTokenFilter>> | <<AnalysisIcuNormalizationTokenFilter>> | <<AnalysisIcuTransformTokenFilter>> | <<AnalysisPhoneticTokenFilter>> | <<AnalysisDictionaryDecompounderTokenFilter>>
[pass]
++++
</pre>
++++

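A token filter can be referenced by its built-in name (`string`) or defined inline; the `type` discriminant selects the concrete filter interface from the union above. A minimal sketch, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the filter settings are illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// Inline definition: the 'truncate' discriminant narrows the union to
// AnalysisTruncateTokenFilter, so `length` is type-checked.
const truncate: estypes.AnalysisTokenFilterDefinition = {
  type: 'truncate',
  length: 10, // illustrative token length cap
}

// A filter chain mixes built-in names and inline definitions.
const filterChain: estypes.AnalysisTokenFilter[] = ['lowercase', truncate]
----
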
[discrete]
[[AnalysisTokenizer]]
=== AnalysisTokenizer

[pass]
++++
<pre>
++++
type AnalysisTokenizer = string | <<AnalysisTokenizerDefinition>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisTokenizerBase]]
=== AnalysisTokenizerBase

[pass]
++++
<pre>
++++
interface AnalysisTokenizerBase {
version?: <<VersionString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisTokenizerDefinition]]
=== AnalysisTokenizerDefinition

[pass]
++++
<pre>
++++
type AnalysisTokenizerDefinition = <<AnalysisCharGroupTokenizer>> | <<AnalysisClassicTokenizer>> | <<AnalysisEdgeNGramTokenizer>> | <<AnalysisKeywordTokenizer>> | <<AnalysisLetterTokenizer>> | <<AnalysisLowercaseTokenizer>> | <<AnalysisNGramTokenizer>> | <<AnalysisPathHierarchyTokenizer>> | <<AnalysisPatternTokenizer>> | <<AnalysisSimplePatternTokenizer>> | <<AnalysisSimplePatternSplitTokenizer>> | <<AnalysisStandardTokenizer>> | <<AnalysisThaiTokenizer>> | <<AnalysisUaxEmailUrlTokenizer>> | <<AnalysisWhitespaceTokenizer>> | <<AnalysisIcuTokenizer>> | <<AnalysisKuromojiTokenizer>> | <<AnalysisNoriTokenizer>>
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisTrimTokenFilter]]
=== AnalysisTrimTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisTrimTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'trim'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisTruncateTokenFilter]]
=== AnalysisTruncateTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisTruncateTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'truncate'
length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisTurkishAnalyzer]]
=== AnalysisTurkishAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisTurkishAnalyzer {
type: 'turkish'
stopwords?: <<AnalysisStopWords>>
stopwords_path?: string
stem_exclusion?: string[]
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisUaxEmailUrlTokenizer]]
=== AnalysisUaxEmailUrlTokenizer

[pass]
++++
<pre>
++++
interface AnalysisUaxEmailUrlTokenizer extends <<AnalysisTokenizerBase>> {
type: 'uax_url_email'
max_token_length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisUniqueTokenFilter]]
=== AnalysisUniqueTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisUniqueTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'unique'
only_on_same_position?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisUppercaseTokenFilter]]
=== AnalysisUppercaseTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisUppercaseTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'uppercase'
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisWhitespaceAnalyzer]]
=== AnalysisWhitespaceAnalyzer

[pass]
++++
<pre>
++++
interface AnalysisWhitespaceAnalyzer {
type: 'whitespace'
version?: <<VersionString>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisWhitespaceTokenizer]]
=== AnalysisWhitespaceTokenizer

[pass]
++++
<pre>
++++
interface AnalysisWhitespaceTokenizer extends <<AnalysisTokenizerBase>> {
type: 'whitespace'
max_token_length?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisWordDelimiterGraphTokenFilter]]
=== AnalysisWordDelimiterGraphTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisWordDelimiterGraphTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'word_delimiter_graph'
adjust_offsets?: boolean
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
generate_number_parts?: boolean
generate_word_parts?: boolean
ignore_keywords?: boolean
preserve_original?: <<SpecUtilsStringified>><boolean>
protected_words?: string[]
protected_words_path?: string
split_on_case_change?: boolean
split_on_numerics?: boolean
stem_english_possessive?: boolean
type_table?: string[]
type_table_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[AnalysisWordDelimiterTokenFilter]]
=== AnalysisWordDelimiterTokenFilter

[pass]
++++
<pre>
++++
interface AnalysisWordDelimiterTokenFilter extends <<AnalysisTokenFilterBase>> {
type: 'word_delimiter'
catenate_all?: boolean
catenate_numbers?: boolean
catenate_words?: boolean
generate_number_parts?: boolean
generate_word_parts?: boolean
preserve_original?: <<SpecUtilsStringified>><boolean>
protected_words?: string[]
protected_words_path?: string
split_on_case_change?: boolean
split_on_numerics?: boolean
stem_english_possessive?: boolean
type_table?: string[]
type_table_path?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingAggregateMetricDoubleProperty]]
=== MappingAggregateMetricDoubleProperty

[pass]
++++
<pre>
++++
interface MappingAggregateMetricDoubleProperty extends <<MappingPropertyBase>> {
type: 'aggregate_metric_double'
default_metric: string
metrics: string[]
time_series_metric?: <<MappingTimeSeriesMetricType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingAllField]]
=== MappingAllField

[pass]
++++
<pre>
++++
interface MappingAllField {
analyzer: string
enabled: boolean
omit_norms: boolean
search_analyzer: string
similarity: string
store: boolean
store_term_vector_offsets: boolean
store_term_vector_payloads: boolean
store_term_vector_positions: boolean
store_term_vectors: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingBinaryProperty]]
=== MappingBinaryProperty

[pass]
++++
<pre>
++++
interface MappingBinaryProperty extends <<MappingDocValuesPropertyBase>> {
type: 'binary'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingBooleanProperty]]
=== MappingBooleanProperty

[pass]
++++
<pre>
++++
interface MappingBooleanProperty extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
fielddata?: <<IndicesNumericFielddata>>
index?: boolean
null_value?: boolean
type: 'boolean'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingByteNumberProperty]]
=== MappingByteNumberProperty

[pass]
++++
<pre>
++++
interface MappingByteNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<byte>>'
null_value?: <<byte>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingCompletionProperty]]
=== MappingCompletionProperty

[pass]
++++
<pre>
++++
interface MappingCompletionProperty extends <<MappingDocValuesPropertyBase>> {
analyzer?: string
contexts?: <<MappingSuggestContext>>[]
max_input_length?: <<integer>>
preserve_position_increments?: boolean
preserve_separators?: boolean
search_analyzer?: string
type: 'completion'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingCompositeSubField]]
=== MappingCompositeSubField

[pass]
++++
<pre>
++++
interface MappingCompositeSubField {
type: <<MappingRuntimeFieldType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingConstantKeywordProperty]]
=== MappingConstantKeywordProperty

[pass]
++++
<pre>
++++
interface MappingConstantKeywordProperty extends <<MappingPropertyBase>> {
value?: any
type: 'constant_keyword'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingCorePropertyBase]]
=== MappingCorePropertyBase

[pass]
++++
<pre>
++++
interface MappingCorePropertyBase extends <<MappingPropertyBase>> {
copy_to?: <<Fields>>
store?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDataStreamTimestamp]]
=== MappingDataStreamTimestamp

[pass]
++++
<pre>
++++
interface MappingDataStreamTimestamp {
enabled: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDateNanosProperty]]
=== MappingDateNanosProperty

[pass]
++++
<pre>
++++
interface MappingDateNanosProperty extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
format?: string
ignore_malformed?: boolean
index?: boolean
null_value?: <<DateTime>>
precision_step?: <<integer>>
type: 'date_nanos'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDateProperty]]
=== MappingDateProperty

[pass]
++++
<pre>
++++
interface MappingDateProperty extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
fielddata?: <<IndicesNumericFielddata>>
format?: string
ignore_malformed?: boolean
index?: boolean
null_value?: <<DateTime>>
precision_step?: <<integer>>
locale?: string
type: 'date'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDateRangeProperty]]
=== MappingDateRangeProperty

[pass]
++++
<pre>
++++
interface MappingDateRangeProperty extends <<MappingRangePropertyBase>> {
format?: string
type: 'date_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDenseVectorIndexOptions]]
=== MappingDenseVectorIndexOptions

[pass]
++++
<pre>
++++
interface MappingDenseVectorIndexOptions {
type: string
m?: <<integer>>
ef_construction?: <<integer>>
confidence_interval?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDenseVectorProperty]]
=== MappingDenseVectorProperty

[pass]
++++
<pre>
++++
interface MappingDenseVectorProperty extends <<MappingPropertyBase>> {
type: 'dense_vector'
element_type?: string
dims?: <<integer>>
similarity?: string
index?: boolean
index_options?: <<MappingDenseVectorIndexOptions>>
}
[pass]
++++
</pre>
++++

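A minimal sketch of a `dense_vector` mapping built from the two interfaces above, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the dimension count and settings are illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// kNN-searchable vector field: `index: true` plus HNSW index options.
const embedding: estypes.MappingDenseVectorProperty = {
  type: 'dense_vector',
  dims: 384,            // illustrative embedding size
  similarity: 'cosine', // typed as a plain string in this listing
  index: true,
  index_options: { type: 'hnsw', m: 16, ef_construction: 100 },
}
----
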
[discrete]
[[MappingDocValuesPropertyBase]]
=== MappingDocValuesPropertyBase

[pass]
++++
<pre>
++++
interface MappingDocValuesPropertyBase extends <<MappingCorePropertyBase>> {
doc_values?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDoubleNumberProperty]]
=== MappingDoubleNumberProperty

[pass]
++++
<pre>
++++
interface MappingDoubleNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<double>>'
null_value?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDoubleRangeProperty]]
=== MappingDoubleRangeProperty

[pass]
++++
<pre>
++++
interface MappingDoubleRangeProperty extends <<MappingRangePropertyBase>> {
type: 'double_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDynamicMapping]]
=== MappingDynamicMapping

[pass]
++++
<pre>
++++
type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false'
[pass]
++++
</pre>
++++

[discrete]
[[MappingDynamicProperty]]
=== MappingDynamicProperty

[pass]
++++
<pre>
++++
interface MappingDynamicProperty extends <<MappingDocValuesPropertyBase>> {
type: '{dynamic_type}'
enabled?: boolean
null_value?: <<FieldValue>>
boost?: <<double>>
coerce?: boolean
script?: <<Script>> | string
on_script_error?: <<MappingOnScriptError>>
ignore_malformed?: boolean
time_series_metric?: <<MappingTimeSeriesMetricType>>
analyzer?: string
eager_global_ordinals?: boolean
index?: boolean
index_options?: <<MappingIndexOptions>>
index_phrases?: boolean
index_prefixes?: <<MappingTextIndexPrefixes>> | null
norms?: boolean
position_increment_gap?: <<integer>>
search_analyzer?: string
search_quote_analyzer?: string
term_vector?: <<MappingTermVectorOption>>
format?: string
precision_step?: <<integer>>
locale?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingDynamicTemplate]]
=== MappingDynamicTemplate

[pass]
++++
<pre>
++++
interface MappingDynamicTemplate {
mapping?: <<MappingProperty>>
runtime?: <<MappingProperty>>
match?: string | string[]
path_match?: string | string[]
unmatch?: string | string[]
path_unmatch?: string | string[]
match_mapping_type?: string | string[]
unmatch_mapping_type?: string | string[]
match_pattern?: <<MappingMatchType>>
}
[pass]
++++
</pre>
++++

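A minimal sketch of a dynamic template using the match conditions above, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the naming convention is illustrative. In <<MappingTypeMapping>>, `dynamic_templates` takes an array of such single-entry records:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// Index any dynamically added string field ending in `_id` as a keyword.
const idsAsKeywords: Record<string, estypes.MappingDynamicTemplate> = {
  ids_as_keywords: {
    match: '*_id',                // illustrative naming convention
    match_mapping_type: 'string', // only applies to JSON string values
    mapping: { type: 'keyword' },
  },
}
----
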
[discrete]
[[MappingFieldAliasProperty]]
=== MappingFieldAliasProperty

[pass]
++++
<pre>
++++
interface MappingFieldAliasProperty extends <<MappingPropertyBase>> {
path?: <<Field>>
type: 'alias'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingFieldMapping]]
=== MappingFieldMapping

[pass]
++++
<pre>
++++
interface MappingFieldMapping {
full_name: string
mapping: Partial<Record<<<Field>>, <<MappingProperty>>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingFieldNamesField]]
=== MappingFieldNamesField

[pass]
++++
<pre>
++++
interface MappingFieldNamesField {
enabled: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingFieldType]]
=== MappingFieldType

[pass]
++++
<pre>
++++
type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | '<<integer>>' | '<<long>>' | '<<short>>' | '<<byte>>' | '<<float>>' | 'half_float' | 'scaled_float' | '<<double>>' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'
[pass]
++++
</pre>
++++

[discrete]
[[MappingFlattenedProperty]]
=== MappingFlattenedProperty

[pass]
++++
<pre>
++++
interface MappingFlattenedProperty extends <<MappingPropertyBase>> {
boost?: <<double>>
depth_limit?: <<integer>>
doc_values?: boolean
eager_global_ordinals?: boolean
index?: boolean
index_options?: <<MappingIndexOptions>>
null_value?: string
similarity?: string
split_queries_on_whitespace?: boolean
type: 'flattened'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingFloatNumberProperty]]
=== MappingFloatNumberProperty

[pass]
++++
<pre>
++++
interface MappingFloatNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<float>>'
null_value?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingFloatRangeProperty]]
=== MappingFloatRangeProperty

[pass]
++++
<pre>
++++
interface MappingFloatRangeProperty extends <<MappingRangePropertyBase>> {
type: 'float_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingGeoOrientation]]
=== MappingGeoOrientation

[pass]
++++
<pre>
++++
type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw'
[pass]
++++
</pre>
++++

[discrete]
[[MappingGeoPointProperty]]
=== MappingGeoPointProperty

[pass]
++++
<pre>
++++
interface MappingGeoPointProperty extends <<MappingDocValuesPropertyBase>> {
ignore_malformed?: boolean
ignore_z_value?: boolean
null_value?: <<GeoLocation>>
index?: boolean
on_script_error?: <<MappingOnScriptError>>
script?: <<Script>> | string
type: 'geo_point'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingGeoShapeProperty]]
=== MappingGeoShapeProperty

[pass]
++++
<pre>
++++
interface MappingGeoShapeProperty extends <<MappingDocValuesPropertyBase>> {
coerce?: boolean
ignore_malformed?: boolean
ignore_z_value?: boolean
orientation?: <<MappingGeoOrientation>>
strategy?: <<MappingGeoStrategy>>
type: 'geo_shape'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingGeoStrategy]]
=== MappingGeoStrategy

[pass]
++++
<pre>
++++
type MappingGeoStrategy = 'recursive' | 'term'
[pass]
++++
</pre>
++++

[discrete]
[[MappingHalfFloatNumberProperty]]
=== MappingHalfFloatNumberProperty

[pass]
++++
<pre>
++++
interface MappingHalfFloatNumberProperty extends <<MappingNumberPropertyBase>> {
type: 'half_float'
null_value?: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingHistogramProperty]]
=== MappingHistogramProperty

[pass]
++++
<pre>
++++
interface MappingHistogramProperty extends <<MappingPropertyBase>> {
ignore_malformed?: boolean
type: 'histogram'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIcuCollationProperty]]
=== MappingIcuCollationProperty

[pass]
++++
<pre>
++++
interface MappingIcuCollationProperty extends <<MappingDocValuesPropertyBase>> {
type: 'icu_collation_keyword'
norms?: boolean
index_options?: <<MappingIndexOptions>>
pass:[/**] @property index Should the field be searchable? */
index?: boolean
pass:[/**] @property null_value Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */
null_value?: string
rules?: string
language?: string
country?: string
variant?: string
strength?: <<AnalysisIcuCollationStrength>>
decomposition?: <<AnalysisIcuCollationDecomposition>>
alternate?: <<AnalysisIcuCollationAlternate>>
case_level?: boolean
case_first?: <<AnalysisIcuCollationCaseFirst>>
numeric?: boolean
variable_top?: string
hiragana_quaternary_mode?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIndexField]]
=== MappingIndexField

[pass]
++++
<pre>
++++
interface MappingIndexField {
enabled: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIndexOptions]]
=== MappingIndexOptions

[pass]
++++
<pre>
++++
type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets'
[pass]
++++
</pre>
++++

[discrete]
[[MappingIntegerNumberProperty]]
=== MappingIntegerNumberProperty

[pass]
++++
<pre>
++++
interface MappingIntegerNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<integer>>'
null_value?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIntegerRangeProperty]]
=== MappingIntegerRangeProperty

[pass]
++++
<pre>
++++
interface MappingIntegerRangeProperty extends <<MappingRangePropertyBase>> {
type: 'integer_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIpProperty]]
=== MappingIpProperty

[pass]
++++
<pre>
++++
interface MappingIpProperty extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
index?: boolean
ignore_malformed?: boolean
null_value?: string
on_script_error?: <<MappingOnScriptError>>
script?: <<Script>> | string
pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */
time_series_dimension?: boolean
type: 'ip'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingIpRangeProperty]]
=== MappingIpRangeProperty

[pass]
++++
<pre>
++++
interface MappingIpRangeProperty extends <<MappingRangePropertyBase>> {
type: 'ip_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingJoinProperty]]
=== MappingJoinProperty

[pass]
++++
<pre>
++++
interface MappingJoinProperty extends <<MappingPropertyBase>> {
relations?: Record<<<RelationName>>, <<RelationName>> | <<RelationName>>[]>
eager_global_ordinals?: boolean
type: 'join'
}
[pass]
++++
</pre>
++++

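A minimal sketch of a parent/child `join` field, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the relation names are illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// One parent type with two child types in the same index; `relations`
// maps each parent relation name to its child name(s).
const joinField: estypes.MappingJoinProperty = {
  type: 'join',
  relations: {
    question: ['answer', 'comment'], // parent -> children
  },
}
----
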
[discrete]
[[MappingKeywordProperty]]
=== MappingKeywordProperty

[pass]
++++
<pre>
++++
interface MappingKeywordProperty extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
eager_global_ordinals?: boolean
index?: boolean
index_options?: <<MappingIndexOptions>>
script?: <<Script>> | string
on_script_error?: <<MappingOnScriptError>>
normalizer?: string
norms?: boolean
null_value?: string
similarity?: string | null
split_queries_on_whitespace?: boolean
pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */
time_series_dimension?: boolean
type: 'keyword'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingLongNumberProperty]]
=== MappingLongNumberProperty

[pass]
++++
<pre>
++++
interface MappingLongNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<long>>'
null_value?: <<long>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingLongRangeProperty]]
=== MappingLongRangeProperty

[pass]
++++
<pre>
++++
interface MappingLongRangeProperty extends <<MappingRangePropertyBase>> {
type: 'long_range'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingMatchOnlyTextProperty]]
=== MappingMatchOnlyTextProperty

[pass]
++++
<pre>
++++
interface MappingMatchOnlyTextProperty {
type: 'match_only_text'
pass:[/**] @property fields Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */
fields?: Record<<<PropertyName>>, <<MappingProperty>>>
pass:[/**] @property meta <<Metadata>> about the field. */
meta?: Record<string, string>
pass:[/**] @property copy_to Allows you to copy the values of multiple fields into a group field, which can then be queried as a single field. */
copy_to?: <<Fields>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingMatchType]]
=== MappingMatchType

[pass]
++++
<pre>
++++
type MappingMatchType = 'simple' | 'regex'
[pass]
++++
</pre>
++++

[discrete]
[[MappingMurmur3HashProperty]]
=== MappingMurmur3HashProperty

[pass]
++++
<pre>
++++
interface MappingMurmur3HashProperty extends <<MappingDocValuesPropertyBase>> {
type: 'murmur3'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingNestedProperty]]
=== MappingNestedProperty

[pass]
++++
<pre>
++++
interface MappingNestedProperty extends <<MappingCorePropertyBase>> {
enabled?: boolean
include_in_parent?: boolean
include_in_root?: boolean
type: 'nested'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingNumberPropertyBase]]
=== MappingNumberPropertyBase

[pass]
++++
<pre>
++++
interface MappingNumberPropertyBase extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
coerce?: boolean
ignore_malformed?: boolean
index?: boolean
on_script_error?: <<MappingOnScriptError>>
script?: <<Script>> | string
pass:[/**] @property time_series_metric For internal use by Elastic only. Marks the field as a time series metric. */
time_series_metric?: <<MappingTimeSeriesMetricType>>
pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */
time_series_dimension?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingObjectProperty]]
=== MappingObjectProperty

[pass]
++++
<pre>
++++
interface MappingObjectProperty extends <<MappingCorePropertyBase>> {
enabled?: boolean
subobjects?: boolean
type?: 'object'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingOnScriptError]]
=== MappingOnScriptError

[pass]
++++
<pre>
++++
type MappingOnScriptError = 'fail' | 'continue'
[pass]
++++
</pre>
++++

[discrete]
[[MappingPercolatorProperty]]
=== MappingPercolatorProperty

[pass]
++++
<pre>
++++
interface MappingPercolatorProperty extends <<MappingPropertyBase>> {
type: 'percolator'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingPointProperty]]
=== MappingPointProperty

[pass]
++++
<pre>
++++
interface MappingPointProperty extends <<MappingDocValuesPropertyBase>> {
ignore_malformed?: boolean
ignore_z_value?: boolean
null_value?: string
type: 'point'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingProperty]]
=== MappingProperty

[pass]
++++
<pre>
++++
type MappingProperty = <<MappingBinaryProperty>> | <<MappingBooleanProperty>> | <<MappingDynamicProperty>> | <<MappingJoinProperty>> | <<MappingKeywordProperty>> | <<MappingMatchOnlyTextProperty>> | <<MappingPercolatorProperty>> | <<MappingRankFeatureProperty>> | <<MappingRankFeaturesProperty>> | <<MappingSearchAsYouTypeProperty>> | <<MappingTextProperty>> | <<MappingVersionProperty>> | <<MappingWildcardProperty>> | <<MappingDateNanosProperty>> | <<MappingDateProperty>> | <<MappingAggregateMetricDoubleProperty>> | <<MappingDenseVectorProperty>> | <<MappingFlattenedProperty>> | <<MappingNestedProperty>> | <<MappingObjectProperty>> | <<MappingSemanticTextProperty>> | <<MappingSparseVectorProperty>> | <<MappingCompletionProperty>> | <<MappingConstantKeywordProperty>> | <<MappingFieldAliasProperty>> | <<MappingHistogramProperty>> | <<MappingIpProperty>> | <<MappingMurmur3HashProperty>> | <<MappingTokenCountProperty>> | <<MappingGeoPointProperty>> | <<MappingGeoShapeProperty>> | <<MappingPointProperty>> | <<MappingShapeProperty>> | <<MappingByteNumberProperty>> | <<MappingDoubleNumberProperty>> | <<MappingFloatNumberProperty>> | <<MappingHalfFloatNumberProperty>> | <<MappingIntegerNumberProperty>> | <<MappingLongNumberProperty>> | <<MappingScaledFloatNumberProperty>> | <<MappingShortNumberProperty>> | <<MappingUnsignedLongNumberProperty>> | <<MappingDateRangeProperty>> | <<MappingDoubleRangeProperty>> | <<MappingFloatRangeProperty>> | <<MappingIntegerRangeProperty>> | <<MappingIpRangeProperty>> | <<MappingLongRangeProperty>> | <<MappingIcuCollationProperty>>
[pass]
++++
</pre>
++++

[discrete]
[[MappingPropertyBase]]
=== MappingPropertyBase

[pass]
++++
<pre>
++++
interface MappingPropertyBase {
pass:[/**] @property meta <<Metadata>> about the field. */
meta?: Record<string, string>
properties?: Record<<<PropertyName>>, <<MappingProperty>>>
ignore_above?: <<integer>>
dynamic?: <<MappingDynamicMapping>>
fields?: Record<<<PropertyName>>, <<MappingProperty>>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRangePropertyBase]]
=== MappingRangePropertyBase

[pass]
++++
<pre>
++++
interface MappingRangePropertyBase extends <<MappingDocValuesPropertyBase>> {
boost?: <<double>>
coerce?: boolean
index?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRankFeatureProperty]]
=== MappingRankFeatureProperty

[pass]
++++
<pre>
++++
interface MappingRankFeatureProperty extends <<MappingPropertyBase>> {
positive_score_impact?: boolean
type: 'rank_feature'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRankFeaturesProperty]]
=== MappingRankFeaturesProperty

[pass]
++++
<pre>
++++
interface MappingRankFeaturesProperty extends <<MappingPropertyBase>> {
positive_score_impact?: boolean
type: 'rank_features'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRoutingField]]
=== MappingRoutingField

[pass]
++++
<pre>
++++
interface MappingRoutingField {
required: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRuntimeField]]
=== MappingRuntimeField

[pass]
++++
<pre>
++++
interface MappingRuntimeField {
pass:[/**] @property fields For type `composite` */
fields?: Record<string, <<MappingCompositeSubField>>>
pass:[/**] @property fetch_fields For type `lookup` */
fetch_fields?: (<<MappingRuntimeFieldFetchFields>> | <<Field>>)[]
pass:[/**] @property format A custom format for `date` type runtime fields. */
format?: string
pass:[/**] @property input_field For type `lookup` */
input_field?: <<Field>>
pass:[/**] @property target_field For type `lookup` */
target_field?: <<Field>>
pass:[/**] @property target_index For type `lookup` */
target_index?: <<IndexName>>
pass:[/**] @property script Painless script executed at query time. */
script?: <<Script>> | string
pass:[/**] @property type <<Field>> type, which can be: `boolean`, `composite`, `date`, `<<double>>`, `geo_point`, `ip`, `keyword`, `<<long>>`, or `lookup`. */
type: <<MappingRuntimeFieldType>>
}
[pass]
++++
</pre>
++++

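A minimal sketch of runtime fields using the properties above, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the Painless script, index, and field names are illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

const runtime: estypes.MappingRuntimeFields = {
  // Scripted field: computed from another field at query time.
  day_of_week: {
    type: 'keyword',
    script: "emit(doc['@timestamp'].value.dayOfWeekEnum.toString())",
  },
  // Lookup field: pulls fields from a matching document in another index.
  client_location: {
    type: 'lookup',
    target_index: 'ip_locations',  // illustrative enrichment index
    input_field: 'client_ip',
    target_field: 'ip',
    fetch_fields: ['country', { field: 'city' }],
  },
}
----
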
[discrete]
[[MappingRuntimeFieldFetchFields]]
=== MappingRuntimeFieldFetchFields

[pass]
++++
<pre>
++++
interface MappingRuntimeFieldFetchFields {
field: <<Field>>
format?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingRuntimeFieldType]]
=== MappingRuntimeFieldType

[pass]
++++
<pre>
++++
type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | '<<double>>' | 'geo_point' | 'ip' | 'keyword' | '<<long>>' | 'lookup'
[pass]
++++
</pre>
++++

[discrete]
[[MappingRuntimeFields]]
=== MappingRuntimeFields

[pass]
++++
<pre>
++++
type MappingRuntimeFields = Record<<<Field>>, <<MappingRuntimeField>>>
[pass]
++++
</pre>
++++

[discrete]
[[MappingScaledFloatNumberProperty]]
=== MappingScaledFloatNumberProperty

[pass]
++++
<pre>
++++
interface MappingScaledFloatNumberProperty extends <<MappingNumberPropertyBase>> {
type: 'scaled_float'
null_value?: <<double>>
scaling_factor?: <<double>>
}
[pass]
++++
</pre>
++++

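A `scaled_float` stores values as scaled integers, so `scaling_factor` controls the precision. A minimal sketch, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// With scaling_factor 100, a price of 9.99 is persisted internally as the
// long 999 (9.99 * 100); values are rounded to the nearest scaled integer.
const price: estypes.MappingScaledFloatNumberProperty = {
  type: 'scaled_float',
  scaling_factor: 100,
  null_value: 0,
}
----
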
[discrete]
[[MappingSearchAsYouTypeProperty]]
=== MappingSearchAsYouTypeProperty

[pass]
++++
<pre>
++++
interface MappingSearchAsYouTypeProperty extends <<MappingCorePropertyBase>> {
analyzer?: string
index?: boolean
index_options?: <<MappingIndexOptions>>
max_shingle_size?: <<integer>>
norms?: boolean
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: <<MappingTermVectorOption>>
type: 'search_as_you_type'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingSemanticTextProperty]]
=== MappingSemanticTextProperty

[pass]
++++
<pre>
++++
interface MappingSemanticTextProperty {
type: 'semantic_text'
meta?: Record<string, string>
inference_id: <<Id>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingShapeProperty]]
=== MappingShapeProperty

[pass]
++++
<pre>
++++
interface MappingShapeProperty extends <<MappingDocValuesPropertyBase>> {
coerce?: boolean
ignore_malformed?: boolean
ignore_z_value?: boolean
orientation?: <<MappingGeoOrientation>>
type: 'shape'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingShortNumberProperty]]
=== MappingShortNumberProperty

[pass]
++++
<pre>
++++
interface MappingShortNumberProperty extends <<MappingNumberPropertyBase>> {
type: '<<short>>'
null_value?: <<short>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingSizeField]]
=== MappingSizeField

[pass]
++++
<pre>
++++
interface MappingSizeField {
enabled: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingSourceField]]
=== MappingSourceField

[pass]
++++
<pre>
++++
interface MappingSourceField {
compress?: boolean
compress_threshold?: string
enabled?: boolean
excludes?: string[]
includes?: string[]
mode?: <<MappingSourceFieldMode>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingSourceFieldMode]]
=== MappingSourceFieldMode

[pass]
++++
<pre>
++++
type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic'
[pass]
++++
</pre>
++++

[discrete]
[[MappingSparseVectorProperty]]
=== MappingSparseVectorProperty

[pass]
++++
<pre>
++++
interface MappingSparseVectorProperty extends <<MappingPropertyBase>> {
type: 'sparse_vector'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingSuggestContext]]
=== MappingSuggestContext

[pass]
++++
<pre>
++++
interface MappingSuggestContext {
name: <<Name>>
path?: <<Field>>
type: string
precision?: <<integer>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingTermVectorOption]]
=== MappingTermVectorOption

[pass]
++++
<pre>
++++
type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads'
[pass]
++++
</pre>
++++

[discrete]
[[MappingTextIndexPrefixes]]
=== MappingTextIndexPrefixes

[pass]
++++
<pre>
++++
interface MappingTextIndexPrefixes {
max_chars: <<integer>>
min_chars: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingTextProperty]]
=== MappingTextProperty

[pass]
++++
<pre>
++++
interface MappingTextProperty extends <<MappingCorePropertyBase>> {
analyzer?: string
boost?: <<double>>
eager_global_ordinals?: boolean
fielddata?: boolean
fielddata_frequency_filter?: <<IndicesFielddataFrequencyFilter>>
index?: boolean
index_options?: <<MappingIndexOptions>>
index_phrases?: boolean
index_prefixes?: <<MappingTextIndexPrefixes>> | null
norms?: boolean
position_increment_gap?: <<integer>>
search_analyzer?: string
search_quote_analyzer?: string
similarity?: string | null
term_vector?: <<MappingTermVectorOption>>
type: 'text'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingTimeSeriesMetricType]]
=== MappingTimeSeriesMetricType

[pass]
++++
<pre>
++++
type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position'
[pass]
++++
</pre>
++++

[discrete]
[[MappingTokenCountProperty]]
=== MappingTokenCountProperty

[pass]
++++
<pre>
++++
interface MappingTokenCountProperty extends <<MappingDocValuesPropertyBase>> {
analyzer?: string
boost?: <<double>>
index?: boolean
null_value?: <<double>>
enable_position_increments?: boolean
type: 'token_count'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingTypeMapping]]
=== MappingTypeMapping

[pass]
++++
<pre>
++++
interface MappingTypeMapping {
all_field?: <<MappingAllField>>
date_detection?: boolean
dynamic?: <<MappingDynamicMapping>>
dynamic_date_formats?: string[]
dynamic_templates?: Record<string, <<MappingDynamicTemplate>>>[]
_field_names?: <<MappingFieldNamesField>>
index_field?: <<MappingIndexField>>
_meta?: <<Metadata>>
numeric_detection?: boolean
properties?: Record<<<PropertyName>>, <<MappingProperty>>>
_routing?: <<MappingRoutingField>>
_size?: <<MappingSizeField>>
_source?: <<MappingSourceField>>
runtime?: Record<string, <<MappingRuntimeField>>>
enabled?: boolean
subobjects?: boolean
_data_stream_timestamp?: <<MappingDataStreamTimestamp>>
}
[pass]
++++
</pre>
++++

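A minimal sketch that assembles a <<MappingTypeMapping>> and applies it with the client, assuming `Client` and the `estypes` namespace from `@elastic/elasticsearch`; the index name, node URL, and fields are illustrative:

[source,ts]
----
import { Client, estypes } from '@elastic/elasticsearch'

const mappings: estypes.MappingTypeMapping = {
  dynamic: 'strict', // reject documents that introduce unmapped fields
  properties: {
    title: { type: 'text', fields: { raw: { type: 'keyword' } } },
    created_at: { type: 'date', format: 'strict_date_optional_time' },
  },
  _source: { enabled: true },
}

const client = new Client({ node: 'http://localhost:9200' })
await client.indices.create({ index: 'articles', mappings })
----
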
[discrete]
[[MappingUnsignedLongNumberProperty]]
=== MappingUnsignedLongNumberProperty

[pass]
++++
<pre>
++++
interface MappingUnsignedLongNumberProperty extends <<MappingNumberPropertyBase>> {
type: 'unsigned_long'
null_value?: <<ulong>>
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingVersionProperty]]
=== MappingVersionProperty

[pass]
++++
<pre>
++++
interface MappingVersionProperty extends <<MappingDocValuesPropertyBase>> {
type: 'version'
}
[pass]
++++
</pre>
++++

[discrete]
[[MappingWildcardProperty]]
=== MappingWildcardProperty

[pass]
++++
<pre>
++++
interface MappingWildcardProperty extends <<MappingDocValuesPropertyBase>> {
type: 'wildcard'
null_value?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslBoolQuery]]
=== QueryDslBoolQuery

[pass]
++++
<pre>
++++
interface QueryDslBoolQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property filter The clause (query) must appear in matching documents. However, unlike `must`, the score of the query will be ignored. */
filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
pass:[/**] @property minimum_should_match Specifies the number or percentage of `should` clauses returned documents must match. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property must The clause (query) must appear in matching documents and will contribute to the score. */
must?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
pass:[/**] @property must_not The clause (query) must not appear in the matching documents. Because scoring is ignored, a score of `0` is returned for all documents. */
must_not?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
pass:[/**] @property should The clause (query) should appear in the matching document. */
should?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[]
}
[pass]
++++
</pre>
++++

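A minimal sketch of a `bool` query, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the field names are illustrative. `must` and `should` contribute to the score, while `filter` and `must_not` only restrict the result set:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

const bool: estypes.QueryDslBoolQuery = {
  must: { match: { title: 'shared types' } },
  filter: [{ term: { published: true } }],       // cached, not scored
  must_not: { exists: { field: 'deleted_at' } },
  should: [{ match: { tags: 'typescript' } }],
  minimum_should_match: 1, // require at least one `should` clause
}

// Wrap it in a query container for use in a search request.
const query: estypes.QueryDslQueryContainer = { bool }
----
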
[discrete]
[[QueryDslBoostingQuery]]
=== QueryDslBoostingQuery

[pass]
++++
<pre>
++++
interface QueryDslBoostingQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property negative_boost Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */
negative_boost: <<double>>
pass:[/**] @property negative <<Query>> used to decrease the relevance score of matching documents. */
negative: <<QueryDslQueryContainer>>
pass:[/**] @property positive Any returned documents must match this query. */
positive: <<QueryDslQueryContainer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslChildScoreMode]]
=== QueryDslChildScoreMode

[pass]
++++
<pre>
++++
type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslCombinedFieldsOperator]]
=== QueryDslCombinedFieldsOperator

[pass]
++++
<pre>
++++
type QueryDslCombinedFieldsOperator = 'or' | 'and'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslCombinedFieldsQuery]]
=== QueryDslCombinedFieldsQuery

[pass]
++++
<pre>
++++
interface QueryDslCombinedFieldsQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property fields List of fields to search. <<Field>> wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */
fields: <<Field>>[]
pass:[/**] @property query Text to search for in the provided `fields`. The `combined_fields` query analyzes the provided text before performing a search. */
query: string
pass:[/**] @property auto_generate_synonyms_phrase_query If true, match phrase queries are automatically created for multi-term synonyms. */
auto_generate_synonyms_phrase_query?: boolean
pass:[/**] @property operator Boolean logic used to interpret text in the query value. */
operator?: <<QueryDslCombinedFieldsOperator>>
pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */
zero_terms_query?: <<QueryDslCombinedFieldsZeroTerms>>
}
[pass]
++++
</pre>
++++

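A minimal sketch of a `combined_fields` query, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the field names are illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// Scores terms as if `title` and `body` were one combined text field;
// both must be text fields sharing the same search analyzer.
const combined: estypes.QueryDslCombinedFieldsQuery = {
  query: 'distributed consensus',
  fields: ['title', 'body'],
  operator: 'and',          // every term must appear in some field
  zero_terms_query: 'none', // match nothing if the analyzer strips all tokens
}
----
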
[discrete]
[[QueryDslCombinedFieldsZeroTerms]]
=== QueryDslCombinedFieldsZeroTerms

[pass]
++++
<pre>
++++
type QueryDslCombinedFieldsZeroTerms = 'none' | 'all'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslCommonTermsQuery]]
=== QueryDslCommonTermsQuery

[pass]
++++
<pre>
++++
interface QueryDslCommonTermsQuery extends <<QueryDslQueryBase>> {
analyzer?: string
cutoff_frequency?: <<double>>
high_freq_operator?: <<QueryDslOperator>>
low_freq_operator?: <<QueryDslOperator>>
minimum_should_match?: <<MinimumShouldMatch>>
query: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslConstantScoreQuery]]
=== QueryDslConstantScoreQuery

[pass]
++++
<pre>
++++
interface QueryDslConstantScoreQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property filter <<Filter>> query you wish to run. Any returned documents must match this query. <<Filter>> queries do not calculate relevance scores. To speed up performance, Elasticsearch automatically caches frequently used filter queries. */
filter: <<QueryDslQueryContainer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDateDecayFunction]]
=== QueryDslDateDecayFunction

[pass]
++++
<pre>
++++
interface QueryDslDateDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<DateMath>>, <<Duration>>> {
}
type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys
& { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDateDistanceFeatureQuery]]
=== QueryDslDateDistanceFeatureQuery

[pass]
++++
<pre>
++++
interface QueryDslDateDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><<<DateMath>>, <<Duration>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDateRangeQuery]]
=== QueryDslDateRangeQuery

[pass]
++++
<pre>
++++
interface QueryDslDateRangeQuery extends <<QueryDslRangeQueryBase>><<<DateMath>>> {
pass:[/**] @property format Date format used to convert `date` values in the query. */
format?: <<DateFormat>>
pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */
time_zone?: <<TimeZone>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDecayFunction]]
=== QueryDslDecayFunction

[pass]
++++
<pre>
++++
type QueryDslDecayFunction = <<QueryDslUntypedDecayFunction>> | <<QueryDslDateDecayFunction>> | <<QueryDslNumericDecayFunction>> | <<QueryDslGeoDecayFunction>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDecayFunctionBase]]
=== QueryDslDecayFunctionBase

[pass]
++++
<pre>
++++
interface QueryDslDecayFunctionBase<TOrigin = unknown, TScale = unknown> {
pass:[/**] @property multi_value_mode Determines how the distance is calculated when a field used for computing the decay contains multiple values. */
multi_value_mode?: <<QueryDslMultiValueMode>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDecayPlacement]]
=== QueryDslDecayPlacement

[pass]
++++
<pre>
++++
interface QueryDslDecayPlacement<TOrigin = unknown, TScale = unknown> {
pass:[/**] @property decay Defines how documents are scored at the distance given at `scale`. */
decay?: <<double>>
pass:[/**] @property offset If defined, the decay is only computed for documents with a distance greater than the defined `offset`. */
offset?: TScale
pass:[/**] @property scale Defines the distance from origin + offset at which the computed score will equal the `decay` parameter. */
scale?: TScale
pass:[/**] @property origin The point of origin used for calculating distance. Must be given as a number for numeric fields, a date for date fields, and a geo point for geo fields. */
origin?: TOrigin
}
[pass]
++++
</pre>
++++

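A minimal sketch of a geo decay function built from <<QueryDslDecayPlacement>>, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the field name and coordinates are illustrative. Such a function is typically passed as `gauss`, `exp`, or `linear` inside a <<QueryDslFunctionScoreContainer>>:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// Full score within 2km of the origin (`offset`); beyond that the score
// decays so a document `offset` + `scale` (2km + 5km) away scores 0.5.
const gauss: estypes.QueryDslGeoDecayFunction = {
  location: {
    origin: { lat: 52.52, lon: 13.4 },
    offset: '2km',
    scale: '5km',
    decay: 0.5,
  },
  multi_value_mode: 'min', // use the closest value of a multi-valued field
}
----
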
[discrete]
[[QueryDslDisMaxQuery]]
=== QueryDslDisMaxQuery

[pass]
++++
<pre>
++++
interface QueryDslDisMaxQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property queries One or more query clauses. Returned documents must match one or more of these queries. If a document matches multiple queries, Elasticsearch uses the highest relevance score. */
queries: <<QueryDslQueryContainer>>[]
pass:[/**] @property tie_breaker Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */
tie_breaker?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDistanceFeatureQuery]]
=== QueryDslDistanceFeatureQuery

[pass]
++++
<pre>
++++
type QueryDslDistanceFeatureQuery = <<QueryDslUntypedDistanceFeatureQuery>> | <<QueryDslGeoDistanceFeatureQuery>> | <<QueryDslDateDistanceFeatureQuery>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslDistanceFeatureQueryBase]]
=== QueryDslDistanceFeatureQueryBase

[pass]
++++
<pre>
++++
interface QueryDslDistanceFeatureQueryBase<TOrigin = unknown, TDistance = unknown> extends <<QueryDslQueryBase>> {
pass:[/**] @property origin Date or point of origin used to calculate distances. If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. Date Math, such as `now-1h`, is supported. If the field value is a `geo_point` field, the `origin` value must be a geopoint. */
origin: TOrigin
pass:[/**] @property pivot <<Distance>> from the `origin` at which relevance scores receive half of the `boost` value. If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */
pivot: TDistance
pass:[/**] @property field <<Name>> of the field used to calculate distances. This field must meet the following criteria: be a `date`, `date_nanos` or `geo_point` field; have an `index` mapping parameter value of `true`, which is the default; have an `doc_values` mapping parameter value of `true`, which is the default. */
field: <<Field>>
}
[pass]
++++
</pre>
++++

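A minimal sketch of a date-based `distance_feature` query, assuming the `estypes` type namespace exported by `@elastic/elasticsearch`; the field name is illustrative:

[source,ts]
----
import { estypes } from '@elastic/elasticsearch'

// Documents closer to `origin` score higher; a document exactly `pivot`
// (7 days) away receives half of the configured `boost`.
const recency: estypes.QueryDslDateDistanceFeatureQuery = {
  field: 'delivery_date',
  origin: 'now',
  pivot: '7d',
  boost: 2.0,
}

const query: estypes.QueryDslQueryContainer = { distance_feature: recency }
----
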
[discrete]
[[QueryDslExistsQuery]]
=== QueryDslExistsQuery

[pass]
++++
<pre>
++++
interface QueryDslExistsQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property field <<Name>> of the field you wish to search. */
field: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFieldAndFormat]]
=== QueryDslFieldAndFormat

[pass]
++++
<pre>
++++
interface QueryDslFieldAndFormat {
pass:[/**] @property field Wildcard pattern. The request returns values for field names matching this pattern. */
field: <<Field>>
pass:[/**] @property format Format in which the values are returned. */
format?: string
include_unmapped?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFieldLookup]]
=== QueryDslFieldLookup

[pass]
++++
<pre>
++++
interface QueryDslFieldLookup {
pass:[/**] @property id `id` of the document. */
id: <<Id>>
pass:[/**] @property index Index from which to retrieve the document. */
index?: <<IndexName>>
pass:[/**] @property path <<Name>> of the field. */
path?: <<Field>>
pass:[/**] @property routing Custom routing value. */
routing?: <<Routing>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFieldValueFactorModifier]]
=== QueryDslFieldValueFactorModifier

[pass]
++++
<pre>
++++
type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFieldValueFactorScoreFunction]]
=== QueryDslFieldValueFactorScoreFunction

[pass]
++++
<pre>
++++
interface QueryDslFieldValueFactorScoreFunction {
pass:[/**] @property field <<Field>> to be extracted from the document. */
field: <<Field>>
pass:[/**] @property factor Optional factor to multiply the field value with. */
factor?: <<double>>
pass:[/**] @property missing Value used if the document doesn’t have that field. The modifier and factor are still applied to it as though it were read from the document. */
missing?: <<double>>
pass:[/**] @property modifier Modifier to apply to the field value. */
modifier?: <<QueryDslFieldValueFactorModifier>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFunctionBoostMode]]
=== QueryDslFunctionBoostMode

[pass]
++++
<pre>
++++
type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFunctionScoreContainer]]
=== QueryDslFunctionScoreContainer

[pass]
++++
<pre>
++++
interface QueryDslFunctionScoreContainer {
pass:[/**] @property exp Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */
exp?: <<QueryDslDecayFunction>>
pass:[/**] @property gauss Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */
gauss?: <<QueryDslDecayFunction>>
pass:[/**] @property linear Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */
linear?: <<QueryDslDecayFunction>>
pass:[/**] @property field_value_factor Function that allows you to use a field from a document to influence the score. It is similar to the script_score function but avoids the overhead of scripting. */
field_value_factor?: <<QueryDslFieldValueFactorScoreFunction>>
pass:[/**] @property random_score Generates scores that are uniformly distributed from 0 up to but not including 1. In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */
random_score?: <<QueryDslRandomScoreFunction>>
pass:[/**] @property script_score Enables you to wrap another query and customize its scoring, optionally with a computation derived from other numeric field values in the doc using a script expression. */
script_score?: <<QueryDslScriptScoreFunction>>
filter?: <<QueryDslQueryContainer>>
weight?: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslFunctionScoreMode]]
=== QueryDslFunctionScoreMode

[pass]
++++
<pre>
++++
type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min'
[pass]
++++
</pre>
++++

[discrete]
|
||
[[QueryDslFunctionScoreQuery]]
|
||
=== QueryDslFunctionScoreQuery
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface QueryDslFunctionScoreQuery extends <<QueryDslQueryBase>> {
|
||
pass:[/**] @property boost_mode Defines how he newly computed score is combined with the score of the query */
|
||
boost_mode?: <<QueryDslFunctionBoostMode>>
|
||
pass:[/**] @property functions One or more functions that compute a new score for each document returned by the query. */
|
||
functions?: <<QueryDslFunctionScoreContainer>>[]
|
||
pass:[/**] @property max_boost Restricts the new score to not exceed the provided limit. */
|
||
max_boost?: <<double>>
|
||
pass:[/**] @property min_score Excludes documents that do not meet the provided score threshold. */
|
||
min_score?: <<double>>
|
||
pass:[/**] @property query A query that determines the documents for which a new score is computed. */
|
||
query?: <<QueryDslQueryContainer>>
|
||
pass:[/**] @property score_mode Specifies how the computed scores are combined */
|
||
score_mode?: <<QueryDslFunctionScoreMode>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
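
The generated types above only describe the request shape. As a hedged illustration of how they compose, the sketch below boosts matches by a numeric field via `field_value_factor`; the index name (`my-index`) and the `title` and `popularity` fields are hypothetical, not part of the types.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// function_score: rescore `match` hits by sqrt(1.2 * popularity),
// multiplying the function result into the query score (boost_mode).
const result = await client.search({
  index: 'my-index',
  query: {
    function_score: {
      query: { match: { title: 'elasticsearch' } },
      functions: [{
        field_value_factor: {
          field: 'popularity',   // hypothetical numeric field
          factor: 1.2,
          modifier: 'sqrt',
          missing: 1
        }
      }],
      score_mode: 'sum',
      boost_mode: 'multiply',
      max_boost: 10
    }
  }
})
console.log(result.hits.hits)
----

Later sketches in this section reuse this `client` instance.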

[discrete]
[[QueryDslFuzzyQuery]]
=== QueryDslFuzzyQuery

[pass]
++++
<pre>
++++
interface QueryDslFuzzyQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property max_expansions Maximum number of variations created. */
max_expansions?: <<integer>>
pass:[/**] @property prefix_length Number of beginning characters left unchanged when creating expansions. */
prefix_length?: <<integer>>
pass:[/**] @property rewrite Method used to rewrite the query. */
rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property transpositions Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */
transpositions?: boolean
pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property value <<Term>> you wish to find in the provided field. */
value: string | <<double>> | boolean
}
[pass]
++++
</pre>
++++
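
A minimal sketch of a `fuzzy` query, assuming the `client` from the first sketch and a hypothetical `user.name` field; `fuzziness: 'AUTO'` scales the allowed edit distance with term length.

[source,ts]
----
// Tolerates typos such as "elastcsearch" within the configured edit distance.
const response = await client.search({
  index: 'my-index',
  query: {
    fuzzy: {
      'user.name': {
        value: 'elastcsearch',
        fuzziness: 'AUTO',
        prefix_length: 1,
        transpositions: true
      }
    }
  }
})
----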

[discrete]
[[QueryDslGeoBoundingBoxQuery]]
=== QueryDslGeoBoundingBoxQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoBoundingBoxQueryKeys extends <<QueryDslQueryBase>> {
type?: <<QueryDslGeoExecution>>
validation_method?: <<QueryDslGeoValidationMethod>>
ignore_unmapped?: boolean
}
type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys
& { [property: string]: <<GeoBounds>> | <<QueryDslGeoExecution>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoDecayFunction]]
=== QueryDslGeoDecayFunction

[pass]
++++
<pre>
++++
interface QueryDslGeoDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<GeoLocation>>, <<Distance>>> {
}
type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys
& { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoDistanceFeatureQuery]]
=== QueryDslGeoDistanceFeatureQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><<<GeoLocation>>, <<Distance>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoDistanceQuery]]
=== QueryDslGeoDistanceQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoDistanceQueryKeys extends <<QueryDslQueryBase>> {
distance: <<Distance>>
distance_type?: <<GeoDistanceType>>
validation_method?: <<QueryDslGeoValidationMethod>>
ignore_unmapped?: boolean
}
type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys
& { [property: string]: <<GeoLocation>> | <<Distance>> | <<GeoDistanceType>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string }
[pass]
++++
</pre>
++++
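
Note the `& { [property: string]: ... }` intersection: the geo field under search is passed as an extra key next to the fixed parameters. A sketch, assuming a hypothetical `places` index with a `location` geo-point field:

[source,ts]
----
// `distance` is a fixed key; the searched field ("location") is an
// arbitrary key permitted by the index signature of the type above.
const nearby = await client.search({
  index: 'places',
  query: {
    geo_distance: {
      distance: '10km',
      location: { lat: 40.73, lon: -73.99 }
    }
  }
})
----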

[discrete]
[[QueryDslGeoExecution]]
=== QueryDslGeoExecution

[pass]
++++
<pre>
++++
type QueryDslGeoExecution = 'memory' | 'indexed'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoPolygonPoints]]
=== QueryDslGeoPolygonPoints

[pass]
++++
<pre>
++++
interface QueryDslGeoPolygonPoints {
points: <<GeoLocation>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoPolygonQuery]]
=== QueryDslGeoPolygonQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoPolygonQueryKeys extends <<QueryDslQueryBase>> {
validation_method?: <<QueryDslGeoValidationMethod>>
ignore_unmapped?: boolean
}
type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys
& { [property: string]: <<QueryDslGeoPolygonPoints>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoShapeFieldQuery]]
=== QueryDslGeoShapeFieldQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoShapeFieldQuery {
shape?: <<GeoShape>>
pass:[/**] @property indexed_shape <<Query>> using an indexed shape retrieved from the specified document and path. */
indexed_shape?: <<QueryDslFieldLookup>>
pass:[/**] @property relation Spatial relation operator used to search a geo field. */
relation?: <<GeoShapeRelation>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoShapeQuery]]
=== QueryDslGeoShapeQuery

[pass]
++++
<pre>
++++
interface QueryDslGeoShapeQueryKeys extends <<QueryDslQueryBase>> {
ignore_unmapped?: boolean
}
type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys
& { [property: string]: <<QueryDslGeoShapeFieldQuery>> | boolean | <<float>> | string }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslGeoValidationMethod]]
=== QueryDslGeoValidationMethod

[pass]
++++
<pre>
++++
type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslHasChildQuery]]
=== QueryDslHasChildQuery

[pass]
++++
<pre>
++++
interface QueryDslHasChildQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */
ignore_unmapped?: boolean
pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */
inner_hits?: <<SearchInnerHits>>
pass:[/**] @property max_children Maximum number of child documents that match the query allowed for a returned parent document. If the parent document exceeds this limit, it is excluded from the search results. */
max_children?: <<integer>>
pass:[/**] @property min_children Minimum number of child documents that must match the query for a parent document to be returned. If the parent document does not meet this limit, it is excluded from the search results. */
min_children?: <<integer>>
pass:[/**] @property query <<Query>> you wish to run on child documents of the `type` field. If a child document matches the search, the query returns the parent document. */
query: <<QueryDslQueryContainer>>
pass:[/**] @property score_mode Indicates how scores for matching child documents affect the root parent document’s relevance score. */
score_mode?: <<QueryDslChildScoreMode>>
pass:[/**] @property type <<Name>> of the child relationship mapped for the `join` field. */
type: <<RelationName>>
}
[pass]
++++
</pre>
++++
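
A sketch, assuming a hypothetical `qa` index whose `join` field maps `question` parents to `answer` children:

[source,ts]
----
// Returns parent ("question") documents that have at least one
// child ("answer") matching the inner query.
const parents = await client.search({
  index: 'qa',
  query: {
    has_child: {
      type: 'answer',
      query: { match: { body: 'typescript' } },
      min_children: 1,
      score_mode: 'max'
    }
  }
})
----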

[discrete]
[[QueryDslHasParentQuery]]
=== QueryDslHasParentQuery

[pass]
++++
<pre>
++++
interface QueryDslHasParentQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. You can use this parameter to query multiple indices that may not contain the `parent_type`. */
ignore_unmapped?: boolean
pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */
inner_hits?: <<SearchInnerHits>>
pass:[/**] @property parent_type <<Name>> of the parent relationship mapped for the `join` field. */
parent_type: <<RelationName>>
pass:[/**] @property query <<Query>> you wish to run on parent documents of the `parent_type` field. If a parent document matches the search, the query returns its child documents. */
query: <<QueryDslQueryContainer>>
pass:[/**] @property score Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */
score?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIdsQuery]]
=== QueryDslIdsQuery

[pass]
++++
<pre>
++++
interface QueryDslIdsQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property values An array of document IDs. */
values?: <<Ids>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsAllOf]]
=== QueryDslIntervalsAllOf

[pass]
++++
<pre>
++++
interface QueryDslIntervalsAllOf {
pass:[/**] @property intervals An array of rules to combine. All rules must produce a match in a document for the overall source to match. */
intervals: <<QueryDslIntervalsContainer>>[]
pass:[/**] @property max_gaps Maximum number of positions between the matching terms. Intervals produced by the rules further apart than this are not considered matches. */
max_gaps?: <<integer>>
pass:[/**] @property ordered If `true`, intervals produced by the rules should appear in the order in which they are specified. */
ordered?: boolean
pass:[/**] @property filter Rule used to filter returned intervals. */
filter?: <<QueryDslIntervalsFilter>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsAnyOf]]
=== QueryDslIntervalsAnyOf

[pass]
++++
<pre>
++++
interface QueryDslIntervalsAnyOf {
pass:[/**] @property intervals An array of rules to match. */
intervals: <<QueryDslIntervalsContainer>>[]
pass:[/**] @property filter Rule used to filter returned intervals. */
filter?: <<QueryDslIntervalsFilter>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsContainer]]
=== QueryDslIntervalsContainer

[pass]
++++
<pre>
++++
interface QueryDslIntervalsContainer {
pass:[/**] @property all_of Returns matches that span a combination of other rules. */
all_of?: <<QueryDslIntervalsAllOf>>
pass:[/**] @property any_of Returns intervals produced by any of its sub-rules. */
any_of?: <<QueryDslIntervalsAnyOf>>
pass:[/**] @property fuzzy Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */
fuzzy?: <<QueryDslIntervalsFuzzy>>
pass:[/**] @property match Matches analyzed text. */
match?: <<QueryDslIntervalsMatch>>
pass:[/**] @property prefix Matches terms that start with a specified set of characters. */
prefix?: <<QueryDslIntervalsPrefix>>
pass:[/**] @property wildcard Matches terms using a wildcard pattern. */
wildcard?: <<QueryDslIntervalsWildcard>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsFilter]]
=== QueryDslIntervalsFilter

[pass]
++++
<pre>
++++
interface QueryDslIntervalsFilter {
pass:[/**] @property after <<Query>> used to return intervals that follow an interval from the `filter` rule. */
after?: <<QueryDslIntervalsContainer>>
pass:[/**] @property before <<Query>> used to return intervals that occur before an interval from the `filter` rule. */
before?: <<QueryDslIntervalsContainer>>
pass:[/**] @property contained_by <<Query>> used to return intervals contained by an interval from the `filter` rule. */
contained_by?: <<QueryDslIntervalsContainer>>
pass:[/**] @property containing <<Query>> used to return intervals that contain an interval from the `filter` rule. */
containing?: <<QueryDslIntervalsContainer>>
pass:[/**] @property not_contained_by <<Query>> used to return intervals that are **not** contained by an interval from the `filter` rule. */
not_contained_by?: <<QueryDslIntervalsContainer>>
pass:[/**] @property not_containing <<Query>> used to return intervals that do **not** contain an interval from the `filter` rule. */
not_containing?: <<QueryDslIntervalsContainer>>
pass:[/**] @property not_overlapping <<Query>> used to return intervals that do **not** overlap with an interval from the `filter` rule. */
not_overlapping?: <<QueryDslIntervalsContainer>>
pass:[/**] @property overlapping <<Query>> used to return intervals that overlap with an interval from the `filter` rule. */
overlapping?: <<QueryDslIntervalsContainer>>
pass:[/**] @property script <<Script>> used to return matching documents. This script must return a boolean value: `true` or `false`. */
script?: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsFuzzy]]
=== QueryDslIntervalsFuzzy

[pass]
++++
<pre>
++++
interface QueryDslIntervalsFuzzy {
pass:[/**] @property analyzer <<Analyzer>> used to normalize the term. */
analyzer?: string
pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property prefix_length Number of beginning characters left unchanged when creating expansions. */
prefix_length?: <<integer>>
pass:[/**] @property term The term to match. */
term: string
pass:[/**] @property transpositions Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */
transpositions?: boolean
pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsMatch]]
=== QueryDslIntervalsMatch

[pass]
++++
<pre>
++++
interface QueryDslIntervalsMatch {
pass:[/**] @property analyzer <<Analyzer>> used to analyze terms in the query. */
analyzer?: string
pass:[/**] @property max_gaps Maximum number of positions between the matching terms. Terms further apart than this are not considered matches. */
max_gaps?: <<integer>>
pass:[/**] @property ordered If `true`, matching terms must appear in their specified order. */
ordered?: boolean
pass:[/**] @property query Text you wish to find in the provided field. */
query: string
pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: <<Field>>
pass:[/**] @property filter An optional interval filter. */
filter?: <<QueryDslIntervalsFilter>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsPrefix]]
=== QueryDslIntervalsPrefix

[pass]
++++
<pre>
++++
interface QueryDslIntervalsPrefix {
pass:[/**] @property analyzer <<Analyzer>> used to analyze the `prefix`. */
analyzer?: string
pass:[/**] @property prefix Beginning characters of terms you wish to find in the top-level field. */
prefix: string
pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslIntervalsQuery]]
=== QueryDslIntervalsQuery

[pass]
++++
<pre>
++++
interface QueryDslIntervalsQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property all_of Returns matches that span a combination of other rules. */
all_of?: <<QueryDslIntervalsAllOf>>
pass:[/**] @property any_of Returns intervals produced by any of its sub-rules. */
any_of?: <<QueryDslIntervalsAnyOf>>
pass:[/**] @property fuzzy Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */
fuzzy?: <<QueryDslIntervalsFuzzy>>
pass:[/**] @property match Matches analyzed text. */
match?: <<QueryDslIntervalsMatch>>
pass:[/**] @property prefix Matches terms that start with a specified set of characters. */
prefix?: <<QueryDslIntervalsPrefix>>
pass:[/**] @property wildcard Matches terms using a wildcard pattern. */
wildcard?: <<QueryDslIntervalsWildcard>>
}
[pass]
++++
</pre>
++++
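
A sketch of an ordered proximity search over a hypothetical `body` field, using the rules above:

[source,ts]
----
// "search" must be followed by "engine" with at most two positions between.
const ordered = await client.search({
  index: 'articles',
  query: {
    intervals: {
      body: {
        all_of: {
          ordered: true,
          max_gaps: 2,
          intervals: [
            { match: { query: 'search' } },
            { match: { query: 'engine' } }
          ]
        }
      }
    }
  }
})
----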

[discrete]
[[QueryDslIntervalsWildcard]]
=== QueryDslIntervalsWildcard

[pass]
++++
<pre>
++++
interface QueryDslIntervalsWildcard {
pass:[/**] @property analyzer <<Analyzer>> used to analyze the `pattern`. Defaults to the top-level field's analyzer. */
analyzer?: string
pass:[/**] @property pattern Wildcard pattern used to find matching terms. */
pattern: string
pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
use_field?: <<Field>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslLike]]
=== QueryDslLike

[pass]
++++
<pre>
++++
type QueryDslLike = string | <<QueryDslLikeDocument>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslLikeDocument]]
=== QueryDslLikeDocument

[pass]
++++
<pre>
++++
interface QueryDslLikeDocument {
pass:[/**] @property doc A document not present in the index. */
doc?: any
fields?: <<Field>>[]
pass:[/**] @property _id ID of a document. */
_id?: <<Id>>
pass:[/**] @property _index Index of a document. */
_index?: <<IndexName>>
pass:[/**] @property per_field_analyzer Overrides the default analyzer. */
per_field_analyzer?: Record<<<Field>>, string>
routing?: <<Routing>>
version?: <<VersionNumber>>
version_type?: <<VersionType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchAllQuery]]
=== QueryDslMatchAllQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchAllQuery extends <<QueryDslQueryBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchBoolPrefixQuery]]
=== QueryDslMatchBoolPrefixQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchBoolPrefixQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer <<Analyzer>> used to convert the text in the query value into tokens. */
analyzer?: string
pass:[/**] @property fuzziness Maximum edit distance allowed for matching. Can be applied to the term subqueries constructed for all terms but the final term. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. Can be applied to the term subqueries constructed for all terms but the final term. */
fuzzy_rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. */
fuzzy_transpositions?: boolean
pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. Can be applied to the term subqueries constructed for all terms but the final term. */
max_expansions?: <<integer>>
pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. Applied to the constructed bool query. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property operator Boolean logic used to interpret text in the query value. Applied to the constructed bool query. */
operator?: <<QueryDslOperator>>
pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. Can be applied to the term subqueries constructed for all terms but the final term. */
prefix_length?: <<integer>>
pass:[/**] @property query Terms you wish to find in the provided field. The last term is used in a prefix query. */
query: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchNoneQuery]]
=== QueryDslMatchNoneQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchNoneQuery extends <<QueryDslQueryBase>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchPhrasePrefixQuery]]
=== QueryDslMatchPhrasePrefixQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchPhrasePrefixQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer <<Analyzer>> used to convert text in the query value into tokens. */
analyzer?: string
pass:[/**] @property max_expansions Maximum number of terms to which the last provided term of the query value will expand. */
max_expansions?: <<integer>>
pass:[/**] @property query Text you wish to find in the provided field. */
query: string
pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */
slop?: <<integer>>
pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */
zero_terms_query?: <<QueryDslZeroTermsQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchPhraseQuery]]
=== QueryDslMatchPhraseQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchPhraseQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer <<Analyzer>> used to convert the text in the query value into tokens. */
analyzer?: string
pass:[/**] @property query <<Query>> terms that are analyzed and turned into a phrase query. */
query: string
pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */
slop?: <<integer>>
pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */
zero_terms_query?: <<QueryDslZeroTermsQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslMatchQuery]]
=== QueryDslMatchQuery

[pass]
++++
<pre>
++++
interface QueryDslMatchQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer <<Analyzer>> used to convert the text in the query value into tokens. */
analyzer?: string
pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */
auto_generate_synonyms_phrase_query?: boolean
cutoff_frequency?: <<double>>
pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */
fuzzy_rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */
fuzzy_transpositions?: boolean
pass:[/**] @property lenient If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */
lenient?: boolean
pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. */
max_expansions?: <<integer>>
pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property operator Boolean logic used to interpret text in the query value. */
operator?: <<QueryDslOperator>>
pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. */
prefix_length?: <<integer>>
pass:[/**] @property query Text, number, boolean value or date you wish to find in the provided field. */
query: string | <<float>> | boolean
pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */
zero_terms_query?: <<QueryDslZeroTermsQuery>>
}
[pass]
++++
</pre>
++++
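
A sketch of the standard full-text `match` query (hypothetical `articles` index and `title` field):

[source,ts]
----
// Both analyzed terms must match (operator: 'and'); fuzziness tolerates typos.
const matches = await client.search({
  index: 'articles',
  query: {
    match: {
      title: {
        query: 'quick brown fox',
        operator: 'and',
        fuzziness: 'AUTO'
      }
    }
  }
})
----

The shorthand `match: { title: 'quick brown fox' }` also type-checks, because the query container accepts a plain string, number, or boolean in place of the full object.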

[discrete]
[[QueryDslMoreLikeThisQuery]]
=== QueryDslMoreLikeThisQuery

[pass]
++++
<pre>
++++
interface QueryDslMoreLikeThisQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer The analyzer that is used to analyze the free form text. Defaults to the analyzer associated with the first field in fields. */
analyzer?: string
pass:[/**] @property boost_terms Each term in the formed query could be further boosted by their tf-idf score. This sets the boost factor to use when using this feature. Defaults to deactivated (0). */
boost_terms?: <<double>>
pass:[/**] @property fail_on_unsupported_field Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */
fail_on_unsupported_field?: boolean
pass:[/**] @property fields A list of fields to fetch and analyze the text from. Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */
fields?: <<Field>>[]
pass:[/**] @property include Specifies whether the input documents should also be included in the search results returned. */
include?: boolean
pass:[/**] @property like Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */
like: <<QueryDslLike>> | <<QueryDslLike>>[]
pass:[/**] @property max_doc_freq The maximum document frequency above which the terms are ignored from the input document. */
max_doc_freq?: <<integer>>
pass:[/**] @property max_query_terms The maximum number of query terms that can be selected. */
max_query_terms?: <<integer>>
pass:[/**] @property max_word_length The maximum word length above which the terms are ignored. Defaults to unbounded (`0`). */
max_word_length?: <<integer>>
pass:[/**] @property min_doc_freq The minimum document frequency below which the terms are ignored from the input document. */
min_doc_freq?: <<integer>>
pass:[/**] @property minimum_should_match After the disjunctive query has been formed, this parameter controls the number of terms that must match. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property min_term_freq The minimum term frequency below which the terms are ignored from the input document. */
min_term_freq?: <<integer>>
pass:[/**] @property min_word_length The minimum word length below which the terms are ignored. */
min_word_length?: <<integer>>
routing?: <<Routing>>
pass:[/**] @property stop_words An array of stop words. Any word in this set is ignored. */
stop_words?: <<AnalysisStopWords>>
pass:[/**] @property unlike Used in combination with `like` to exclude documents that match a set of terms. */
unlike?: <<QueryDslLike>> | <<QueryDslLike>>[]
version?: <<VersionNumber>>
version_type?: <<VersionType>>
}
[pass]
++++
</pre>
++++
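
A sketch showing how `like` mixes a stored-document reference (a `QueryDslLikeDocument`) with free-form text; all names are hypothetical:

[source,ts]
----
// Find documents similar to article "1" and to the text "distributed search".
const similar = await client.search({
  index: 'articles',
  query: {
    more_like_this: {
      fields: ['title', 'body'],
      like: [{ _index: 'articles', _id: '1' }, 'distributed search'],
      min_term_freq: 1,
      max_query_terms: 12
    }
  }
})
----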

[discrete]
[[QueryDslMultiMatchQuery]]
=== QueryDslMultiMatchQuery

[pass]
++++
<pre>
++++
interface QueryDslMultiMatchQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property analyzer <<Analyzer>> used to convert the text in the query value into tokens. */
analyzer?: string
pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */
auto_generate_synonyms_phrase_query?: boolean
cutoff_frequency?: <<double>>
pass:[/**] @property fields The fields to be queried. Defaults to the `index.query.default_field` index setting, which in turn defaults to `*`. */
fields?: <<Fields>>
pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */
fuzzy_rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. */
fuzzy_transpositions?: boolean
pass:[/**] @property lenient If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */
lenient?: boolean
pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. */
max_expansions?: <<integer>>
pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property operator Boolean logic used to interpret text in the query value. */
operator?: <<QueryDslOperator>>
pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. */
prefix_length?: <<integer>>
pass:[/**] @property query Text, number, boolean value or date you wish to find in the provided field. */
query: string
pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */
slop?: <<integer>>
pass:[/**] @property tie_breaker Determines how scores for each per-term blended query and scores across groups are combined. */
tie_breaker?: <<double>>
pass:[/**] @property type How the `multi_match` query is executed internally. */
type?: <<QueryDslTextQueryType>>
pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */
zero_terms_query?: <<QueryDslZeroTermsQuery>>
}
[pass]
++++
</pre>
++++
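
A sketch searching several hypothetical fields at once; with `type: 'best_fields'` the single best field score wins and `tie_breaker` folds in the others:

[source,ts]
----
// Caret syntax weights `title` three times as heavily as `body`.
const hits = await client.search({
  index: 'articles',
  query: {
    multi_match: {
      query: 'quick brown fox',
      fields: ['title^3', 'body'],
      type: 'best_fields',
      tie_breaker: 0.3
    }
  }
})
----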

[discrete]
[[QueryDslMultiValueMode]]
=== QueryDslMultiValueMode

[pass]
++++
<pre>
++++
type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslNestedQuery]]
=== QueryDslNestedQuery

[pass]
++++
<pre>
++++
interface QueryDslNestedQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped path and not return any documents instead of an error. */
ignore_unmapped?: boolean
pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */
inner_hits?: <<SearchInnerHits>>
pass:[/**] @property path Path to the nested object you wish to search. */
path: <<Field>>
pass:[/**] @property query <<Query>> you wish to run on nested objects in the path. */
query: <<QueryDslQueryContainer>>
pass:[/**] @property score_mode How scores for matching child objects affect the root parent document’s relevance score. */
score_mode?: <<QueryDslChildScoreMode>>
}
[pass]
++++
</pre>
++++
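
A sketch, assuming a hypothetical nested `comments` mapping on the `articles` index:

[source,ts]
----
// Matches root documents whose nested `comments` objects satisfy the inner
// query; an empty `inner_hits` object surfaces which objects matched.
const withComments = await client.search({
  index: 'articles',
  query: {
    nested: {
      path: 'comments',
      query: { match: { 'comments.text': 'helpful' } },
      score_mode: 'avg',
      inner_hits: {}
    }
  }
})
----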

[discrete]
[[QueryDslNumberRangeQuery]]
=== QueryDslNumberRangeQuery

[pass]
++++
<pre>
++++
interface QueryDslNumberRangeQuery extends <<QueryDslRangeQueryBase>><<<double>>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslNumericDecayFunction]]
=== QueryDslNumericDecayFunction

[pass]
++++
<pre>
++++
interface QueryDslNumericDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<double>>, <<double>>> {
}
type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys
& { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslOperator]]
=== QueryDslOperator

[pass]
++++
<pre>
++++
type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslParentIdQuery]]
=== QueryDslParentIdQuery

[pass]
++++
<pre>
++++
interface QueryDslParentIdQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property id ID of the parent document. */
id?: <<Id>>
pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */
ignore_unmapped?: boolean
pass:[/**] @property type <<Name>> of the child relationship mapped for the `join` field. */
type?: <<RelationName>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslPercolateQuery]]
=== QueryDslPercolateQuery

[pass]
++++
<pre>
++++
interface QueryDslPercolateQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property document The source of the document being percolated. */
document?: any
pass:[/**] @property documents An array of sources of the documents being percolated. */
documents?: any[]
pass:[/**] @property field <<Field>> that holds the indexed queries. The field must use the `percolator` mapping type. */
field: <<Field>>
pass:[/**] @property id The ID of a stored document to percolate. */
id?: <<Id>>
pass:[/**] @property index The index of a stored document to percolate. */
index?: <<IndexName>>
pass:[/**] @property name The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */
name?: string
pass:[/**] @property preference Preference used to fetch document to percolate. */
preference?: string
pass:[/**] @property routing <<Routing>> used to fetch document to percolate. */
routing?: <<Routing>>
pass:[/**] @property version The expected version of a stored document to percolate. */
version?: <<VersionNumber>>
}
[pass]
++++
</pre>
++++
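
A sketch of the reverse-search pattern, assuming a hypothetical `alerts` index whose `query` field uses the `percolator` mapping type:

[source,ts]
----
// Which stored queries match this candidate document?
const triggered = await client.search({
  index: 'alerts',
  query: {
    percolate: {
      field: 'query',
      document: { message: 'disk usage at 95%' }
    }
  }
})
----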

[discrete]
[[QueryDslPinnedDoc]]
=== QueryDslPinnedDoc

[pass]
++++
<pre>
++++
interface QueryDslPinnedDoc {
pass:[/**] @property _id The unique document ID. */
_id: <<Id>>
pass:[/**] @property _index The index that contains the document. */
_index: <<IndexName>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslPinnedQuery]]
=== QueryDslPinnedQuery

[pass]
++++
<pre>
++++
interface QueryDslPinnedQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property organic Any choice of query used to rank documents which will be ranked below the "pinned" documents. */
organic: <<QueryDslQueryContainer>>
pass:[/**] @property ids Document IDs listed in the order they are to appear in results. Required if `docs` is not specified. */
ids?: <<Id>>[]
pass:[/**] @property docs Documents listed in the order they are to appear in results. Required if `ids` is not specified. */
docs?: <<QueryDslPinnedDoc>>[]
}
[pass]
++++
</pre>
++++
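
A sketch with hypothetical IDs and index:

[source,ts]
----
// doc-1 and doc-2 always rank first; the rest come from the organic query.
const promoted = await client.search({
  index: 'products',
  query: {
    pinned: {
      ids: ['doc-1', 'doc-2'],
      organic: { match: { description: 'wireless headphones' } }
    }
  }
})
----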

[discrete]
[[QueryDslPrefixQuery]]
=== QueryDslPrefixQuery

[pass]
++++
<pre>
++++
interface QueryDslPrefixQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property rewrite Method used to rewrite the query. */
rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property value Beginning characters of terms you wish to find in the provided field. */
value: string
pass:[/**] @property case_insensitive Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. Default is `false`, which means the case sensitivity of matching depends on the underlying field’s mapping. */
case_insensitive?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslQueryBase]]
=== QueryDslQueryBase

[pass]
++++
<pre>
++++
interface QueryDslQueryBase {
pass:[/**] @property boost Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. */
boost?: <<float>>
_name?: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslQueryContainer]]
=== QueryDslQueryContainer

[pass]
++++
<pre>
++++
interface QueryDslQueryContainer {
pass:[/**] @property bool Matches documents matching boolean combinations of other queries. */
bool?: <<QueryDslBoolQuery>>
pass:[/**] @property boosting Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */
boosting?: <<QueryDslBoostingQuery>>
common?: Partial<Record<<<Field>>, <<QueryDslCommonTermsQuery>> | string>>
pass:[/**] @property combined_fields The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */
combined_fields?: <<QueryDslCombinedFieldsQuery>>
pass:[/**] @property constant_score Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */
constant_score?: <<QueryDslConstantScoreQuery>>
pass:[/**] @property dis_max Returns documents matching one or more wrapped queries, called query clauses or clauses. If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */
dis_max?: <<QueryDslDisMaxQuery>>
pass:[/**] @property distance_feature Boosts the relevance score of documents closer to a provided origin date or point. For example, you can use this query to give more weight to documents closer to a certain date or location. */
distance_feature?: <<QueryDslDistanceFeatureQuery>>
pass:[/**] @property exists Returns documents that contain an indexed value for a field. */
exists?: <<QueryDslExistsQuery>>
pass:[/**] @property function_score The `function_score` query enables you to modify the score of documents that are retrieved by a query. */
function_score?: <<QueryDslFunctionScoreQuery>> | <<QueryDslFunctionScoreContainer>>[]
pass:[/**] @property fuzzy Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */
fuzzy?: Partial<Record<<<Field>>, <<QueryDslFuzzyQuery>> | string | <<double>> | boolean>>
pass:[/**] @property geo_bounding_box Matches geo_point and geo_shape values that intersect a bounding box. */
geo_bounding_box?: <<QueryDslGeoBoundingBoxQuery>>
pass:[/**] @property geo_distance Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */
geo_distance?: <<QueryDslGeoDistanceQuery>>
geo_polygon?: <<QueryDslGeoPolygonQuery>>
pass:[/**] @property geo_shape <<Filter>> documents indexed using either the `geo_shape` or the `geo_point` type. */
geo_shape?: <<QueryDslGeoShapeQuery>>
pass:[/**] @property has_child Returns parent documents whose joined child documents match a provided query. */
has_child?: <<QueryDslHasChildQuery>>
pass:[/**] @property has_parent Returns child documents whose joined parent document matches a provided query. */
has_parent?: <<QueryDslHasParentQuery>>
pass:[/**] @property ids Returns documents based on their IDs. This query uses document IDs stored in the `_id` field. */
ids?: <<QueryDslIdsQuery>>
pass:[/**] @property intervals Returns documents based on the order and proximity of matching terms. */
intervals?: Partial<Record<<<Field>>, <<QueryDslIntervalsQuery>>>>
pass:[/**] @property knn Finds the k nearest vectors to a query vector, as measured by a similarity metric, through approximate search on indexed `dense_vector` fields. */
knn?: <<KnnQuery>>
pass:[/**] @property match Returns documents that match a provided text, number, date or boolean value. The provided text is analyzed before matching. */
match?: Partial<Record<<<Field>>, <<QueryDslMatchQuery>> | string | <<float>> | boolean>>
pass:[/**] @property match_all Matches all documents, giving them all a `_score` of 1.0. */
match_all?: <<QueryDslMatchAllQuery>>
pass:[/**] @property match_bool_prefix Analyzes its input and constructs a `bool` query from the terms. Each term except the last is used in a `term` query. The last term is used in a prefix query. */
match_bool_prefix?: Partial<Record<<<Field>>, <<QueryDslMatchBoolPrefixQuery>> | string>>
pass:[/**] @property match_none Matches no documents. */
match_none?: <<QueryDslMatchNoneQuery>>
pass:[/**] @property match_phrase Analyzes the text and creates a phrase query out of the analyzed text. */
match_phrase?: Partial<Record<<<Field>>, <<QueryDslMatchPhraseQuery>> | string>>
pass:[/**] @property match_phrase_prefix Returns documents that contain the words of a provided text, in the same order as provided. The last term of the provided text is treated as a prefix, matching any words that begin with that term. */
match_phrase_prefix?: Partial<Record<<<Field>>, <<QueryDslMatchPhrasePrefixQuery>> | string>>
pass:[/**] @property more_like_this Returns documents that are "like" a given set of documents. */
more_like_this?: <<QueryDslMoreLikeThisQuery>>
pass:[/**] @property multi_match Enables you to search for a provided text, number, date or boolean value across multiple fields. The provided text is analyzed before matching. */
multi_match?: <<QueryDslMultiMatchQuery>>
pass:[/**] @property nested Wraps another query to search nested fields. If an object matches the search, the nested query returns the root parent document. */
nested?: <<QueryDslNestedQuery>>
pass:[/**] @property parent_id Returns child documents joined to a specific parent document. */
parent_id?: <<QueryDslParentIdQuery>>
pass:[/**] @property percolate Matches queries stored in an index. */
percolate?: <<QueryDslPercolateQuery>>
pass:[/**] @property pinned Promotes selected documents to rank higher than those matching a given query. */
pinned?: <<QueryDslPinnedQuery>>
pass:[/**] @property prefix Returns documents that contain a specific prefix in a provided field. */
prefix?: Partial<Record<<<Field>>, <<QueryDslPrefixQuery>> | string>>
pass:[/**] @property query_string Returns documents based on a provided query string, using a parser with a strict syntax. */
query_string?: <<QueryDslQueryStringQuery>>
pass:[/**] @property range Returns documents that contain terms within a provided range. */
range?: Partial<Record<<<Field>>, <<QueryDslRangeQuery>>>>
pass:[/**] @property rank_feature Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */
rank_feature?: <<QueryDslRankFeatureQuery>>
pass:[/**] @property regexp Returns documents that contain terms matching a regular expression. */
regexp?: Partial<Record<<<Field>>, <<QueryDslRegexpQuery>> | string>>
rule?: <<QueryDslRuleQuery>>
pass:[/**] @property script Filters documents based on a provided script. The script query is typically used in a filter context. */
script?: <<QueryDslScriptQuery>>
pass:[/**] @property script_score Uses a script to provide a custom score for returned documents. */
script_score?: <<QueryDslScriptScoreQuery>>
pass:[/**] @property semantic A semantic query to `semantic_text` field types. */
semantic?: <<QueryDslSemanticQuery>>
pass:[/**] @property shape <<Queries>> documents that contain fields indexed using the `shape` type. */
shape?: <<QueryDslShapeQuery>>
pass:[/**] @property simple_query_string Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */
simple_query_string?: <<QueryDslSimpleQueryStringQuery>>
pass:[/**] @property span_containing Returns matches which enclose another span query. */
span_containing?: <<QueryDslSpanContainingQuery>>
pass:[/**] @property span_field_masking Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */
span_field_masking?: <<QueryDslSpanFieldMaskingQuery>>
pass:[/**] @property span_first Matches spans near the beginning of a field. */
span_first?: <<QueryDslSpanFirstQuery>>
pass:[/**] @property span_multi Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */
span_multi?: <<QueryDslSpanMultiTermQuery>>
pass:[/**] @property span_near Matches spans which are near one another. You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. */
span_near?: <<QueryDslSpanNearQuery>>
pass:[/**] @property span_not Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */
span_not?: <<QueryDslSpanNotQuery>>
pass:[/**] @property span_or Matches the union of its span clauses. */
span_or?: <<QueryDslSpanOrQuery>>
pass:[/**] @property span_term Matches spans containing a term. */
span_term?: Partial<Record<<<Field>>, <<QueryDslSpanTermQuery>> | string>>
pass:[/**] @property span_within Returns matches which are enclosed inside another span query. */
span_within?: <<QueryDslSpanWithinQuery>>
pass:[/**] @property sparse_vector Queries a sparse vector field, using either input query vectors or a natural language processing model that converts the query into a list of token-weight pairs. */
sparse_vector?: <<QueryDslSparseVectorQuery>>
pass:[/**] @property term Returns documents that contain an exact term in a provided field. To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */
term?: Partial<Record<<<Field>>, <<QueryDslTermQuery>> | <<FieldValue>>>>
pass:[/**] @property terms Returns documents that contain one or more exact terms in a provided field. To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */
terms?: <<QueryDslTermsQuery>>
pass:[/**] @property terms_set Returns documents that contain a minimum number of exact terms in a provided field. To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */
terms_set?: Partial<Record<<<Field>>, <<QueryDslTermsSetQuery>>>>
pass:[/**] @property text_expansion Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */
text_expansion?: Partial<Record<<<Field>>, <<QueryDslTextExpansionQuery>>>>
pass:[/**] @property weighted_tokens Supports returning text_expansion query results by sending in precomputed tokens with the query. */
weighted_tokens?: Partial<Record<<<Field>>, <<QueryDslWeightedTokensQuery>>>>
pass:[/**] @property wildcard Returns documents that contain terms matching a wildcard pattern. */
wildcard?: Partial<Record<<<Field>>, <<QueryDslWildcardQuery>> | string>>
pass:[/**] @property wrapper A query that accepts any other query as a base64 encoded string. */
wrapper?: <<QueryDslWrapperQuery>>
type?: <<QueryDslTypeQuery>>
}
[pass]
++++
</pre>
++++
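
Each container object carries one query variant, and compound queries nest containers recursively. A sketch composing several of the variants documented above (all index and field names hypothetical):

[source,ts]
----
// The outer container uses `bool`; every clause is itself a
// QueryDslQueryContainer with exactly one variant key.
const results = await client.search({
  index: 'articles',
  query: {
    bool: {
      must: [{ match: { title: 'search' } }],
      filter: [{ term: { status: 'published' } }],
      must_not: [{ exists: { field: 'deleted_at' } }],
      should: [{ match_phrase: { body: 'query dsl' } }]
    }
  }
})
----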

[discrete]
[[QueryDslQueryStringQuery]]
=== QueryDslQueryStringQuery

[pass]
++++
<pre>
++++
interface QueryDslQueryStringQuery extends <<QueryDslQueryBase>> {
pass:[/**] @property allow_leading_wildcard If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */
allow_leading_wildcard?: boolean
pass:[/**] @property analyzer <<Analyzer>> used to convert text in the query string into tokens. */
analyzer?: string
pass:[/**] @property analyze_wildcard If `true`, the query attempts to analyze wildcard terms in the query string. */
analyze_wildcard?: boolean
pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */
auto_generate_synonyms_phrase_query?: boolean
pass:[/**] @property default_field Default field to search if no field is provided in the query string. Supports wildcards (`*`). Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */
default_field?: <<Field>>
pass:[/**] @property default_operator Default boolean logic used to interpret text in the query string if no operators are specified. */
default_operator?: <<QueryDslOperator>>
pass:[/**] @property enable_position_increments If `true`, enable position increments in queries constructed from a `query_string` search. */
enable_position_increments?: boolean
escape?: boolean
pass:[/**] @property fields Array of fields to search. Supports wildcards (`*`). */
fields?: <<Field>>[]
pass:[/**] @property fuzziness Maximum edit distance allowed for fuzzy matching. */
fuzziness?: <<Fuzziness>>
pass:[/**] @property fuzzy_max_expansions Maximum number of terms to which the query expands for fuzzy matching. */
fuzzy_max_expansions?: <<integer>>
pass:[/**] @property fuzzy_prefix_length Number of beginning characters left unchanged for fuzzy matching. */
fuzzy_prefix_length?: <<integer>>
pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */
fuzzy_rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */
fuzzy_transpositions?: boolean
pass:[/**] @property lenient If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */
lenient?: boolean
pass:[/**] @property max_determinized_states Maximum number of automaton states required for the query. */
max_determinized_states?: <<integer>>
pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */
minimum_should_match?: <<MinimumShouldMatch>>
pass:[/**] @property phrase_slop Maximum number of positions allowed between matching tokens for phrases. */
phrase_slop?: <<double>>
pass:[/**] @property query <<Query>> string you wish to parse and use for search. */
query: string
pass:[/**] @property quote_analyzer <<Analyzer>> used to convert quoted text in the query string into tokens. For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */
quote_analyzer?: string
pass:[/**] @property quote_field_suffix Suffix appended to quoted text in the query string. You can use this suffix to use a different analysis method for exact matches. */
quote_field_suffix?: string
pass:[/**] @property rewrite Method used to rewrite the query. */
rewrite?: <<MultiTermQueryRewrite>>
pass:[/**] @property tie_breaker How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */
tie_breaker?: <<double>>
pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */
time_zone?: <<TimeZone>>
pass:[/**] @property type Determines how the query matches and scores documents. */
type?: <<QueryDslTextQueryType>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslRandomScoreFunction]]
=== QueryDslRandomScoreFunction

[pass]
++++
<pre>
++++
interface QueryDslRandomScoreFunction {
field?: <<Field>>
seed?: <<long>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslRangeQuery]]
=== QueryDslRangeQuery

[pass]
++++
<pre>
++++
type QueryDslRangeQuery = <<QueryDslUntypedRangeQuery>> | <<QueryDslDateRangeQuery>> | <<QueryDslNumberRangeQuery>> | <<QueryDslTermRangeQuery>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslRangeQueryBase]]
=== QueryDslRangeQueryBase

[pass]
++++
<pre>
++++
interface QueryDslRangeQueryBase<T = unknown> extends <<QueryDslQueryBase>> {
pass:[/**] @property relation Indicates how the range query matches values for `range` fields. */
relation?: <<QueryDslRangeRelation>>
pass:[/**] @property gt Greater than. */
gt?: T
pass:[/**] @property gte Greater than or equal to. */
gte?: T
pass:[/**] @property lt Less than. */
lt?: T
pass:[/**] @property lte Less than or equal to. */
lte?: T
from?: T | null
to?: T | null
}
[pass]
++++
</pre>
++++
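
A sketch of a numeric range over a hypothetical `price` field; date fields take the same `gt`/`gte`/`lt`/`lte` keys, plus date-math strings such as `'now-7d/d'`:

[source,ts]
----
// Matches documents where 10 <= price < 100.
const inBudget = await client.search({
  index: 'products',
  query: {
    range: {
      price: { gte: 10, lt: 100 }
    }
  }
})
----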
|
||
|
||
[discrete]
|
||
[[QueryDslRangeRelation]]
|
||
=== QueryDslRangeRelation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
type QueryDslRangeRelation = 'within' | 'contains' | 'intersects'
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[QueryDslRankFeatureFunction]]
|
||
=== QueryDslRankFeatureFunction
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface QueryDslRankFeatureFunction {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[QueryDslRankFeatureFunctionLinear]]
|
||
=== QueryDslRankFeatureFunctionLinear
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface QueryDslRankFeatureFunctionLinear {
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[QueryDslRankFeatureFunctionLogarithm]]
|
||
=== QueryDslRankFeatureFunctionLogarithm
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface QueryDslRankFeatureFunctionLogarithm {
|
||
pass:[/**] @property scaling_factor Configurable scaling factor. */
|
||
scaling_factor: <<float>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++
|
||
|
||
[discrete]
|
||
[[QueryDslRankFeatureFunctionSaturation]]
|
||
=== QueryDslRankFeatureFunctionSaturation
|
||
|
||
[pass]
|
||
++++
|
||
<pre>
|
||
++++
|
||
interface QueryDslRankFeatureFunctionSaturation {
|
||
pass:[/**] @property pivot Configurable pivot value so that the result will be less than 0.5. */
|
||
pivot?: <<float>>
|
||
}
|
||
[pass]
|
||
++++
|
||
</pre>
|
||
++++

[discrete]
[[QueryDslRankFeatureFunctionSigmoid]]
=== QueryDslRankFeatureFunctionSigmoid

[pass]
++++
<pre>
++++
interface QueryDslRankFeatureFunctionSigmoid {
  pass:[/**] @property pivot Configurable pivot value so that the result will be less than 0.5. */
  pivot: <<float>>
  pass:[/**] @property exponent Configurable exponent. */
  exponent: <<float>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslRankFeatureQuery]]
=== QueryDslRankFeatureQuery

[pass]
++++
<pre>
++++
interface QueryDslRankFeatureQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property field `rank_feature` or `rank_features` field used to boost relevance scores. */
  field: <<Field>>
  pass:[/**] @property saturation Saturation function used to boost relevance scores based on the value of the rank feature `field`. */
  saturation?: <<QueryDslRankFeatureFunctionSaturation>>
  pass:[/**] @property log Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */
  log?: <<QueryDslRankFeatureFunctionLogarithm>>
  pass:[/**] @property linear Linear function used to boost relevance scores based on the value of the rank feature `field`. */
  linear?: <<QueryDslRankFeatureFunctionLinear>>
  pass:[/**] @property sigmoid Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */
  sigmoid?: <<QueryDslRankFeatureFunctionSigmoid>>
}
[pass]
++++
</pre>
++++
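
As a minimal sketch, a `rank_feature` query might boost relevance by a hypothetical `pagerank` rank_feature field using the saturation function above (assuming a connected `client`):

[source,ts]
----
// Hypothetical usage sketch: boost by a rank_feature field.
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    bool: {
      must: { match: { title: 'elasticsearch' } },
      should: [
        // With an explicit pivot; omit `saturation` entirely to let
        // Elasticsearch compute a default pivot from the index.
        { rank_feature: { field: 'pagerank', saturation: { pivot: 8 } } },
      ],
    },
  },
})
----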

[discrete]
[[QueryDslRegexpQuery]]
=== QueryDslRegexpQuery

[pass]
++++
<pre>
++++
interface QueryDslRegexpQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property case_insensitive Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. When `false`, case sensitivity of matching depends on the underlying field’s mapping. */
  case_insensitive?: boolean
  pass:[/**] @property flags Enables optional operators for the regular expression. */
  flags?: string
  pass:[/**] @property max_determinized_states Maximum number of automaton states required for the query. */
  max_determinized_states?: <<integer>>
  pass:[/**] @property rewrite Method used to rewrite the query. */
  rewrite?: <<MultiTermQueryRewrite>>
  pass:[/**] @property value Regular expression for terms you wish to find in the provided field. */
  value: string
}
[pass]
++++
</pre>
++++
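
As a minimal sketch, a `regexp` query might look like this (assuming a connected `client`; the `user.id` field is hypothetical, and the pattern must match whole terms, not substrings):

[source,ts]
----
// Hypothetical usage sketch: terms starting with "k" and ending in "y".
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    regexp: {
      'user.id': {
        value: 'k.*y',
        flags: 'ALL',                  // enable all optional operators
        case_insensitive: true,
        max_determinized_states: 10000,
      },
    },
  },
})
----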

[discrete]
[[QueryDslRuleQuery]]
=== QueryDslRuleQuery

[pass]
++++
<pre>
++++
interface QueryDslRuleQuery extends <<QueryDslQueryBase>> {
  organic: <<QueryDslQueryContainer>>
  ruleset_ids: <<Id>>[]
  match_criteria: any
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslScriptQuery]]
=== QueryDslScriptQuery

[pass]
++++
<pre>
++++
interface QueryDslScriptQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property script Contains a script to run as a query. This script must return a boolean value, `true` or `false`. */
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslScriptScoreFunction]]
=== QueryDslScriptScoreFunction

[pass]
++++
<pre>
++++
interface QueryDslScriptScoreFunction {
  pass:[/**] @property script A script that computes a score. */
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslScriptScoreQuery]]
=== QueryDslScriptScoreQuery

[pass]
++++
<pre>
++++
interface QueryDslScriptScoreQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property min_score Documents with a score lower than this floating point number are excluded from the search results. */
  min_score?: <<float>>
  pass:[/**] @property query Query used to return documents. */
  query: <<QueryDslQueryContainer>>
  pass:[/**] @property script <<Script>> used to compute the score of documents returned by the query. Important: final relevance scores from the `script_score` query cannot be negative. */
  script: <<Script>> | string
}
[pass]
++++
</pre>
++++
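
As a minimal sketch, a `script_score` query might recompute each document's score from a hypothetical numeric `likes` field (assuming a connected `client`):

[source,ts]
----
// Hypothetical usage sketch: score = log(2 + likes) for matching docs.
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    script_score: {
      query: { match: { message: 'elasticsearch' } },
      script: {
        // Painless script; must not produce a negative score.
        source: "Math.log(2 + doc['likes'].value)",
      },
      min_score: 0.5, // exclude weakly scored documents
    },
  },
})
----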

[discrete]
[[QueryDslSemanticQuery]]
=== QueryDslSemanticQuery

[pass]
++++
<pre>
++++
interface QueryDslSemanticQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property field The field to query, which must be a semantic_text field type */
  field: string
  pass:[/**] @property query The query text */
  query: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslShapeFieldQuery]]
=== QueryDslShapeFieldQuery

[pass]
++++
<pre>
++++
interface QueryDslShapeFieldQuery {
  pass:[/**] @property indexed_shape Queries using a pre-indexed shape. */
  indexed_shape?: <<QueryDslFieldLookup>>
  pass:[/**] @property relation Spatial relation between the query shape and the document shape. */
  relation?: <<GeoShapeRelation>>
  pass:[/**] @property shape Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */
  shape?: <<GeoShape>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslShapeQuery]]
=== QueryDslShapeQuery

[pass]
++++
<pre>
++++
interface QueryDslShapeQueryKeys extends <<QueryDslQueryBase>> {
  ignore_unmapped?: boolean
}
type QueryDslShapeQuery = QueryDslShapeQueryKeys
& { [property: string]: <<QueryDslShapeFieldQuery>> | boolean | <<float>> | string }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSimpleQueryStringFlag]]
=== QueryDslSimpleQueryStringFlag

[pass]
++++
<pre>
++++
type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSimpleQueryStringFlags]]
=== QueryDslSimpleQueryStringFlags

[pass]
++++
<pre>
++++
type QueryDslSimpleQueryStringFlags = <<SpecUtilsPipeSeparatedFlags>><<<QueryDslSimpleQueryStringFlag>>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSimpleQueryStringQuery]]
=== QueryDslSimpleQueryStringQuery

[pass]
++++
<pre>
++++
interface QueryDslSimpleQueryStringQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property analyzer <<Analyzer>> used to convert text in the query string into tokens. */
  analyzer?: string
  pass:[/**] @property analyze_wildcard If `true`, the query attempts to analyze wildcard terms in the query string. */
  analyze_wildcard?: boolean
  pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, the parser creates a match_phrase query for each multi-position token. */
  auto_generate_synonyms_phrase_query?: boolean
  pass:[/**] @property default_operator Default boolean logic used to interpret text in the query string if no operators are specified. */
  default_operator?: <<QueryDslOperator>>
  pass:[/**] @property fields Array of fields you wish to search. Accepts wildcard expressions. You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */
  fields?: <<Field>>[]
  pass:[/**] @property flags List of enabled operators for the simple query string syntax. */
  flags?: <<QueryDslSimpleQueryStringFlags>>
  pass:[/**] @property fuzzy_max_expansions Maximum number of terms to which the query expands for fuzzy matching. */
  fuzzy_max_expansions?: <<integer>>
  pass:[/**] @property fuzzy_prefix_length Number of beginning characters left unchanged for fuzzy matching. */
  fuzzy_prefix_length?: <<integer>>
  pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */
  fuzzy_transpositions?: boolean
  pass:[/**] @property lenient If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */
  lenient?: boolean
  pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */
  minimum_should_match?: <<MinimumShouldMatch>>
  pass:[/**] @property query Query string in the simple query string syntax you wish to parse and use for search. */
  query: string
  pass:[/**] @property quote_field_suffix Suffix appended to quoted text in the query string. */
  quote_field_suffix?: string
}
[pass]
++++
</pre>
++++
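
As a minimal sketch, a `simple_query_string` search restricted to a subset of operators via the pipe-separated `flags` form might look like this (assuming a connected `client`; field names are hypothetical):

[source,ts]
----
// Hypothetical usage sketch: user-facing search box input with only
// the OR (|), AND (+), and PREFIX (*) operators enabled.
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    simple_query_string: {
      query: 'foo | bar + baz*',
      fields: ['title^5', 'body'], // boost matches in `title`
      flags: 'OR|AND|PREFIX',      // pipe-separated flag list
    },
  },
})
----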

[discrete]
[[QueryDslSpanContainingQuery]]
=== QueryDslSpanContainingQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanContainingQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property big Can be any span query. Matching spans from `big` that contain matches from `little` are returned. */
  big: <<QueryDslSpanQuery>>
  pass:[/**] @property little Can be any span query. Matching spans from `big` that contain matches from `little` are returned. */
  little: <<QueryDslSpanQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanFieldMaskingQuery]]
=== QueryDslSpanFieldMaskingQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanFieldMaskingQuery extends <<QueryDslQueryBase>> {
  field: <<Field>>
  query: <<QueryDslSpanQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanFirstQuery]]
=== QueryDslSpanFirstQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanFirstQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property end Controls the maximum end position permitted in a match. */
  end: <<integer>>
  pass:[/**] @property match Can be any other span type query. */
  match: <<QueryDslSpanQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanGapQuery]]
=== QueryDslSpanGapQuery

[pass]
++++
<pre>
++++
type QueryDslSpanGapQuery = Partial<Record<<<Field>>, <<integer>>>>
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanMultiTermQuery]]
=== QueryDslSpanMultiTermQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanMultiTermQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */
  match: <<QueryDslQueryContainer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanNearQuery]]
=== QueryDslSpanNearQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanNearQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property clauses Array of one or more other span type queries. */
  clauses: <<QueryDslSpanQuery>>[]
  pass:[/**] @property in_order Controls whether matches are required to be in-order. */
  in_order?: boolean
  pass:[/**] @property slop Controls the maximum number of intervening unmatched positions permitted. */
  slop?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanNotQuery]]
=== QueryDslSpanNotQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanNotQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property dist The number of tokens from within the include span that can’t have overlap with the exclude span. Equivalent to setting both `pre` and `post`. */
  dist?: <<integer>>
  pass:[/**] @property exclude Span query whose matches must not overlap those returned. */
  exclude: <<QueryDslSpanQuery>>
  pass:[/**] @property include Span query whose matches are filtered. */
  include: <<QueryDslSpanQuery>>
  pass:[/**] @property post The number of tokens after the include span that can’t have overlap with the exclude span. */
  post?: <<integer>>
  pass:[/**] @property pre The number of tokens before the include span that can’t have overlap with the exclude span. */
  pre?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanOrQuery]]
=== QueryDslSpanOrQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanOrQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property clauses Array of one or more other span type queries. */
  clauses: <<QueryDslSpanQuery>>[]
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanQuery]]
=== QueryDslSpanQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanQuery {
  pass:[/**] @property span_containing Accepts a list of span queries, but only returns those spans which also match a second span query. */
  span_containing?: <<QueryDslSpanContainingQuery>>
  pass:[/**] @property span_field_masking Allows queries like `span_near` or `span_or` across different fields. */
  span_field_masking?: <<QueryDslSpanFieldMaskingQuery>>
  pass:[/**] @property span_first Accepts another span query whose matches must appear within the first N positions of the field. */
  span_first?: <<QueryDslSpanFirstQuery>>
  span_gap?: <<QueryDslSpanGapQuery>>
  pass:[/**] @property span_multi Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */
  span_multi?: <<QueryDslSpanMultiTermQuery>>
  pass:[/**] @property span_near Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */
  span_near?: <<QueryDslSpanNearQuery>>
  pass:[/**] @property span_not Wraps another span query, and excludes any documents which match that query. */
  span_not?: <<QueryDslSpanNotQuery>>
  pass:[/**] @property span_or Combines multiple span queries and returns documents which match any of the specified queries. */
  span_or?: <<QueryDslSpanOrQuery>>
  pass:[/**] @property span_term The equivalent of the `term` query but for use with other span queries. */
  span_term?: Partial<Record<<<Field>>, <<QueryDslSpanTermQuery>> | string>>
  pass:[/**] @property span_within The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */
  span_within?: <<QueryDslSpanWithinQuery>>
}
[pass]
++++
</pre>
++++
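
As a minimal sketch, a `span_near` query built from `span_term` clauses might match "quick" and "fox" within two positions of each other, in order (assuming a connected `client`; the `body` field is hypothetical):

[source,ts]
----
// Hypothetical usage sketch: positional, phrase-like matching with spans.
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    span_near: {
      clauses: [
        { span_term: { body: 'quick' } },
        { span_term: { body: 'fox' } },
      ],
      slop: 2,        // up to 2 intervening positions
      in_order: true, // "quick" must precede "fox"
    },
  },
})
----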

[discrete]
[[QueryDslSpanTermQuery]]
=== QueryDslSpanTermQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanTermQuery extends <<QueryDslQueryBase>> {
  value: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSpanWithinQuery]]
=== QueryDslSpanWithinQuery

[pass]
++++
<pre>
++++
interface QueryDslSpanWithinQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property big Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. */
  big: <<QueryDslSpanQuery>>
  pass:[/**] @property little Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. */
  little: <<QueryDslSpanQuery>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslSparseVectorQuery]]
=== QueryDslSparseVectorQuery

[pass]
++++
<pre>
++++
interface QueryDslSparseVectorQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property field The name of the field that contains the token-weight pairs to be searched against. This field must be a mapped sparse_vector field. */
  field: <<Field>>
  pass:[/**] @property query_vector Dictionary of precomputed sparse vectors and their associated weights. Only one of inference_id or query_vector may be supplied in a request. */
  query_vector?: Record<string, <<float>>>
  pass:[/**] @property inference_id The inference ID to use to convert the query text into token-weight pairs. It must be the same inference ID that was used to create the tokens from the input text. Only one of inference_id or query_vector may be supplied in a request. If inference_id is specified, query must also be specified. */
  inference_id?: <<Id>>
  pass:[/**] @property query The query text you want to use for search. If inference_id is specified, query must also be specified. */
  query?: string
  pass:[/**] @property prune Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. Default: false */
  prune?: boolean
  pass:[/**] @property pruning_config Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. This is only used if prune is set to true. If prune is set to true but pruning_config is not specified, default values will be used. */
  pruning_config?: <<QueryDslTokenPruningConfig>>
}
[pass]
++++
</pre>
++++
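
As a minimal sketch of the two mutually exclusive forms described above, a `sparse_vector` query might be issued either through an inference endpoint or with precomputed token weights (assuming a connected `client`; the field and inference ID are hypothetical):

[source,ts]
----
// Hypothetical usage sketch 1: let an inference endpoint expand the text.
const withInference = await client.search({
  index: 'my-index', // hypothetical
  query: {
    sparse_vector: {
      field: 'ml.tokens',       // mapped sparse_vector field (hypothetical)
      inference_id: 'my-elser', // hypothetical inference endpoint ID
      query: 'how to bake bread',
      prune: true,              // drop non-significant tokens
    },
  },
})

// Hypothetical usage sketch 2: supply precomputed token-weight pairs.
const withVector = await client.search({
  index: 'my-index',
  query: {
    sparse_vector: {
      field: 'ml.tokens',
      query_vector: { bread: 1.2, bake: 0.9, oven: 0.4 },
    },
  },
})
----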

[discrete]
[[QueryDslTermQuery]]
=== QueryDslTermQuery

[pass]
++++
<pre>
++++
interface QueryDslTermQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property value Term you wish to find in the provided field. */
  value: <<FieldValue>>
  pass:[/**] @property case_insensitive Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */
  case_insensitive?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTermRangeQuery]]
=== QueryDslTermRangeQuery

[pass]
++++
<pre>
++++
interface QueryDslTermRangeQuery extends <<QueryDslRangeQueryBase>><string> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTermsLookup]]
=== QueryDslTermsLookup

[pass]
++++
<pre>
++++
interface QueryDslTermsLookup {
  index: <<IndexName>>
  id: <<Id>>
  path: <<Field>>
  routing?: <<Routing>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTermsQuery]]
=== QueryDslTermsQuery

[pass]
++++
<pre>
++++
interface QueryDslTermsQueryKeys extends <<QueryDslQueryBase>> {
}
type QueryDslTermsQuery = QueryDslTermsQueryKeys
& { [property: string]: <<QueryDslTermsQueryField>> | <<float>> | string }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTermsQueryField]]
=== QueryDslTermsQueryField

[pass]
++++
<pre>
++++
type QueryDslTermsQueryField = <<FieldValue>>[] | <<QueryDslTermsLookup>>
[pass]
++++
</pre>
++++
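
As a minimal sketch of both `terms` query forms above: an inline list of values, and a terms lookup that reads the value list from another document (assuming a connected `client`; index, document, and field names are hypothetical):

[source,ts]
----
// Hypothetical usage sketch 1: inline list of terms.
const inline = await client.search({
  index: 'my-index', // hypothetical
  query: {
    terms: { 'user.id': ['kimchy', 'elkbee'] },
  },
})

// Hypothetical usage sketch 2: terms lookup — fetch the list from the
// `followers` field of document 2 in the `users` index.
const lookup = await client.search({
  index: 'my-index',
  query: {
    terms: {
      'user.id': { index: 'users', id: '2', path: 'followers' },
    },
  },
})
----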

[discrete]
[[QueryDslTermsSetQuery]]
=== QueryDslTermsSetQuery

[pass]
++++
<pre>
++++
interface QueryDslTermsSetQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property minimum_should_match Specification describing number of matching terms required to return a document. */
  minimum_should_match?: <<MinimumShouldMatch>>
  pass:[/**] @property minimum_should_match_field Numeric field containing the number of matching terms required to return a document. */
  minimum_should_match_field?: <<Field>>
  pass:[/**] @property minimum_should_match_script Custom script containing the number of matching terms required to return a document. */
  minimum_should_match_script?: <<Script>> | string
  pass:[/**] @property terms Array of terms you wish to find in the provided field. */
  terms: string[]
}
[pass]
++++
</pre>
++++
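
As a minimal sketch, a `terms_set` query might let each document's own numeric field control how many of the supplied terms must match (assuming a connected `client`; the index and fields are hypothetical):

[source,ts]
----
// Hypothetical usage sketch: per-document minimum-match count.
const result = await client.search({
  index: 'job-candidates', // hypothetical
  query: {
    terms_set: {
      programming_languages: {
        terms: ['c++', 'java', 'php'],
        // Read the required number of matches from each document.
        minimum_should_match_field: 'required_matches',
      },
    },
  },
})
----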

[discrete]
[[QueryDslTextExpansionQuery]]
=== QueryDslTextExpansionQuery

[pass]
++++
<pre>
++++
interface QueryDslTextExpansionQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property model_id The text expansion NLP model to use */
  model_id: string
  pass:[/**] @property model_text The query text */
  model_text: string
  pass:[/**] @property pruning_config Token pruning configurations */
  pruning_config?: <<QueryDslTokenPruningConfig>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTextQueryType]]
=== QueryDslTextQueryType

[pass]
++++
<pre>
++++
type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix'
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTokenPruningConfig]]
=== QueryDslTokenPruningConfig

[pass]
++++
<pre>
++++
interface QueryDslTokenPruningConfig {
  pass:[/**] @property tokens_freq_ratio_threshold Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */
  tokens_freq_ratio_threshold?: <<integer>>
  pass:[/**] @property tokens_weight_threshold Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */
  tokens_weight_threshold?: <<float>>
  pass:[/**] @property only_score_pruned_tokens Whether to only score pruned tokens, vs only scoring kept tokens. */
  only_score_pruned_tokens?: boolean
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslTypeQuery]]
=== QueryDslTypeQuery

[pass]
++++
<pre>
++++
interface QueryDslTypeQuery extends <<QueryDslQueryBase>> {
  value: string
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslUntypedDecayFunction]]
=== QueryDslUntypedDecayFunction

[pass]
++++
<pre>
++++
interface QueryDslUntypedDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><any, any> {
}
type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys
& { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> }
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslUntypedDistanceFeatureQuery]]
=== QueryDslUntypedDistanceFeatureQuery

[pass]
++++
<pre>
++++
interface QueryDslUntypedDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><any, any> {
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslUntypedRangeQuery]]
=== QueryDslUntypedRangeQuery

[pass]
++++
<pre>
++++
interface QueryDslUntypedRangeQuery extends <<QueryDslRangeQueryBase>><any> {
  pass:[/**] @property format Date format used to convert `date` values in the query. */
  format?: <<DateFormat>>
  pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */
  time_zone?: <<TimeZone>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslWeightedTokensQuery]]
=== QueryDslWeightedTokensQuery

[pass]
++++
<pre>
++++
interface QueryDslWeightedTokensQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property tokens The tokens representing this query */
  tokens: Record<string, <<float>>>
  pass:[/**] @property pruning_config Token pruning configurations */
  pruning_config?: <<QueryDslTokenPruningConfig>>
}
[pass]
++++
</pre>
++++

[discrete]
[[QueryDslWildcardQuery]]
=== QueryDslWildcardQuery

[pass]
++++
<pre>
++++
interface QueryDslWildcardQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property case_insensitive Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */
  case_insensitive?: boolean
  pass:[/**] @property rewrite Method used to rewrite the query. */
  rewrite?: <<MultiTermQueryRewrite>>
  pass:[/**] @property value Wildcard pattern for terms you wish to find in the provided field. Required when `wildcard` is not set. */
  value?: string
  pass:[/**] @property wildcard Wildcard pattern for terms you wish to find in the provided field. Required when `value` is not set. */
  wildcard?: string
}
[pass]
++++
</pre>
++++
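
As a minimal sketch, a `wildcard` query might look like this, where `*` matches any character sequence and `?` matches a single character (assuming a connected `client`; the field name is hypothetical):

[source,ts]
----
// Hypothetical usage sketch: match user IDs like "kimchy", "kirby", ...
const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    wildcard: {
      'user.id': {
        value: 'ki*y',
        case_insensitive: true,
      },
    },
  },
})
----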

[discrete]
[[QueryDslWrapperQuery]]
=== QueryDslWrapperQuery

[pass]
++++
<pre>
++++
interface QueryDslWrapperQuery extends <<QueryDslQueryBase>> {
  pass:[/**] @property query A base64 encoded query. The binary data format can be any of JSON, YAML, CBOR or SMILE encodings */
  query: string
}
[pass]
++++
</pre>
++++
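
As a minimal sketch, a `wrapper` query might carry a base64-encoded JSON query, built here with Node.js `Buffer` (assuming a connected `client` running on Node):

[source,ts]
----
// Hypothetical usage sketch: base64-encode an inner JSON query.
const inner = JSON.stringify({ term: { 'user.id': 'kimchy' } })

const result = await client.search({
  index: 'my-index', // hypothetical
  query: {
    wrapper: {
      query: Buffer.from(inner).toString('base64'),
    },
  },
})
----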

[discrete]
[[QueryDslZeroTermsQuery]]
=== QueryDslZeroTermsQuery

[pass]
++++
<pre>
++++
type QueryDslZeroTermsQuery = 'all' | 'none'
[pass]
++++
</pre>
++++