[[reference-shared-types-rank_eval]]

////////
===========================================================================================================================
||                                                                                                                       ||
||                                                                                                                       ||
||                                                                                                                       ||
||        ██████╗ ███████╗ █████╗ ██████╗ ███╗   ███╗███████╗                                                            ||
||        ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝                                                            ||
||        ██████╔╝█████╗  ███████║██║  ██║██╔████╔██║█████╗                                                              ||
||        ██╔══██╗██╔══╝  ██╔══██║██║  ██║██║╚██╔╝██║██╔══╝                                                              ||
||        ██║  ██║███████╗██║  ██║██████╔╝██║ ╚═╝ ██║███████╗                                                            ||
||        ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝╚═════╝ ╚═╝     ╚═╝╚══════╝                                                            ||
||                                                                                                                       ||
||                                                                                                                       ||
||        This file is autogenerated, DO NOT send pull requests that change this file directly.                          ||
||        You should update the script that does the generation, which can be found in:                                  ||
||        https://github.com/elastic/elastic-client-generator-js                                                         ||
||                                                                                                                       ||
||        You can run the script with the following command:                                                             ||
||          npm run elasticsearch -- --version <version>                                                                 ||
||                                                                                                                       ||
||                                                                                                                       ||
||                                                                                                                       ||
===========================================================================================================================
////////

== Shared RankEval types

[discrete]
[[RankEvalDocumentRating]]
=== RankEvalDocumentRating

[pass]
++++
<pre>
++++
interface RankEvalDocumentRating {
  pass:[/**] @property _id The document ID. */
  _id: <<Id>>
  pass:[/**] @property _index The document’s index. For data streams, this should be the document’s backing index. */
  _index: <<IndexName>>
  pass:[/**] @property rating The document’s relevance with regard to this search request. */
  rating: <<integer>>
}
[pass]
++++
</pre>
++++
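
For orientation, here is a minimal sketch of a ratings list, assuming an 8.x client that exports these shared types from `@elastic/elasticsearch/lib/api/types`; the index and document IDs are illustrative:

[source,ts]
----
import type { RankEvalDocumentRating } from '@elastic/elasticsearch/lib/api/types'

// Relevance judgments for one query. The integer scale is yours to
// choose (here 0 = irrelevant, 3 = highly relevant), as long as it is
// used consistently within a single evaluation request.
const ratings: RankEvalDocumentRating[] = [
  { _id: 'doc-1', _index: 'my-index', rating: 3 },
  { _id: 'doc-2', _index: 'my-index', rating: 1 },
  { _id: 'doc-3', _index: 'my-index', rating: 0 }
]
----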

[discrete]
[[RankEvalRankEvalHit]]
=== RankEvalRankEvalHit

[pass]
++++
<pre>
++++
interface RankEvalRankEvalHit {
  _id: <<Id>>
  _index: <<IndexName>>
  _score: <<double>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalHitItem]]
=== RankEvalRankEvalHitItem

[pass]
++++
<pre>
++++
interface RankEvalRankEvalHitItem {
  hit: <<RankEvalRankEvalHit>>
  rating?: <<double>> | null
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalMetric]]
=== RankEvalRankEvalMetric

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetric {
  precision?: <<RankEvalRankEvalMetricPrecision>>
  recall?: <<RankEvalRankEvalMetricRecall>>
  mean_reciprocal_rank?: <<RankEvalRankEvalMetricMeanReciprocalRank>>
  dcg?: <<RankEvalRankEvalMetricDiscountedCumulativeGain>>
  expected_reciprocal_rank?: <<RankEvalRankEvalMetricExpectedReciprocalRank>>
}
[pass]
++++
</pre>
++++
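
Exactly one of these optional properties should be set per evaluation request. As a sketch (same import-path assumption as above), selecting mean reciprocal rank over the top 20 hits:

[source,ts]
----
import type { RankEvalRankEvalMetric } from '@elastic/elasticsearch/lib/api/types'

// Choose exactly one metric. Here: mean reciprocal rank over the
// top 20 hits, treating ratings >= 2 as relevant.
const metric: RankEvalRankEvalMetric = {
  mean_reciprocal_rank: { k: 20, relevant_rating_threshold: 2 }
}
----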

[discrete]
[[RankEvalRankEvalMetricBase]]
=== RankEvalRankEvalMetricBase

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricBase {
  pass:[/**] @property k Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */
  k?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalMetricDetail]]
=== RankEvalRankEvalMetricDetail

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricDetail {
  pass:[/**] @property metric_score The metric_score in the details section shows the contribution of this query to the global quality metric score. */
  metric_score: <<double>>
  pass:[/**] @property unrated_docs The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a rating value. This can be used to ask the user to supply ratings for these documents. */
  unrated_docs: <<RankEvalUnratedDocument>>[]
  pass:[/**] @property hits The hits section shows a grouping of the search results with their supplied ratings. */
  hits: <<RankEvalRankEvalHitItem>>[]
  pass:[/**] @property metric_details The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results. */
  metric_details: Record<string, Record<string, any>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalMetricDiscountedCumulativeGain]]
=== RankEvalRankEvalMetricDiscountedCumulativeGain

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricDiscountedCumulativeGain extends <<RankEvalRankEvalMetricBase>> {
  pass:[/**] @property normalize If set to true, this metric will calculate the Normalized DCG. */
  normalize?: boolean
}
[pass]
++++
</pre>
++++
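
With `normalize: true` the score becomes nDCG, i.e. the DCG of the actual ranking divided by the DCG of the ideal ranking, which makes scores comparable across queries. A sketch under the same assumptions as above:

[source,ts]
----
import type { RankEvalRankEvalMetricDiscountedCumulativeGain } from '@elastic/elasticsearch/lib/api/types'

// nDCG over the top 10 hits: DCG divided by the ideal DCG, so the
// score lands in [0, 1] regardless of how many rated documents exist.
const dcg: RankEvalRankEvalMetricDiscountedCumulativeGain = {
  k: 10,
  normalize: true
}
----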

[discrete]
[[RankEvalRankEvalMetricExpectedReciprocalRank]]
=== RankEvalRankEvalMetricExpectedReciprocalRank

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricExpectedReciprocalRank extends <<RankEvalRankEvalMetricBase>> {
  pass:[/**] @property maximum_relevance The highest relevance grade used in the user-supplied relevance judgments. */
  maximum_relevance: <<integer>>
}
[pass]
++++
</pre>
++++
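
Unlike the other metric options, `maximum_relevance` is required: ERR maps each rating `r` to a relevance probability `(2^r - 1) / 2^max`, so it needs to know the top of the grading scale. A sketch, with the same import-path assumption as above:

[source,ts]
----
import type { RankEvalRankEvalMetricExpectedReciprocalRank } from '@elastic/elasticsearch/lib/api/types'

// ERR with a 0..3 rating scale: a rating r becomes the relevance
// probability (2^r - 1) / 2^3 before the positional discount applies.
const err: RankEvalRankEvalMetricExpectedReciprocalRank = {
  maximum_relevance: 3,
  k: 20
}
----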

[discrete]
[[RankEvalRankEvalMetricMeanReciprocalRank]]
=== RankEvalRankEvalMetricMeanReciprocalRank

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricMeanReciprocalRank extends <<RankEvalRankEvalMetricRatingTreshold>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalMetricPrecision]]
=== RankEvalRankEvalMetricPrecision

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricPrecision extends <<RankEvalRankEvalMetricRatingTreshold>> {
  pass:[/**] @property ignore_unlabeled Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and count as neither relevant nor irrelevant. If set to false (the default), they are treated as irrelevant. */
  ignore_unlabeled?: boolean
}
[pass]
++++
</pre>
++++
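
Combining the inherited `k` and `relevant_rating_threshold` with `ignore_unlabeled`, a precision-at-10 sketch (same import-path assumption as above):

[source,ts]
----
import type { RankEvalRankEvalMetricPrecision } from '@elastic/elasticsearch/lib/api/types'

// Precision@10: hits rated >= 1 count as relevant; hits without a
// rating are skipped instead of being counted as irrelevant.
const precision: RankEvalRankEvalMetricPrecision = {
  k: 10,
  relevant_rating_threshold: 1,
  ignore_unlabeled: true
}
----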

[discrete]
[[RankEvalRankEvalMetricRatingTreshold]]
=== RankEvalRankEvalMetricRatingTreshold

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricRatingTreshold extends <<RankEvalRankEvalMetricBase>> {
  pass:[/**] @property relevant_rating_threshold Sets the rating threshold above which documents are considered to be "relevant". */
  relevant_rating_threshold?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalMetricRecall]]
=== RankEvalRankEvalMetricRecall

[pass]
++++
<pre>
++++
interface RankEvalRankEvalMetricRecall extends <<RankEvalRankEvalMetricRatingTreshold>> {
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalQuery]]
=== RankEvalRankEvalQuery

[pass]
++++
<pre>
++++
interface RankEvalRankEvalQuery {
  query: <<QueryDslQueryContainer>>
  size?: <<integer>>
}
[pass]
++++
</pre>
++++

[discrete]
[[RankEvalRankEvalRequestItem]]
=== RankEvalRankEvalRequestItem

[pass]
++++
<pre>
++++
interface RankEvalRankEvalRequestItem {
  pass:[/**] @property id The search request’s ID, used to group result details later. */
  id: <<Id>>
  pass:[/**] @property request The query being evaluated. */
  request?: <<RankEvalRankEvalQuery>> | <<QueryDslQueryContainer>>
  pass:[/**] @property ratings The list of document ratings. */
  ratings: <<RankEvalDocumentRating>>[]
  pass:[/**] @property template_id The search template <<Id>>. */
  template_id?: <<Id>>
  pass:[/**] @property params The search template parameters. */
  params?: Record<string, any>
}
[pass]
++++
</pre>
++++
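
`request` and `template_id`/`params` are alternatives: supply either an inline query or a stored search template plus its parameters. A sketch of the inline form (index, IDs, and query are illustrative):

[source,ts]
----
import type { RankEvalRankEvalRequestItem } from '@elastic/elasticsearch/lib/api/types'

// One labeled query using the inline `request` form. The templated
// alternative would omit `request` and set `template_id` and `params`.
const item: RankEvalRankEvalRequestItem = {
  id: 'query-1',
  request: { query: { match: { title: 'elasticsearch' } } },
  ratings: [
    { _id: 'doc-1', _index: 'my-index', rating: 2 },
    { _id: 'doc-2', _index: 'my-index', rating: 0 }
  ]
}
----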

[discrete]
[[RankEvalRequest]]
=== RankEvalRequest

[pass]
++++
<pre>
++++
interface RankEvalRequest extends <<RequestBase>> {
  index?: <<Indices>>
  allow_no_indices?: boolean
  expand_wildcards?: <<ExpandWildcards>>
  ignore_unavailable?: boolean
  search_type?: string
  requests: <<RankEvalRankEvalRequestItem>>[]
  metric?: <<RankEvalRankEvalMetric>>
}
[pass]
++++
</pre>
++++
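
Putting the pieces together, a minimal end-to-end sketch, assuming an 8.x client (where the response body is returned directly) and an illustrative index:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evaluate a single labeled query with precision@10.
const response = await client.rankEval({
  index: 'my-index',
  requests: [
    {
      id: 'query-1',
      request: { query: { match: { title: 'elasticsearch' } } },
      ratings: [
        { _id: 'doc-1', _index: 'my-index', rating: 2 },
        { _id: 'doc-2', _index: 'my-index', rating: 0 }
      ]
    }
  ],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
----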

[discrete]
[[RankEvalResponse]]
=== RankEvalResponse

[pass]
++++
<pre>
++++
interface RankEvalResponse {
  metric_score: <<double>>
  details: Record<<<Id>>, <<RankEvalRankEvalMetricDetail>>>
  failures: Record<string, any>
}
[pass]
++++
</pre>
++++
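
Continuing the sketch above, `metric_score` is the combined score across all request items, while `details` is keyed by the `id` of each item:

[source,ts]
----
// Overall quality score across every evaluated query.
console.log('combined score:', response.metric_score)

// Per-query breakdown, keyed by the request item ids.
for (const [id, detail] of Object.entries(response.details)) {
  console.log(id, 'score:', detail.metric_score,
    'unrated docs:', detail.unrated_docs.length)
}
----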

[discrete]
[[RankEvalUnratedDocument]]
=== RankEvalUnratedDocument

[pass]
++++
<pre>
++++
interface RankEvalUnratedDocument {
  _id: <<Id>>
  _index: <<IndexName>>
}
[pass]
++++
</pre>
++++