[[reference-rank_eval]]
== client.rankEval

////////
===========================================================================================================================
||                                                                                                                       ||
||                                                                                                                       ||
||                                                                                                                       ||
||        ██████╗ ███████╗ █████╗ ██████╗ ███╗   ███╗███████╗                                                            ||
||        ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝                                                            ||
||        ██████╔╝█████╗  ███████║██║  ██║██╔████╔██║█████╗                                                              ||
||        ██╔══██╗██╔══╝  ██╔══██║██║  ██║██║╚██╔╝██║██╔══╝                                                              ||
||        ██║  ██║███████╗██║  ██║██████╔╝██║ ╚═╝ ██║███████╗                                                            ||
||        ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝╚═════╝ ╚═╝     ╚═╝╚══════╝                                                            ||
||                                                                                                                       ||
||                                                                                                                       ||
||        This file is autogenerated, DO NOT send pull requests that changes this file directly.                         ||
||        You should update the script that does the generation, which can be found in:                                  ||
||        https://github.com/elastic/elastic-client-generator-js                                                         ||
||                                                                                                                       ||
||        You can run the script with the following command:                                                             ||
||           npm run elasticsearch -- --version <version>                                                                ||
||                                                                                                                       ||
||                                                                                                                       ||
||                                                                                                                       ||
===========================================================================================================================
////////

++++
<style>
.lang-ts a.xref {
  text-decoration: underline !important;
}
</style>
++++

[discrete]
[[client.rankEval]]
== `client.rankEval()`

Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries.

{ref}/search-rank-eval.html[{es} documentation]

[discrete]
=== Function signature

[source,ts]
----
(request: RankEvalRequest, options?: TransportRequestOptions) => Promise<RankEvalResponse>
----

[discrete]
=== Request

[source,ts,subs=+macros]
----
interface RankEvalRequest extends <<RequestBase>> {
  index?: <<Indices>>
  allow_no_indices?: boolean
  expand_wildcards?: <<ExpandWildcards>>
  ignore_unavailable?: boolean
  search_type?: string
  requests: <<RankEvalRankEvalRequestItem>>[]
  metric?: <<RankEvalRankEvalMetric>>
}
----

[discrete]
=== Response

[source,ts,subs=+macros]
----
interface RankEvalResponse {
  metric_score: <<double>>
  details: Record<<<Id>>, <<RankEvalRankEvalMetricDetail>>>
  failures: Record<string, any>
}
----