Compare commits

13 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 496161acdf | |
| | 542c377459 | |
| | 2ea3979e95 | |
| | 5603482c43 | |
| | ba2955947e | |
| | 768295bdb0 | |
| | b159d474b0 | |
| | bda15ca3ca | |
| | 2cc0fd4df9 | |
| | 603e4695cb | |
| | 2c6e0ddb62 | |
| | de27dd9697 | |
| | 295553c249 | |
@@ -1,13 +1,13 @@
---
- job:
name: elastic+elasticsearch-js+8.2
display-name: 'elastic / elasticsearch-js # 8.2'
description: Testing the elasticsearch-js 8.2 branch.
name: elastic+elasticsearch-js+8.0
display-name: 'elastic / elasticsearch-js # 8.0'
description: Testing the elasticsearch-js 8.0 branch.
junit_results: "*-junit.xml"
parameters:
- string:
name: branch_specifier
default: refs/heads/8.2
default: refs/heads/8.0
description: the Git branch specifier to build (<branchName>, <tagName>,
<commitId>, etc.)
triggers:
@@ -1,6 +1,6 @@
---
STACK_VERSION:
- "8.3.3-SNAPSHOT"
- "8.2.0-SNAPSHOT"

NODE_JS_VERSION:
- 18

@@ -18,7 +18,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.2/release-notes-8.2.1.

The previous release contained a bug that broken ndjson APIs.
We have released `v8.2.0-patch.1` to address this.
This fix is the same as the one we have released and we strongly recommend upgrading to this version.
This fix is the same as the one we have released and we storngly recommend upgrading to this version.

[discrete]
===== Fix node shutdown apis https://github.com/elastic/elasticsearch-js/pull/1697[#1697]

@@ -77,8 +77,8 @@ async function run () {
// fix the document before to try it again.
status: action[operation].status,
error: action[operation].error,
operation: operations[i * 2],
document: operations[i * 2 + 1]
operation: body[i * 2],
document: body[i * 2 + 1]
})
}
})

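For context, the hunk above touches the `onDrop` callback of the bulk helper, where dropped documents are reported together with the operation that failed. A minimal sketch of how that callback is typically wired up; the node URL, index name, and documents are hypothetical, and the exact `onDrop` fields depend on the client version:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const result = await client.helpers.bulk({
    datasource: [{ id: 1, text: 'hello' }, { id: 2, text: 'world' }], // hypothetical documents
    onDocument () {
      return { index: { _index: 'my-index' } } // hypothetical index name
    },
    onDrop (doc) {
      // A dropped document exposes the failing operation and its source,
      // which is what the operation/document lines in the diff populate.
      console.error(doc.status, doc.error, doc.operation, doc.document)
    }
  })
  console.log(result)
}

run().catch(console.log)
```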
@@ -1,6 +1,6 @@
= Elasticsearch JavaScript Client

include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
:branch: master
include::{asciidoc-dir}/../../shared/attributes.asciidoc[]

include::introduction.asciidoc[]

@@ -377,9 +377,9 @@ child.search({
To improve observability, the client offers an easy way to configure the
`X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this
allows you to discover this identifier in the
https://www.elastic.co/guide/en/elasticsearch/reference/8.3/logging.html#deprecation-logging[deprecation logs],
helps you with https://www.elastic.co/guide/en/elasticsearch/reference/8.3/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
as well as https://www.elastic.co/guide/en/elasticsearch/reference/8.3/tasks.html#_identifying_running_tasks[identifying running tasks].
https://www.elastic.co/guide/en/elasticsearch/reference/master/logging.html#deprecation-logging[deprecation logs],
helps you with https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin]
as well as https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html#_identifying_running_tasks[identifying running tasks].

The `X-Opaque-Id` should be configured in each request, for doing that you can
use the `opaqueId` option, as you can see in the following example. The

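The documentation text above refers to an example of the `opaqueId` request option. A short sketch of what such a request looks like; the node URL, index name, and identifier are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // The second argument carries per-request options; `opaqueId` is sent
  // to Elasticsearch as the X-Opaque-Id header.
  const result = await client.search(
    { index: 'my-index', query: { match_all: {} } },
    { opaqueId: 'my-search-123' }
  )
  console.log(result.hits.hits)
}

run().catch(console.log)
```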
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
{
"name": "@elastic/elasticsearch",
"version": "8.3.3",
"versionCanary": "8.3.3-canary.0",
"version": "8.2.1",
"versionCanary": "8.2.1-canary.0",
"description": "The official Elasticsearch client for Node.js",
"main": "index.js",
"types": "index.d.ts",
@@ -91,4 +91,4 @@
"coverage": false,
"check-coverage": false
}
}
}

@@ -91,7 +91,7 @@ export default class Internal {
async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async health (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = []
const acceptedPath: string[] = ['component', 'feature']
const querystring: Record<string, any> = {}
const body = undefined

@@ -104,18 +104,8 @@ export default class Internal {
}
}

let method = ''
let path = ''
if (params.component != null && params.feature != null) {
method = 'GET'
path = `/_internal/_health/${encodeURIComponent(params.component.toString())}/${encodeURIComponent(params.feature.toString())}`
} else if (params.component != null) {
method = 'GET'
path = `/_internal/_health/${encodeURIComponent(params.component.toString())}`
} else {
method = 'GET'
path = '/_internal/_health'
}
const method = 'GET'
const path = '/_internal/_health'
return await this.transport.request({ path, method, querystring, body }, options)
}

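The generated method above selects the most specific `/_internal/_health` route for the path parameters it receives. A simplified, standalone sketch of that selection logic; the helper name and the example component value are hypothetical:

```ts
// Hypothetical helper mirroring the path selection shown in the diff.
function buildHealthPath (params: { component?: string, feature?: string }): string {
  if (params.component != null && params.feature != null) {
    return `/_internal/_health/${encodeURIComponent(params.component)}/${encodeURIComponent(params.feature)}`
  } else if (params.component != null) {
    return `/_internal/_health/${encodeURIComponent(params.component)}`
  } else {
    return '/_internal/_health'
  }
}

console.log(buildHealthPath({}))                                   // /_internal/_health
console.log(buildHealthPath({ component: 'shards_availability' })) // hypothetical component name
```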
@@ -122,7 +122,7 @@ export default class Enrich {
async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<T.EnrichPutPolicyResponse>
async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['name']
const acceptedBody: string[] = ['geo_match', 'match', 'range']
const acceptedBody: string[] = ['geo_match', 'match']
const querystring: Record<string, any> = {}
// @ts-expect-error
const userBody: any = params?.body

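The `acceptedBody` change above concerns which policy types `enrich.putPolicy` forwards in the request body. A hedged usage sketch of the call itself; the policy name, index, and field names are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Creates a match-type enrich policy; geo_match (and, on one side of the
  // diff, range) policies follow the same request shape.
  await client.enrich.putPolicy({
    name: 'users-policy', // hypothetical policy name
    match: {
      indices: 'users',
      match_field: 'email',
      enrich_fields: ['first_name', 'last_name']
    }
  })
}

run().catch(console.log)
```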
@@ -1158,10 +1158,10 @@ export default class Ml {
return await this.transport.request({ path, method, querystring, body }, options)
}

async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelResponse>
async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInferTrainedModelResponse, unknown>>
async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlInferTrainedModelResponse>
async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelDeploymentResponse>
async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInferTrainedModelDeploymentResponse, unknown>>
async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<T.MlInferTrainedModelDeploymentResponse>
async inferTrainedModelDeployment (this: That, params: T.MlInferTrainedModelDeploymentRequest | TB.MlInferTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['model_id']
const acceptedBody: string[] = ['docs', 'inference_config']
const querystring: Record<string, any> = {}
@@ -1188,7 +1188,7 @@ export default class Ml {
}

const method = 'POST'
const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_infer`
const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_infer`
return await this.transport.request({ path, method, querystring, body }, options)
}

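One side of the diff exposes `ml.inferTrainedModel` (POSTing to `/_ml/trained_models/<model_id>/_infer`), the other `ml.inferTrainedModelDeployment`. A hedged sketch of the `inferTrainedModel` variant; the node URL, model id, and input document are hypothetical, and the response shape follows the `MlInferTrainedModelResponse` type further down:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.ml.inferTrainedModel({
    model_id: 'my-trained-model',                   // hypothetical model id
    docs: [{ text_field: 'some text to classify' }] // hypothetical input document
  })
  console.log(response.inference_results)
}

run().catch(console.log)
```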
@@ -8495,7 +8495,6 @@ export interface DanglingIndicesListDanglingIndicesResponse {
export interface EnrichConfiguration {
geo_match?: EnrichPolicy
match: EnrichPolicy
range: EnrichPolicy
}

export interface EnrichPolicy {
@@ -8544,7 +8543,6 @@ export interface EnrichPutPolicyRequest extends RequestBase {
name: Name
geo_match?: EnrichPolicy
match?: EnrichPolicy
range?: EnrichPolicy
}

export type EnrichPutPolicyResponse = AcknowledgedResponseBase
@@ -11867,31 +11865,6 @@ export interface MlInferenceConfigCreateContainer {
ner?: MlNerInferenceOptions
pass_through?: MlPassThroughInferenceOptions
text_embedding?: MlTextEmbeddingInferenceOptions
question_answering?: MlQuestionAnsweringInferenceOptions
}

export interface MlInferenceConfigUpdateContainer {
regression?: MlRegressionInferenceOptions
classification?: MlClassificationInferenceOptions
text_classification?: MlTextClassificationInferenceUpdateOptions
zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
fill_mask?: MlFillMaskInferenceUpdateOptions
ner?: MlNerInferenceUpdateOptions
pass_through?: MlPassThroughInferenceUpdateOptions
text_embedding?: MlTextEmbeddingInferenceUpdateOptions
question_answering?: MlQuestionAnsweringInferenceUpdateOptions
}

export interface MlInferenceResponseResult {
entities?: MlTrainedModelEntities[]
is_truncated?: boolean
predicted_value?: MlPredictedValue[]
predicted_value_sequence?: string
prediction_probability?: double
prediction_score?: double
top_classes?: MlTopClassEntry[]
warning?: string
feature_importance?: MlTrainedModelInferenceFeatureImportance[]
}

export interface MlInfluence {
@@ -12072,6 +12045,15 @@ export interface MlNlpBertTokenizationConfig {
span?: integer
}

export interface MlNlpInferenceConfigUpdateContainer {
text_classification?: MlTextClassificationInferenceUpdateOptions
zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
fill_mask?: MlFillMaskInferenceUpdateOptions
ner?: MlNerInferenceUpdateOptions
pass_through?: MlPassThroughInferenceUpdateOptions
text_embedding?: MlTextEmbeddingInferenceUpdateOptions
}

export interface MlNlpRobertaTokenizationConfig {
add_prefix_space?: boolean
with_special_tokens?: boolean
@@ -12128,22 +12110,7 @@ export interface MlPerPartitionCategorization {
stop_on_warn?: boolean
}

export type MlPredictedValue = string | double | boolean | integer

export interface MlQuestionAnsweringInferenceOptions {
num_top_classes?: integer
tokenization?: MlTokenizationConfigContainer
results_field?: string
max_answer_length?: integer
}

export interface MlQuestionAnsweringInferenceUpdateOptions {
question: string
num_top_classes?: integer
tokenization?: MlNlpTokenizationUpdateOptions
results_field?: string
max_answer_length?: integer
}
export type MlPredictedValue = string | double

export interface MlRegressionInferenceOptions {
results_field?: Field
@@ -12283,14 +12250,14 @@ export interface MlTrainedModelDeploymentNodesStats {
average_inference_time_ms: double
error_count: integer
inference_count: integer
inference_threads: integer
last_access: long
model_threads: integer
node: MlDiscoveryNode
number_of_allocations: integer
number_of_pending_requests: integer
rejection_execution_count: integer
routing_state: MlTrainedModelAllocationRoutingTable
start_time: long
threads_per_allocation: integer
timeout_count: integer
}

@@ -12298,15 +12265,15 @@ export interface MlTrainedModelDeploymentStats {
allocation_status: MlTrainedModelDeploymentAllocationStatus
error_count: integer
inference_count: integer
inference_threads: integer
model_id: Id
model_threads: integer
nodes: MlTrainedModelDeploymentNodesStats
number_of_allocations: integer
queue_capacity: integer
rejected_execution_count: integer
reason: string
start_time: long
state: MlDeploymentState
threads_per_allocation: integer
timeout_count: integer
}

@@ -12318,17 +12285,6 @@ export interface MlTrainedModelEntities {
end_pos: integer
}

export interface MlTrainedModelInferenceClassImportance {
class_name: string
importance: double
}

export interface MlTrainedModelInferenceFeatureImportance {
feature_name: string
importance?: double
classes?: MlTrainedModelInferenceClassImportance[]
}

export interface MlTrainedModelInferenceStats {
cache_miss_count: integer
failure_count: integer
@@ -12925,15 +12881,21 @@ export interface MlGetTrainedModelsStatsResponse {
trained_model_stats: MlTrainedModelStats[]
}

export interface MlInferTrainedModelRequest extends RequestBase {
export interface MlInferTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
timeout?: Time
docs: Record<string, any>[]
inference_config?: MlInferenceConfigUpdateContainer
docs: Record<string, string>[]
inference_config?: MlNlpInferenceConfigUpdateContainer
}

export interface MlInferTrainedModelResponse {
inference_results: MlInferenceResponseResult[]
export interface MlInferTrainedModelDeploymentResponse {
entities?: MlTrainedModelEntities[]
is_truncated?: boolean
predicted_value?: MlPredictedValue[]
predicted_value_sequence?: string
prediction_probability?: double
top_classes: MlTopClassEntry[]
warning?: string
}

export interface MlInfoAnomalyDetectors {
@@ -13356,9 +13318,9 @@ export interface MlStartDatafeedResponse {

export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
number_of_allocations?: integer
inference_threads?: integer
model_threads?: integer
queue_capacity?: integer
threads_per_allocation?: integer
timeout?: Time
wait_for?: MlDeploymentAllocationState
}

@@ -8637,7 +8637,6 @@ export interface DanglingIndicesListDanglingIndicesResponse {
export interface EnrichConfiguration {
geo_match?: EnrichPolicy
match: EnrichPolicy
range: EnrichPolicy
}

export interface EnrichPolicy {
@@ -8688,7 +8687,6 @@ export interface EnrichPutPolicyRequest extends RequestBase {
body?: {
geo_match?: EnrichPolicy
match?: EnrichPolicy
range?: EnrichPolicy
}
}

@@ -12103,31 +12101,6 @@ export interface MlInferenceConfigCreateContainer {
ner?: MlNerInferenceOptions
pass_through?: MlPassThroughInferenceOptions
text_embedding?: MlTextEmbeddingInferenceOptions
question_answering?: MlQuestionAnsweringInferenceOptions
}

export interface MlInferenceConfigUpdateContainer {
regression?: MlRegressionInferenceOptions
classification?: MlClassificationInferenceOptions
text_classification?: MlTextClassificationInferenceUpdateOptions
zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
fill_mask?: MlFillMaskInferenceUpdateOptions
ner?: MlNerInferenceUpdateOptions
pass_through?: MlPassThroughInferenceUpdateOptions
text_embedding?: MlTextEmbeddingInferenceUpdateOptions
question_answering?: MlQuestionAnsweringInferenceUpdateOptions
}

export interface MlInferenceResponseResult {
entities?: MlTrainedModelEntities[]
is_truncated?: boolean
predicted_value?: MlPredictedValue[]
predicted_value_sequence?: string
prediction_probability?: double
prediction_score?: double
top_classes?: MlTopClassEntry[]
warning?: string
feature_importance?: MlTrainedModelInferenceFeatureImportance[]
}

export interface MlInfluence {
@@ -12308,6 +12281,15 @@ export interface MlNlpBertTokenizationConfig {
span?: integer
}

export interface MlNlpInferenceConfigUpdateContainer {
text_classification?: MlTextClassificationInferenceUpdateOptions
zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
fill_mask?: MlFillMaskInferenceUpdateOptions
ner?: MlNerInferenceUpdateOptions
pass_through?: MlPassThroughInferenceUpdateOptions
text_embedding?: MlTextEmbeddingInferenceUpdateOptions
}

export interface MlNlpRobertaTokenizationConfig {
add_prefix_space?: boolean
with_special_tokens?: boolean
@@ -12364,22 +12346,7 @@ export interface MlPerPartitionCategorization {
stop_on_warn?: boolean
}

export type MlPredictedValue = string | double | boolean | integer

export interface MlQuestionAnsweringInferenceOptions {
num_top_classes?: integer
tokenization?: MlTokenizationConfigContainer
results_field?: string
max_answer_length?: integer
}

export interface MlQuestionAnsweringInferenceUpdateOptions {
question: string
num_top_classes?: integer
tokenization?: MlNlpTokenizationUpdateOptions
results_field?: string
max_answer_length?: integer
}
export type MlPredictedValue = string | double

export interface MlRegressionInferenceOptions {
results_field?: Field
@@ -12519,14 +12486,14 @@ export interface MlTrainedModelDeploymentNodesStats {
average_inference_time_ms: double
error_count: integer
inference_count: integer
inference_threads: integer
last_access: long
model_threads: integer
node: MlDiscoveryNode
number_of_allocations: integer
number_of_pending_requests: integer
rejection_execution_count: integer
routing_state: MlTrainedModelAllocationRoutingTable
start_time: long
threads_per_allocation: integer
timeout_count: integer
}

@@ -12534,15 +12501,15 @@ export interface MlTrainedModelDeploymentStats {
allocation_status: MlTrainedModelDeploymentAllocationStatus
error_count: integer
inference_count: integer
inference_threads: integer
model_id: Id
model_threads: integer
nodes: MlTrainedModelDeploymentNodesStats
number_of_allocations: integer
queue_capacity: integer
rejected_execution_count: integer
reason: string
start_time: long
state: MlDeploymentState
threads_per_allocation: integer
timeout_count: integer
}

@@ -12554,17 +12521,6 @@ export interface MlTrainedModelEntities {
end_pos: integer
}

export interface MlTrainedModelInferenceClassImportance {
class_name: string
importance: double
}

export interface MlTrainedModelInferenceFeatureImportance {
feature_name: string
importance?: double
classes?: MlTrainedModelInferenceClassImportance[]
}

export interface MlTrainedModelInferenceStats {
cache_miss_count: integer
failure_count: integer
@@ -13240,18 +13196,24 @@ export interface MlGetTrainedModelsStatsResponse {
trained_model_stats: MlTrainedModelStats[]
}

export interface MlInferTrainedModelRequest extends RequestBase {
export interface MlInferTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
timeout?: Time
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
docs: Record<string, any>[]
inference_config?: MlInferenceConfigUpdateContainer
docs: Record<string, string>[]
inference_config?: MlNlpInferenceConfigUpdateContainer
}
}

export interface MlInferTrainedModelResponse {
inference_results: MlInferenceResponseResult[]
export interface MlInferTrainedModelDeploymentResponse {
entities?: MlTrainedModelEntities[]
is_truncated?: boolean
predicted_value?: MlPredictedValue[]
predicted_value_sequence?: string
prediction_probability?: double
top_classes: MlTopClassEntry[]
warning?: string
}

export interface MlInfoAnomalyDetectors {
@@ -13722,9 +13684,9 @@ export interface MlStartDatafeedResponse {

export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
number_of_allocations?: integer
inference_threads?: integer
model_threads?: integer
queue_capacity?: integer
threads_per_allocation?: integer
timeout?: Time
wait_for?: MlDeploymentAllocationState
}

@@ -20,7 +20,6 @@
import { ConnectionOptions as TlsConnectionOptions } from 'tls'
import { URL } from 'url'
import buffer from 'buffer'
import os from 'os'
import {
Transport,
UndiciConnection,
@@ -174,9 +173,7 @@ export default class Client extends API {
tls: null,
caFingerprint: null,
agent: null,
headers: {
'user-agent': `elasticsearch-js/${clientVersion} Node.js ${nodeVersion}; Transport ${transportVersion}; (${os.platform()} ${os.release()} ${os.arch()})`
},
headers: {},
nodeFilter: null,
generateRequestId: null,
name: 'elasticsearch-js',

@@ -229,6 +229,7 @@ export default class Helpers {
rest_total_hits_as_int: params.rest_total_hits_as_int,
scroll_id
}, options as TransportRequestOptionsWithMeta)
// @ts-expect-error
response = r as TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>
assert(response !== undefined, 'The response is undefined, please file a bug report')
if (response.statusCode !== 429) break

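The helper above retries scroll pages that come back with HTTP 429. A brief sketch of how the scroll search helper is consumed; the node URL, index, and query are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const scrollSearch = client.helpers.scrollSearch({
    index: 'my-index',
    query: { match_all: {} }
  })
  for await (const page of scrollSearch) {
    console.log(page.documents.length) // documents of the current scroll page
  }
}

run().catch(console.log)
```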
@@ -432,12 +432,3 @@ test('caFingerprint can\'t be configured over http / 2', t => {
)
t.end()
})

test('user agent is in the correct format', t => {
const client = new Client({ node: 'http://localhost:9200' })
const agentRaw = client.transport[symbols.kHeaders]['user-agent'] || ''
const agentSplit = agentRaw.split(/\s+/)
t.equal(agentSplit[0].split('/')[0], 'elasticsearch-js')
t.ok(/^\d+\.\d+\.\d+/.test(agentSplit[0].split('/')[1]))
t.end()
})
